repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
goldmedal/spark | python/pyspark/ml/fpm.py | 9 | 15928 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import keyword_only, since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql import DataFrame
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.param.shared import *
__all__ = ["FPGrowth", "FPGrowthModel", "PrefixSpan"]
class _FPGrowthParams(HasPredictionCol):
    """
    Params for :py:class:`FPGrowth` and :py:class:`FPGrowthModel`.

    .. versionadded:: 3.0.0
    """

    # The Param description strings below are surfaced verbatim by
    # explainParams(); they mirror the Scala-side
    # org.apache.spark.ml.fpm.FPGrowth parameter docs.
    itemsCol = Param(Params._dummy(), "itemsCol",
                     "items column name", typeConverter=TypeConverters.toString)
    minSupport = Param(
        Params._dummy(),
        "minSupport",
        "Minimal support level of the frequent pattern. [0.0, 1.0]. " +
        "Any pattern that appears more than (minSupport * size-of-the-dataset) " +
        "times will be output in the frequent itemsets.",
        typeConverter=TypeConverters.toFloat)
    numPartitions = Param(
        Params._dummy(),
        "numPartitions",
        "Number of partitions (at least 1) used by parallel FP-growth. " +
        "By default the param is not set, " +
        "and partition number of the input dataset is used.",
        typeConverter=TypeConverters.toInt)
    minConfidence = Param(
        Params._dummy(),
        "minConfidence",
        "Minimal confidence for generating Association Rule. [0.0, 1.0]. " +
        "minConfidence will not affect the mining for frequent itemsets, " +
        "but will affect the association rules generation.",
        typeConverter=TypeConverters.toFloat)

    def getItemsCol(self):
        """
        Gets the value of :py:attr:`itemsCol` or its default value.
        """
        return self.getOrDefault(self.itemsCol)

    def getMinSupport(self):
        """
        Gets the value of :py:attr:`minSupport` or its default value.
        """
        return self.getOrDefault(self.minSupport)

    def getNumPartitions(self):
        """
        Gets the value of :py:attr:`numPartitions` or its default value.
        """
        return self.getOrDefault(self.numPartitions)

    def getMinConfidence(self):
        """
        Gets the value of :py:attr:`minConfidence` or its default value.
        """
        return self.getOrDefault(self.minConfidence)
class FPGrowthModel(JavaModel, _FPGrowthParams, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by FPGrowth.

    .. versionadded:: 2.2.0
    """

    @since("3.0.0")
    def setItemsCol(self, value):
        """
        Sets the value of :py:attr:`itemsCol`.
        """
        return self._set(itemsCol=value)

    @since("3.0.0")
    def setMinConfidence(self, value):
        """
        Sets the value of :py:attr:`minConfidence`.
        """
        return self._set(minConfidence=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @property
    @since("2.2.0")
    def freqItemsets(self):
        """
        DataFrame with two columns:
        * `items` - Itemset of the same type as the input column.
        * `freq` - Frequency of the itemset (`LongType`).
        """
        # Delegates to the fitted JVM model; the DataFrame is evaluated lazily.
        return self._call_java("freqItemsets")

    @property
    @since("2.2.0")
    def associationRules(self):
        """
        DataFrame with four columns:
        * `antecedent` - Array of the same type as the input column.
        * `consequent` - Array of the same type as the input column.
        * `confidence` - Confidence for the rule (`DoubleType`).
        * `lift` - Lift for the rule (`DoubleType`).
        """
        # Rules are generated JVM-side from the mined frequent itemsets
        # using the minConfidence param.
        return self._call_java("associationRules")
@ignore_unicode_prefix
class FPGrowth(JavaEstimator, _FPGrowthParams, JavaMLWritable, JavaMLReadable):
    r"""
    A parallel FP-growth algorithm to mine frequent itemsets. The algorithm is described in
    Li et al., PFP: Parallel FP-Growth for Query Recommendation [LI2008]_.
    PFP distributes computation in such a way that each worker executes an
    independent group of mining tasks. The FP-Growth algorithm is described in
    Han et al., Mining frequent patterns without candidate generation [HAN2000]_

    .. [LI2008] https://doi.org/10.1145/1454008.1454027
    .. [HAN2000] https://doi.org/10.1145/335191.335372

    .. note:: null values in the feature column are ignored during fit().
    .. note:: Internally `transform` `collects` and `broadcasts` association rules.

    >>> from pyspark.sql.functions import split
    >>> data = (spark.read
    ...     .text("data/mllib/sample_fpgrowth.txt")
    ...     .select(split("value", "\s+").alias("items")))
    >>> data.show(truncate=False)
    +------------------------+
    |items                   |
    +------------------------+
    |[r, z, h, k, p]         |
    |[z, y, x, w, v, u, t, s]|
    |[s, x, o, n, r]         |
    |[x, z, y, m, t, s, q, e]|
    |[z]                     |
    |[x, z, y, r, q, t, p]   |
    +------------------------+
    ...
    >>> fp = FPGrowth(minSupport=0.2, minConfidence=0.7)
    >>> fpm = fp.fit(data)
    >>> fpm.setPredictionCol("newPrediction")
    FPGrowthModel...
    >>> fpm.freqItemsets.show(5)
    +---------+----+
    |    items|freq|
    +---------+----+
    |      [s]|   3|
    |   [s, x]|   3|
    |[s, x, z]|   2|
    |   [s, z]|   2|
    |      [r]|   3|
    +---------+----+
    only showing top 5 rows
    ...
    >>> fpm.associationRules.show(5)
    +----------+----------+----------+----+
    |antecedent|consequent|confidence|lift|
    +----------+----------+----------+----+
    |    [t, s]|       [y]|       1.0| 2.0|
    |    [t, s]|       [x]|       1.0| 1.5|
    |    [t, s]|       [z]|       1.0| 1.2|
    |       [p]|       [r]|       1.0| 2.0|
    |       [p]|       [z]|       1.0| 1.2|
    +----------+----------+----------+----+
    only showing top 5 rows
    ...
    >>> new_data = spark.createDataFrame([(["t", "s"], )], ["items"])
    >>> sorted(fpm.transform(new_data).first().newPrediction)
    [u'x', u'y', u'z']

    .. versionadded:: 2.2.0
    """

    @keyword_only
    def __init__(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
                 predictionCol="prediction", numPartitions=None):
        """
        __init__(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
                 predictionCol="prediction", numPartitions=None)
        """
        super(FPGrowth, self).__init__()
        # Wrap the JVM-side estimator; all actual mining happens in Scala.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.FPGrowth", self.uid)
        # numPartitions deliberately has no default: when unset, the input
        # dataset's partitioning is used (see the Param description).
        self._setDefault(minSupport=0.3, minConfidence=0.8,
                         itemsCol="items", predictionCol="prediction")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.2.0")
    def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
                  predictionCol="prediction", numPartitions=None):
        """
        setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
                  predictionCol="prediction", numPartitions=None)
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def setItemsCol(self, value):
        """
        Sets the value of :py:attr:`itemsCol`.
        """
        return self._set(itemsCol=value)

    def setMinSupport(self, value):
        """
        Sets the value of :py:attr:`minSupport`.
        """
        return self._set(minSupport=value)

    def setNumPartitions(self, value):
        """
        Sets the value of :py:attr:`numPartitions`.
        """
        return self._set(numPartitions=value)

    def setMinConfidence(self, value):
        """
        Sets the value of :py:attr:`minConfidence`.
        """
        return self._set(minConfidence=value)

    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return FPGrowthModel(java_model)
class PrefixSpan(JavaParams):
    """
    A parallel PrefixSpan algorithm to mine frequent sequential patterns.
    The PrefixSpan algorithm is described in J. Pei, et al., PrefixSpan: Mining Sequential Patterns
    Efficiently by Prefix-Projected Pattern Growth
    (see <a href="https://doi.org/10.1109/ICDE.2001.914830">here</a>).
    This class is not yet an Estimator/Transformer, use :py:func:`findFrequentSequentialPatterns`
    method to run the PrefixSpan algorithm.

    @see <a href="https://en.wikipedia.org/wiki/Sequential_Pattern_Mining">Sequential Pattern Mining
    (Wikipedia)</a>

    >>> from pyspark.ml.fpm import PrefixSpan
    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(sequence=[[1, 2], [3]]),
    ...                      Row(sequence=[[1], [3, 2], [1, 2]]),
    ...                      Row(sequence=[[1, 2], [5]]),
    ...                      Row(sequence=[[6]])]).toDF()
    >>> prefixSpan = PrefixSpan()
    >>> prefixSpan.getMaxLocalProjDBSize()
    32000000
    >>> prefixSpan.getSequenceCol()
    'sequence'
    >>> prefixSpan.setMinSupport(0.5)
    PrefixSpan...
    >>> prefixSpan.setMaxPatternLength(5)
    PrefixSpan...
    >>> prefixSpan.findFrequentSequentialPatterns(df).sort("sequence").show(truncate=False)
    +----------+----+
    |sequence  |freq|
    +----------+----+
    |[[1]]     |3   |
    |[[1], [3]]|2   |
    |[[2]]     |3   |
    |[[2, 1]]  |3   |
    |[[3]]     |2   |
    +----------+----+
    ...

    .. versionadded:: 2.4.0
    """

    # Param descriptions mirror the Scala-side
    # org.apache.spark.ml.fpm.PrefixSpan parameter docs.
    minSupport = Param(Params._dummy(), "minSupport", "The minimal support level of the " +
                       "sequential pattern. Sequential pattern that appears more than " +
                       "(minSupport * size-of-the-dataset) times will be output. Must be >= 0.",
                       typeConverter=TypeConverters.toFloat)

    maxPatternLength = Param(Params._dummy(), "maxPatternLength",
                             "The maximal length of the sequential pattern. Must be > 0.",
                             typeConverter=TypeConverters.toInt)

    maxLocalProjDBSize = Param(Params._dummy(), "maxLocalProjDBSize",
                               "The maximum number of items (including delimiters used in the " +
                               "internal storage format) allowed in a projected database before " +
                               "local processing. If a projected database exceeds this size, " +
                               "another iteration of distributed prefix growth is run. " +
                               "Must be > 0.",
                               typeConverter=TypeConverters.toInt)

    sequenceCol = Param(Params._dummy(), "sequenceCol", "The name of the sequence column in " +
                        "dataset, rows with nulls in this column are ignored.",
                        typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
                 sequenceCol="sequence"):
        """
        __init__(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \
                 sequenceCol="sequence")
        """
        super(PrefixSpan, self).__init__()
        # Wrap the JVM-side implementation; params are synced just before use
        # in findFrequentSequentialPatterns().
        self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.PrefixSpan", self.uid)
        self._setDefault(minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
                         sequenceCol="sequence")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.4.0")
    def setParams(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
                  sequenceCol="sequence"):
        """
        setParams(self, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \
                  sequenceCol="sequence")
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("3.0.0")
    def setMinSupport(self, value):
        """
        Sets the value of :py:attr:`minSupport`.
        """
        return self._set(minSupport=value)

    @since("3.0.0")
    def getMinSupport(self):
        """
        Gets the value of :py:attr:`minSupport` or its default value.
        """
        return self.getOrDefault(self.minSupport)

    @since("3.0.0")
    def setMaxPatternLength(self, value):
        """
        Sets the value of :py:attr:`maxPatternLength`.
        """
        return self._set(maxPatternLength=value)

    @since("3.0.0")
    def getMaxPatternLength(self):
        """
        Gets the value of :py:attr:`maxPatternLength` or its default value.
        """
        return self.getOrDefault(self.maxPatternLength)

    @since("3.0.0")
    def setMaxLocalProjDBSize(self, value):
        """
        Sets the value of :py:attr:`maxLocalProjDBSize`.
        """
        return self._set(maxLocalProjDBSize=value)

    @since("3.0.0")
    def getMaxLocalProjDBSize(self):
        """
        Gets the value of :py:attr:`maxLocalProjDBSize` or its default value.
        """
        return self.getOrDefault(self.maxLocalProjDBSize)

    @since("3.0.0")
    def setSequenceCol(self, value):
        """
        Sets the value of :py:attr:`sequenceCol`.
        """
        return self._set(sequenceCol=value)

    @since("3.0.0")
    def getSequenceCol(self):
        """
        Gets the value of :py:attr:`sequenceCol` or its default value.
        """
        return self.getOrDefault(self.sequenceCol)

    @since("2.4.0")
    def findFrequentSequentialPatterns(self, dataset):
        """
        Finds the complete set of frequent sequential patterns in the input sequences of itemsets.

        :param dataset: A dataframe containing a sequence column which is
                        `ArrayType(ArrayType(T))` type, T is the item type for the input dataset.
        :return: A `DataFrame` that contains columns of sequence and corresponding frequency.
                 The schema of it will be:
                 - `sequence: ArrayType(ArrayType(T))` (T is the item type)
                 - `freq: Long`

        .. versionadded:: 2.4.0
        """
        # Push any Python-side param updates to the JVM object before running
        # the algorithm, since this class is not a regular Estimator.
        self._transfer_params_to_java()
        jdf = self._java_obj.findFrequentSequentialPatterns(dataset._jdf)
        return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
    # Run this module's doctests against a local two-core SparkSession.
    import doctest
    import sys
    import tempfile
    import pyspark.ml.fpm
    from pyspark.sql import SparkSession
    globs = pyspark.ml.fpm.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.fpm tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    # Scratch directory exposed to doctests for save/load examples.
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        # BUG FIX: `sys` was previously used here without ever being
        # imported, so a doctest failure raised NameError instead of
        # exiting with a non-zero status.
        sys.exit(-1)
| apache-2.0 |
mattnenterprise/servo | tests/wpt/web-platform-tests/tools/third_party/py/py/_code/assertion.py | 60 | 3174 | import sys
import py
BuiltinAssertionError = py.builtin.builtins.AssertionError
_reprcompare = None # if set, will be called by assert reinterp for comparison ops
def _format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
raw_lines = (explanation or '').split('\n')
# escape newlines not followed by {, } and ~
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = 'and '
else:
s = 'where '
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
result.append(' '*len(stack) + line[1:])
assert len(stack) == 1
return '\n'.join(result)
class AssertionError(BuiltinAssertionError):
    """AssertionError that reinterprets the failing ``assert`` statement.

    When constructed without arguments it inspects the caller's frame,
    extracts the source of the failing assert and re-evaluates it to build
    an informative message in ``self.msg``.
    """

    def __init__(self, *args):
        BuiltinAssertionError.__init__(self, *args)
        if args:
            # Explicit message supplied: stringify it, guarding against a
            # broken __repr__ on the argument.
            try:
                self.msg = str(args[0])
            except py.builtin._sysex:
                raise
            except:
                # NOTE(review): "%0xd" renders id() in hex followed by a
                # literal "d" (e.g. "at 7f2a4d"); presumably "%#x" was
                # intended — kept as-is to preserve the message format.
                self.msg = "<[broken __repr__] %s at %0xd>" %(
                    args[0].__class__, id(args[0]))
        else:
            # Bare `assert`: locate the assert statement in the caller's
            # source and reinterpret it to produce the message.
            f = py.code.Frame(sys._getframe(1))
            try:
                source = f.code.fullsource
                if source is not None:
                    try:
                        source = source.getstatement(f.lineno, assertion=True)
                    except IndexError:
                        source = None
                    else:
                        source = str(source.deindent()).strip()
            except py.error.ENOENT:
                source = None
                # this can also occur during reinterpretation, when the
                # co_filename is set to "<run>".
            if source:
                self.msg = reinterpret(source, f, should_fail=True)
            else:
                self.msg = "<could not determine information>"
            if not self.args:
                self.args = (self.msg,)
# On Python 3 the old reinterpretation implementation is unavailable;
# masquerade as the builtins module so tracebacks show a plain
# "AssertionError" instead of the py-lib subclass name.
if sys.version_info > (3, 0):
    AssertionError.__module__ = "builtins"
    reinterpret_old = "old reinterpretation not available for py3"
else:
    from py._code._assertionold import interpret as reinterpret_old
# `reinterpret` is used by AssertionError.__init__ on all versions.
from py._code._assertionnew import interpret as reinterpret
| mpl-2.0 |
dulaccc/django-select2 | docs/conf.py | 14 | 9543 | # -*- coding: utf-8 -*-
#
# Django-Select2 documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 25 10:23:46 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# This is needed since django_select2 requires django model modules
# and those modules assume that django settings is configured and
# have proper DB settings.
# Using this we give a proper environment with working django settings.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../testapp'))
sys.path.insert(0, os.path.abspath('..'))
import datetime
year = datetime.datetime.now().strftime("%Y")
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'django': ('https://docs.djangoproject.com/en/1.4/',
'http://www.applegrew.com/others/django.inv'),
#'http://docs.djangoproject.com/en/1.4/_objects/'),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django-Select2'
copyright = u'%s, Nirupam Biswas' % year
def get_version():
    """Return the installed django_select2 version string.

    The import happens lazily inside the function so merely loading this
    Sphinx configuration does not require the package until the version
    is actually needed.
    """
    from django_select2 import __version__
    return __version__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
def skip(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` hook: keep documented ``__init__`` methods.

    Returns ``False`` (i.e. do not skip) for an ``__init__`` that carries
    a docstring; for every other member it defers to autodoc's default
    decision, passed in as ``skip``.
    """
    keep_init = name == "__init__" and bool(obj.__doc__)
    return False if keep_init else skip
def setup(app):
    # Sphinx extension entry point: register the autodoc-skip-member hook
    # above so documented __init__ methods appear in the API docs.
    app.connect("autodoc-skip-member", skip)
autodoc_default_flags = ['members', 'show-inheritance']
autodoc_member_order = 'bysource'
inheritance_graph_attrs = dict(rankdir="TB", size='""')
inheritance_node_attrs = dict(shape='ellipse', fontsize=9,
color='"#97C9FD"', style='filled')
inheritance_edge_attrs = dict(penwidth=0.75)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Django-Select2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Django-Select2.tex', u'Django-Select2 Documentation',
u'Nirupam Biswas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-select2', u'Django-Select2 Documentation',
[u'Nirupam Biswas'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Django-Select2', u'Django-Select2 Documentation',
u'Nirupam Biswas', 'Django-Select2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| apache-2.0 |
Bachaco-ve/odoo | addons/payment_authorize/tests/test_authorize.py | 195 | 7565 | # -*- coding: utf-8 -*-
import hashlib
import hmac
import time
import urlparse
from lxml import objectify
import openerp
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_authorize.controllers.main import AuthorizeController
from openerp.tools import mute_logger
@openerp.tests.common.at_install(True)
@openerp.tests.common.post_install(True)
class AuthorizeCommon(PaymentAcquirerCommon):
    """Shared fixtures for the Authorize.net payment acquirer tests."""

    def setUp(self):
        super(AuthorizeCommon, self).setUp()
        # Base URL used to build the relay/cancel return URLs.
        self.base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        # authorize only support USD in test environment
        self.currency_usd = self.env['res.currency'].search([('name', '=', 'USD')], limit=1)[0]
        # get the authorize account
        model, self.authorize_id = self.env['ir.model.data'].get_object_reference('payment_authorize', 'payment_acquirer_authorize')
@openerp.tests.common.at_install(True)
@openerp.tests.common.post_install(True)
class AuthorizeForm(AuthorizeCommon):
    """Form rendering and server-to-server feedback tests for Authorize.net."""

    def _authorize_generate_hashing(self, values):
        # Recompute Authorize.net's legacy MD5-HMAC fingerprint: login,
        # fp_sequence, fp_timestamp and amount joined by '^' (with a
        # trailing '^'), keyed with the transaction key.
        data = '^'.join([
            values['x_login'],
            values['x_fp_sequence'],
            values['x_fp_timestamp'],
            values['x_amount'],
        ]) + '^'
        return hmac.new(str(values['x_trans_key']), data, hashlib.md5).hexdigest()

    def test_10_Authorize_form_render(self):
        """Render the payment button and verify every generated form input."""
        authorize = self.env['payment.acquirer'].browse(self.authorize_id)
        self.assertEqual(authorize.environment, 'test', 'test without test environment')

        # ----------------------------------------
        # Test: button direct rendering
        # ----------------------------------------
        # Expected hidden inputs of the Authorize.net SIM payment form.
        form_values = {
            'x_login': authorize.authorize_login,
            'x_trans_key': authorize.authorize_transaction_key,
            'x_amount': '320.0',
            'x_show_form': 'PAYMENT_FORM',
            'x_type': 'AUTH_CAPTURE',
            'x_method': 'CC',
            'x_fp_sequence': '%s%s' % (authorize.id, int(time.time())),
            'x_version': '3.1',
            'x_relay_response': 'TRUE',
            'x_fp_timestamp': str(int(time.time())),
            'x_relay_url': '%s' % urlparse.urljoin(self.base_url, AuthorizeController._return_url),
            'x_cancel_url': '%s' % urlparse.urljoin(self.base_url, AuthorizeController._cancel_url),
            'return_url': None,
            'x_currency_code': 'USD',
            'x_invoice_num': 'SO004',
            'x_first_name': 'Norbert',
            'x_last_name': 'Buyer',
            'x_address': 'Huge Street 2/543',
            'x_city': 'Sin City',
            'x_zip': '1000',
            'x_country': 'Belgium',
            'x_phone': '0032 12 34 56 78',
            'x_email': 'norbert.buyer@example.com',
            'x_state': None,
        }
        form_values['x_fp_hash'] = self._authorize_generate_hashing(form_values)
        # render the button
        cr, uid, context = self.env.cr, self.env.uid, {}
        res = self.payment_acquirer.render(
            cr, uid, self.authorize_id, 'SO004', 320.0, self.currency_usd.id,
            partner_id=None, partner_values=self.buyer_values, context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://test.authorize.net/gateway/transact.dll', 'Authorize: wrong form POST url')
        for form_input in tree.input:
            # Generated and received 'x_fp_hash' are always different so skip it.
            if form_input.get('name') in ['submit', 'x_fp_hash']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'Authorize: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )

    @mute_logger('openerp.addons.payment_authorize.models.authorize', 'ValidationError')
    def test_20_authorize_form_management(self):
        """Feed the acquirer with relay-response data and check tx state."""
        cr, uid, context = self.env.cr, self.env.uid, {}
        # be sure not to do stupid thing
        authorize = self.env['payment.acquirer'].browse(self.authorize_id)
        self.assertEqual(authorize.environment, 'test', 'test without test environment')

        # typical data posted by authorize after client has successfully paid
        authorize_post_data = {
            'return_url': u'/shop/payment/validate',
            'x_MD5_Hash': u'7934485E1C105940BE854208D10FAB4F',
            'x_account_number': u'XXXX0027',
            'x_address': u'Huge Street 2/543',
            'x_amount': u'320.00',
            'x_auth_code': u'E4W7IU',
            'x_avs_code': u'Y',
            'x_card_type': u'Visa',
            'x_cavv_response': u'2',
            'x_city': u'Sun City',
            'x_company': u'',
            'x_country': u'Belgium',
            'x_cust_id': u'',
            'x_cvv2_resp_code': u'',
            'x_description': u'',
            'x_duty': u'0.00',
            'x_email': u'norbert.buyer@exampl',
            'x_fax': u'',
            'x_first_name': u'Norbert',
            'x_freight': u'0.00',
            'x_invoice_num': u'SO004',
            'x_last_name': u'Buyer',
            'x_method': u'CC',
            'x_phone': u'0032 12 34 56 78',
            'x_po_num': u'',
            'x_response_code': u'1',
            'x_response_reason_code': u'1',
            'x_response_reason_text': u'This transaction has been approved.',
            'x_ship_to_address': u'Huge Street 2/543',
            'x_ship_to_city': u'Sun City',
            'x_ship_to_company': u'',
            'x_ship_to_country': u'Belgium',
            'x_ship_to_first_name': u'Norbert',
            'x_ship_to_last_name': u'Buyer',
            'x_ship_to_state': u'',
            'x_ship_to_zip': u'1000',
            'x_state': u'',
            'x_tax': u'0.00',
            'x_tax_exempt': u'FALSE',
            'x_test_request': u'false',
            'x_trans_id': u'2217460311',
            'x_type': u'auth_capture',
            'x_zip': u'1000'
        }

        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.payment_transaction.form_feedback(cr, uid, authorize_post_data, 'authorize', context=context)

        tx = self.env['payment.transaction'].create({
            'amount': 320.0,
            'acquirer_id': self.authorize_id,
            'currency_id': self.currency_usd.id,
            'reference': 'SO004',
            'partner_name': 'Norbert Buyer',
            'partner_country_id': self.country_france_id})
        # validate it
        self.payment_transaction.form_feedback(cr, uid, authorize_post_data, 'authorize', context=context)
        # check state
        self.assertEqual(tx.state, 'done', 'Authorize: validation did not put tx into done state')
        self.assertEqual(tx.authorize_txnid, authorize_post_data.get('x_trans_id'), 'Authorize: validation did not update tx payid')

        # reset tx
        tx.write({'state': 'draft', 'date_validate': False, 'authorize_txnid': False})

        # simulate an error
        authorize_post_data['x_response_code'] = u'3'
        self.payment_transaction.form_feedback(cr, uid, authorize_post_data, 'authorize', context=context)
        # check state
        self.assertEqual(tx.state, 'error', 'Authorize: erroneous validation did not put tx into error state')
| agpl-3.0 |
ryfeus/lambda-packs | Spacy/source2.7/numpy/lib/tests/test_arraysetops.py | 2 | 18032 | """Test functions for 1D array set operations.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
run_module_suite, assert_array_equal, assert_equal, assert_raises,
)
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
class TestSetOps(object):
    """Tests for the 1-D set operations (intersect1d, setxor1d, ediff1d,
    isin/in1d, union1d, setdiff1d) from ``numpy.lib.arraysetops``."""

    def test_intersect1d(self):
        # unique inputs
        a = np.array([5, 7, 1, 2])
        b = np.array([2, 4, 3, 1, 5])
        ec = np.array([1, 2, 5])
        c = intersect1d(a, b, assume_unique=True)
        assert_array_equal(c, ec)
        # non-unique inputs
        a = np.array([5, 5, 7, 1, 2])
        b = np.array([2, 1, 4, 3, 3, 1, 5])
        ed = np.array([1, 2, 5])
        c = intersect1d(a, b)
        assert_array_equal(c, ed)
        # empty inputs yield an empty result
        assert_array_equal([], intersect1d([], []))

    def test_setxor1d(self):
        # partially overlapping inputs
        a = np.array([5, 7, 1, 2])
        b = np.array([2, 4, 3, 1, 5])
        ec = np.array([3, 4, 7])
        c = setxor1d(a, b)
        assert_array_equal(c, ec)
        # fully disjoint inputs: xor is the sorted union
        a = np.array([1, 2, 3])
        b = np.array([6, 5, 4])
        ec = np.array([1, 2, 3, 4, 5, 6])
        c = setxor1d(a, b)
        assert_array_equal(c, ec)
        # one shared element (8) drops out of the result
        a = np.array([1, 8, 2, 3])
        b = np.array([6, 5, 4, 8])
        ec = np.array([1, 2, 3, 4, 5, 6])
        c = setxor1d(a, b)
        assert_array_equal(c, ec)
        assert_array_equal([], setxor1d([], []))

    def test_ediff1d(self):
        zero_elem = np.array([])
        one_elem = np.array([1])
        two_elem = np.array([1, 2])
        # empty input: only to_begin/to_end contribute
        assert_array_equal([], ediff1d(zero_elem))
        assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
        assert_array_equal([0], ediff1d(zero_elem, to_end=0))
        assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
        # single element: no differences
        assert_array_equal([], ediff1d(one_elem))
        assert_array_equal([1], ediff1d(two_elem))
        # scalar and array-valued to_begin/to_end are prepended/appended
        assert_array_equal([7,1,9], ediff1d(two_elem, to_begin=7, to_end=9))
        assert_array_equal([5,6,1,7,8], ediff1d(two_elem, to_begin=[5,6], to_end=[7,8]))
        assert_array_equal([1,9], ediff1d(two_elem, to_end=9))
        assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
        assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
        assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
        # ediff1d must preserve the np.matrix subclass
        # NOTE(review): np.matrix is deprecated in modern NumPy.
        assert(isinstance(ediff1d(np.matrix(1)), np.matrix))
        assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix))

    def test_isin(self):
        # the tests for in1d cover most of isin's behavior
        # if in1d is removed, would need to change those tests to test
        # isin instead.
        def _isin_slow(a, b):
            # reference implementation: plain Python membership test
            b = np.asarray(b).flatten().tolist()
            return a in b
        isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
        def assert_isin_equal(a, b):
            # isin must agree with the element-wise reference
            x = isin(a, b)
            y = isin_slow(a, b)
            assert_array_equal(x, y)
        #multidimensional arrays in both arguments
        a = np.arange(24).reshape([2, 3, 4])
        b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
        assert_isin_equal(a, b)
        #array-likes as both arguments
        c = [(9, 8), (7, 6)]
        d = (9, 7)
        assert_isin_equal(c, d)
        #zero-d array:
        f = np.array(3)
        assert_isin_equal(f, b)
        assert_isin_equal(a, f)
        assert_isin_equal(f, f)
        #scalar:
        assert_isin_equal(5, b)
        assert_isin_equal(a, 6)
        assert_isin_equal(5, 6)
        #empty array-like:
        x = []
        assert_isin_equal(x, b)
        assert_isin_equal(a, x)
        assert_isin_equal(x, x)

    def test_in1d(self):
        # we use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            # One check without np.array to make sure lists are handled correct
            a = [5, 7, 1, 2]
            b = [2, 4, 3, 1, 5] * mult
            ec = np.array([True, False, True, True])
            c = in1d(a, b, assume_unique=True)
            assert_array_equal(c, ec)
            a[0] = 8
            ec = np.array([False, False, True, True])
            c = in1d(a, b, assume_unique=True)
            assert_array_equal(c, ec)
            a[0], a[3] = 4, 8
            ec = np.array([True, False, True, False])
            c = in1d(a, b, assume_unique=True)
            assert_array_equal(c, ec)
            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            ec = [False, True, False, True, True, True, True, True, True,
                  False, True, False, False, False]
            c = in1d(a, b)
            assert_array_equal(c, ec)
            b = b + [5, 5, 4] * mult
            ec = [True, True, True, True, True, True, True, True, True, True,
                  True, False, True, True]
            c = in1d(a, b)
            assert_array_equal(c, ec)
            a = np.array([5, 7, 1, 2])
            b = np.array([2, 4, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True])
            c = in1d(a, b)
            assert_array_equal(c, ec)
            a = np.array([5, 7, 1, 1, 2])
            b = np.array([2, 4, 3, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True, True])
            c = in1d(a, b)
            assert_array_equal(c, ec)
            a = np.array([5, 5])
            b = np.array([2, 2] * mult)
            ec = np.array([False, False])
            c = in1d(a, b)
            assert_array_equal(c, ec)
        a = np.array([5])
        b = np.array([2])
        ec = np.array([False])
        c = in1d(a, b)
        assert_array_equal(c, ec)
        assert_array_equal(in1d([], []), [])

    def test_in1d_char_array(self):
        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
        b = np.array(['a', 'c'])
        ec = np.array([True, False, True, False, False, True, False, False])
        c = in1d(a, b)
        assert_array_equal(c, ec)

    def test_in1d_invert(self):
        "Test in1d's invert parameter"
        # We use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

    def test_in1d_ravel(self):
        # Test that in1d ravels its input arrays. This is not documented
        # behavior however. The test is to ensure consistency.
        a = np.arange(6).reshape(2, 3)
        b = np.arange(3, 9).reshape(3, 2)
        long_b = np.arange(3, 63).reshape(30, 2)
        ec = np.array([False, False, False, True, True, True])
        assert_array_equal(in1d(a, b, assume_unique=True), ec)
        assert_array_equal(in1d(a, b, assume_unique=False), ec)
        assert_array_equal(in1d(a, long_b, assume_unique=True), ec)
        assert_array_equal(in1d(a, long_b, assume_unique=False), ec)

    def test_in1d_first_array_is_object(self):
        ar1 = [None]
        ar2 = np.array([1]*10)
        expected = np.array([False])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_second_array_is_object(self):
        ar1 = 1
        ar2 = np.array([None]*10)
        expected = np.array([False])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_both_arrays_are_object(self):
        ar1 = [None]
        ar2 = np.array([None]*10)
        expected = np.array([True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_in1d_both_arrays_have_structured_dtype(self):
        # Test arrays of a structured data type containing an integer field
        # and a field of dtype `object` allowing for arbitrary Python objects
        dt = np.dtype([('field1', int), ('field2', object)])
        ar1 = np.array([(1, None)], dtype=dt)
        ar2 = np.array([(1, None)]*10, dtype=dt)
        expected = np.array([True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)

    def test_union1d(self):
        a = np.array([5, 4, 7, 1, 2])
        b = np.array([2, 4, 3, 3, 2, 1, 5])
        ec = np.array([1, 2, 3, 4, 5, 7])
        c = union1d(a, b)
        assert_array_equal(c, ec)
        # Tests gh-10340, arguments to union1d should be
        # flattened if they are not already 1D
        x = np.array([[0, 1, 2], [3, 4, 5]])
        y = np.array([0, 1, 2, 3, 4])
        ez = np.array([0, 1, 2, 3, 4, 5])
        z = union1d(x, y)
        assert_array_equal(z, ez)
        assert_array_equal([], union1d([], []))

    def test_setdiff1d(self):
        a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
        b = np.array([2, 4, 3, 3, 2, 1, 5])
        ec = np.array([6, 7])
        c = setdiff1d(a, b)
        assert_array_equal(c, ec)
        a = np.arange(21)
        b = np.arange(19)
        ec = np.array([19, 20])
        c = setdiff1d(a, b)
        assert_array_equal(c, ec)
        assert_array_equal([], setdiff1d([], []))
        # the input dtype must be preserved even for an empty second operand
        a = np.array((), np.uint32)
        assert_equal(setdiff1d(a, []).dtype, np.uint32)

    def test_setdiff1d_char_array(self):
        a = np.array(['a', 'b', 'c'])
        b = np.array(['a', 'b', 's'])
        assert_array_equal(setdiff1d(a, b), np.array(['c']))

    def test_manyways(self):
        # setxor1d(a, b) must equal union1d(a, b) minus intersect1d(a, b)
        a = np.array([5, 7, 1, 2, 8])
        b = np.array([9, 8, 2, 4, 3, 1, 5])
        c1 = setxor1d(a, b)
        aux1 = intersect1d(a, b)
        aux2 = union1d(a, b)
        c2 = setdiff1d(aux2, aux1)
        assert_array_equal(c1, c2)
class TestUnique(object):
    """Tests for ``np.unique``: 1-D behaviour with all return_* flags,
    the axis keyword, masked arrays and regression tickets."""

    def test_unique_1d(self):
        def check_all(a, b, i1, i2, c, dt):
            # Exercise every combination of the positional flags
            # (return_index, return_inverse, return_counts) and compare
            # against the expected values b, i1, i2 and c.
            base_msg = 'check {0} failed for type {1}'
            msg = base_msg.format('values', dt)
            v = unique(a)
            assert_array_equal(v, b, msg)
            msg = base_msg.format('return_index', dt)
            v, j = unique(a, 1, 0, 0)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i1, msg)
            msg = base_msg.format('return_inverse', dt)
            v, j = unique(a, 0, 1, 0)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i2, msg)
            msg = base_msg.format('return_counts', dt)
            v, j = unique(a, 0, 0, 1)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, c, msg)
            msg = base_msg.format('return_index and return_inverse', dt)
            v, j1, j2 = unique(a, 1, 1, 0)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)
            msg = base_msg.format('return_index and return_counts', dt)
            v, j1, j2 = unique(a, 1, 0, 1)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, c, msg)
            msg = base_msg.format('return_inverse and return_counts', dt)
            v, j1, j2 = unique(a, 0, 1, 1)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i2, msg)
            assert_array_equal(j2, c, msg)
            msg = base_msg.format(('return_index, return_inverse '
                                   'and return_counts'), dt)
            v, j1, j2, j3 = unique(a, 1, 1, 1)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)
            assert_array_equal(j3, c, msg)
        # shared fixture: values, expected uniques, indices, inverse, counts
        a = [5, 7, 1, 2, 1, 5, 7]*10
        b = [1, 2, 5, 7]
        i1 = [2, 3, 0, 1]
        i2 = [2, 3, 0, 1, 0, 2, 3]*10
        c = np.multiply([2, 1, 2, 2], 10)
        # test for numeric arrays
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        for dt in types:
            aa = np.array(a, dt)
            bb = np.array(b, dt)
            check_all(aa, bb, i1, i2, c, dt)
        # test for object arrays
        dt = 'O'
        aa = np.empty(len(a), dt)
        aa[:] = a
        bb = np.empty(len(b), dt)
        bb[:] = b
        check_all(aa, bb, i1, i2, c, dt)
        # test for structured arrays
        dt = [('', 'i'), ('', 'i')]
        aa = np.array(list(zip(a, a)), dt)
        bb = np.array(list(zip(b, b)), dt)
        check_all(aa, bb, i1, i2, c, dt)
        # test for ticket #2799
        aa = [1. + 0.j, 1 - 1.j, 1]
        assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])
        # test for ticket #4785
        a = [(1, 2), (1, 2), (2, 3)]
        unq = [1, 2, 3]
        inv = [0, 1, 0, 1, 1, 2]
        a1 = unique(a)
        assert_array_equal(a1, unq)
        a2, a2_inv = unique(a, return_inverse=True)
        assert_array_equal(a2, unq)
        assert_array_equal(a2_inv, inv)
        # test for chararrays with return_inverse (gh-5099)
        a = np.chararray(5)
        a[...] = ''
        a2, a2_inv = np.unique(a, return_inverse=True)
        assert_array_equal(a2_inv, np.zeros(5))
        # test for ticket #9137
        a = []
        a1_idx = np.unique(a, return_index=True)[1]
        a2_inv = np.unique(a, return_inverse=True)[1]
        a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:]
        assert_equal(a1_idx.dtype, np.intp)
        assert_equal(a2_inv.dtype, np.intp)
        assert_equal(a3_idx.dtype, np.intp)
        assert_equal(a3_inv.dtype, np.intp)

    def test_unique_axis_errors(self):
        # object and mixed structured dtypes cannot be sorted along an axis
        assert_raises(TypeError, self._run_axis_tests, object)
        assert_raises(TypeError, self._run_axis_tests,
                      [('a', int), ('b', object)])
        # out-of-range axis values must raise
        assert_raises(ValueError, unique, np.arange(10), axis=2)
        assert_raises(ValueError, unique, np.arange(10), axis=-2)

    def test_unique_axis_list(self):
        msg = "Unique failed on list of lists"
        inp = [[0, 1, 0], [0, 1, 0]]
        inp_arr = np.asarray(inp)
        assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
        assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)

    def test_unique_axis(self):
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        types.append([('a', int), ('b', int)])
        types.append([('a', int), ('b', float)])
        for dtype in types:
            self._run_axis_tests(dtype)
        # booleans that are not bitwise-equal (e.g. 0x01 vs 0x02 viewed as
        # bool) must still compare equal for uniqueness purposes
        msg = 'Non-bitwise-equal booleans test failed'
        data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)
        result = np.array([[False, True], [True, True]], dtype=bool)
        assert_array_equal(unique(data, axis=0), result, msg)
        # -0.0 and 0.0 must be treated as equal
        msg = 'Negative zero equality test failed'
        data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])
        result = np.array([[-0.0, 0.0]])
        assert_array_equal(unique(data, axis=0), result, msg)

    def test_unique_masked(self):
        # issue 8664
        x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8')
        y = np.ma.masked_equal(x, 0)
        v = np.unique(y)
        v2, i, c = np.unique(y, return_index=True, return_counts=True)
        msg = 'Unique returned different results when asked for index'
        assert_array_equal(v.data, v2.data, msg)
        assert_array_equal(v.mask, v2.mask, msg)

    def test_unique_sort_order_with_axis(self):
        # These tests fail if sorting along axis is done by treating subarrays
        # as unsigned byte strings.  See gh-10495.
        fmt = "sort order incorrect for integer type '%s'"
        for dt in 'bhilq':
            a = np.array([[-1],[0]], dt)
            b = np.unique(a, axis=0)
            assert_array_equal(a, b, fmt % dt)

    def _run_axis_tests(self, dtype):
        # Shared helper: checks unique() along axis 0, 1 and 2 plus all
        # return_* flags for the given dtype.
        data = np.array([[0, 1, 0, 0],
                         [1, 0, 0, 0],
                         [0, 1, 0, 0],
                         [1, 0, 0, 0]]).astype(dtype)
        msg = 'Unique with 1d array and axis=0 failed'
        result = np.array([0, 1])
        assert_array_equal(unique(data), result.astype(dtype), msg)
        msg = 'Unique with 2d array and axis=0 failed'
        result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])
        assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)
        msg = 'Unique with 2d array and axis=1 failed'
        result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
        assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
        msg = 'Unique with 3d array and axis=2 failed'
        data3d = np.dstack([data] * 3)
        result = data3d[..., :1]
        assert_array_equal(unique(data3d, axis=2), result, msg)
        uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
                                     return_inverse=True, return_counts=True)
        msg = "Unique's return_index=True failed with axis=0"
        assert_array_equal(data[idx], uniq, msg)
        msg = "Unique's return_inverse=True failed with axis=0"
        # NOTE(review): msg is built but not passed to this assert (and to
        # the axis=1 return_index assert below) — likely an oversight.
        assert_array_equal(uniq[inv], data)
        msg = "Unique's return_counts=True failed with axis=0"
        assert_array_equal(cnt, np.array([2, 2]), msg)
        uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,
                                     return_inverse=True, return_counts=True)
        msg = "Unique's return_index=True failed with axis=1"
        assert_array_equal(data[:, idx], uniq)
        msg = "Unique's return_inverse=True failed with axis=1"
        assert_array_equal(uniq[:, inv], data)
        msg = "Unique's return_counts=True failed with axis=1"
        assert_array_equal(cnt, np.array([2, 1, 1]), msg)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_module_suite()
| mit |
Jorge-Rodriguez/ansible | lib/ansible/modules/net_tools/haproxy.py | 17 | 16304 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ravi Bhure <ravibhure@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: haproxy
version_added: "1.9"
short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands.
author: "Ravi Bhure (@ravibhure)"
description:
- Enable, disable, drain and set weights for HAProxy backend servers using socket
commands.
notes:
- Enable, disable and drain commands are restricted and can only be issued on
sockets configured for level 'admin'. For example, you can add the line
'stats socket /var/run/haproxy.sock level admin' to the general section of
haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
- Depends on netcat (nc) being available; you need to install the appropriate
package for your operating system before this module can be used.
options:
backend:
description:
- Name of the HAProxy backend pool.
default: auto-detected
drain:
description:
- Wait until the server has no active connections or until the timeout
determined by wait_interval and wait_retries is reached. Continue only
after the status changes to 'MAINT'. This overrides the
shutdown_sessions option.
type: bool
version_added: "2.4"
host:
description:
- Name of the backend host to change.
required: true
shutdown_sessions:
description:
- When disabling a server, immediately terminate all the sessions attached
to the specified server. This can be used to terminate long-running
sessions after a server is put into maintenance mode. Overridden by the
drain option.
type: bool
default: 'no'
socket:
description:
- Path to the HAProxy socket file.
default: /var/run/haproxy.sock
state:
description:
- Desired state of the provided backend host.
- Note that C(drain) state was added in version 2.4. It is supported only by HAProxy version 1.5 or later,
if used on versions < 1.5, it will be ignored.
required: true
choices: [ "enabled", "disabled", "drain" ]
fail_on_not_found:
description:
- Fail whenever trying to enable/disable a backend host that does not exist
type: bool
default: 'no'
version_added: "2.2"
wait:
description:
- Wait until the server reports a status of 'UP' when `state=enabled`,
status of 'MAINT' when `state=disabled` or status of 'DRAIN' when `state=drain`
type: bool
default: 'no'
version_added: "2.0"
wait_interval:
description:
- Number of seconds to wait between retries.
default: 5
version_added: "2.0"
wait_retries:
description:
- Number of times to check for status after changing the state.
default: 25
version_added: "2.0"
weight:
description:
- The value passed in argument. If the value ends with the `%` sign, then
the new weight will be relative to the initially configured weight.
Relative weights are only permitted between 0 and 100% and absolute
weights are permitted between 0 and 256.
'''
EXAMPLES = '''
# disable server in 'www' backend pool
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
backend: www
# disable server without backend pool name (apply to all available backend pool)
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
# disable server, provide socket file
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
# disable server, provide socket file, wait until status reports in maintenance
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
wait: yes
# Place server in drain mode, providing a socket file. Then check the server's
# status every minute to see if it changes to maintenance mode, continuing if it
# does in an hour and failing otherwise.
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
wait: yes
drain: yes
wait_interval: 1
wait_retries: 60
# disable backend server in 'www' backend pool and drop open sessions to it
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
backend: www
socket: /var/run/haproxy.sock
shutdown_sessions: true
# disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
fail_on_not_found: yes
# enable server in 'www' backend pool
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
# enable server in 'www' backend pool wait until healthy
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
wait: yes
# enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
wait: yes
wait_retries: 10
wait_interval: 5
# enable server in 'www' backend pool with change server(s) weight
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
weight: 10
backend: www
# set the server in 'www' backend pool to drain mode
- haproxy:
state: drain
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
'''
import csv
import socket
import time
from string import Template
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_text
DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
RECV_SIZE = 1024
ACTION_CHOICES = ['enabled', 'disabled', 'drain']
WAIT_RETRIES = 25
WAIT_INTERVAL = 5
######################################################################
class TimeoutException(Exception):
    """Raised when an HAProxy socket operation exceeds its time budget."""
    pass
class HAProxy(object):
    """
    Used for communicating with HAProxy through its local UNIX socket interface.

    Perform common tasks in Haproxy related to enable server and
    disable server.

    The complete set of external commands Haproxy handles is documented
    on their website:

    http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
    """

    def __init__(self, module):
        self.module = module

        self.state = self.module.params['state']
        self.host = self.module.params['host']
        self.backend = self.module.params['backend']
        self.weight = self.module.params['weight']
        self.socket = self.module.params['socket']
        self.shutdown_sessions = self.module.params['shutdown_sessions']
        self.fail_on_not_found = self.module.params['fail_on_not_found']
        self.wait = self.module.params['wait']
        self.wait_retries = self.module.params['wait_retries']
        self.wait_interval = self.module.params['wait_interval']
        self._drain = self.module.params['drain']
        self.command_results = {}

    def execute(self, cmd, timeout=200, capture_output=True):
        """
        Executes a HAProxy command by sending a message to a HAProxy's local
        UNIX socket and waiting up to 'timeout' milliseconds for the response.

        NOTE(review): 'timeout' is currently not applied to the socket; the
        recv loop blocks until HAProxy closes the connection. Kept as-is to
        avoid changing behavior.
        """
        self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.client.connect(self.socket)
        self.client.sendall(to_bytes('%s\n' % cmd))

        # Read until the server closes the connection (empty recv).
        result = b''
        buf = self.client.recv(RECV_SIZE)
        while buf:
            result += buf
            buf = self.client.recv(RECV_SIZE)
        result = to_text(result, errors='surrogate_or_strict')

        if capture_output:
            self.capture_command_output(cmd, result.strip())
        self.client.close()
        return result

    def capture_command_output(self, cmd, output):
        """
        Capture the output for a command, appending to the lists kept in
        self.command_results under the 'command' and 'output' keys.
        """
        self.command_results.setdefault('command', []).append(cmd)
        self.command_results.setdefault('output', []).append(output)

    def discover_all_backends(self):
        """
        Discover all entries with svname = 'BACKEND' and return a list of their
        corresponding pxnames.
        """
        data = self.execute('show stat', 200, False).lstrip('# ')
        r = csv.DictReader(data.splitlines())
        return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))

    def discover_version(self):
        """
        Attempt to extract the haproxy version.
        Return a tuple containing major and minor version, or None if the
        version line is absent or unparsable.
        """
        data = self.execute('show info', 200, False)
        lines = data.splitlines()
        line = [x for x in lines if 'Version:' in x]
        try:
            version_values = line[0].partition(':')[2].strip().split('.', 3)
            version = (int(version_values[0]), int(version_values[1]))
        except (ValueError, TypeError, IndexError):
            version = None

        return version

    def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
        """
        Run some command on the specified backends. If no backends are provided
        they will be discovered automatically (all backends).

        'cmd' is a string.Template with $pxname/$svname placeholders.
        """
        # Discover backends if none are given
        if pxname is None:
            backends = self.discover_all_backends()
        else:
            backends = [pxname]

        # Run the command for each requested backend
        for backend in backends:
            # Fail when backends were not found
            state = self.get_state_for(backend, svname)
            if (self.fail_on_not_found) and state is None:
                self.module.fail_json(
                    msg="The specified backend '%s/%s' was not found!" % (backend, svname))

            if state is not None:
                self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
                if self.wait:
                    self.wait_until_status(backend, svname, wait_for_status)

    def get_state_for(self, pxname, svname):
        """
        Find the state of specific services. When pxname is not set, get all
        backends for a specific host. Returns a tuple of dictionaries with the
        status, weight and current session count for those services, or None
        when nothing matched.
        """
        data = self.execute('show stat', 200, False).lstrip('# ')
        r = csv.DictReader(data.splitlines())
        state = tuple(
            map(
                lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
                filter(lambda d: (pxname is None or d['pxname']
                                  == pxname) and d['svname'] == svname, r)
            )
        )
        return state or None

    def wait_until_status(self, pxname, svname, status):
        """
        Wait for a service to reach the specified status. Try wait_retries
        times with wait_interval seconds of sleep in between. If the service
        has not reached the expected status in that time, the module fails.
        """
        # BUGFIX: was range(1, wait_retries), which performed one fewer
        # attempt than configured.
        for i in range(self.wait_retries):
            state = self.get_state_for(pxname, svname)

            # We can assume there will only be 1 element in state because both
            # svname and pxname are always set when we get here.
            if state[0]['status'] == status:
                # When draining, additionally wait for active sessions to hit
                # zero and for the server to actually be in maintenance mode.
                # BUGFIX: was "state == 'MAINT'", comparing the whole tuple to
                # a string, which could never be true.
                if not self._drain or (state[0]['scur'] == '0' and state[0]['status'] == 'MAINT'):
                    return True
            # BUGFIX: sleep on every unsuccessful attempt; previously the
            # sleep only happened when the status did not match, so the
            # drain case busy-looped without pausing.
            time.sleep(self.wait_interval)

        self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
                                  (pxname, svname, status, self.wait_retries))

    def enabled(self, host, backend, weight):
        """
        Enabled action, marks server to UP and checks are re-enabled,
        also supports to get current weight for server (default) and
        set the weight for haproxy backend server when provided.
        """
        cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
        if weight:
            cmd += "; set weight $pxname/$svname %s" % weight
        self.execute_for_backends(cmd, backend, host, 'UP')

    def disabled(self, host, backend, shutdown_sessions):
        """
        Disabled action, marks server to DOWN for maintenance. In this mode, no
        more checks will be performed on the server until it leaves maintenance,
        also it shuts down sessions while disabling backend host server.
        """
        cmd = "get weight $pxname/$svname; disable server $pxname/$svname"
        if shutdown_sessions:
            cmd += "; shutdown sessions server $pxname/$svname"
        self.execute_for_backends(cmd, backend, host, 'MAINT')

    def drain(self, host, backend, status='DRAIN'):
        """
        Drain action, sets the server to DRAIN mode.
        In this mode, the server will not accept any new connections
        other than those that are accepted via persistence.
        """
        haproxy_version = self.discover_version()

        # check if haproxy version supports DRAIN state (starting with 1.5)
        if haproxy_version and (1, 5) <= haproxy_version:
            cmd = "set server $pxname/$svname state drain"
            self.execute_for_backends(cmd, backend, host, status)

    def act(self):
        """
        Figure out what you want to do from ansible, and then do it.
        """
        # Get the state before the run
        state_before = self.get_state_for(self.backend, self.host)
        self.command_results['state_before'] = state_before

        # toggle enable/disable server
        if self.state == 'enabled':
            self.enabled(self.host, self.backend, self.weight)
        elif self.state == 'disabled' and self._drain:
            self.drain(self.host, self.backend, status='MAINT')
        elif self.state == 'disabled':
            self.disabled(self.host, self.backend, self.shutdown_sessions)
        elif self.state == 'drain':
            self.drain(self.host, self.backend)
        else:
            self.module.fail_json(msg="unknown state specified: '%s'" % self.state)

        # Get the state after the run
        state_after = self.get_state_for(self.backend, self.host)
        self.command_results['state_after'] = state_after

        # Report change status
        self.command_results['changed'] = (state_before != state_after)
        self.module.exit_json(**self.command_results)
def main():
    """Module entry point: parse arguments and apply the requested action."""
    # load ansible module object
    module = AnsibleModule(
        argument_spec=dict(
            # NOTE: required parameters must not also declare a default.
            state=dict(required=True, choices=ACTION_CHOICES),
            host=dict(required=True),
            backend=dict(required=False, default=None),
            weight=dict(required=False, default=None),
            socket=dict(required=False, default=DEFAULT_SOCKET_LOCATION),
            shutdown_sessions=dict(required=False, default=False, type='bool'),
            fail_on_not_found=dict(required=False, default=False, type='bool'),
            wait=dict(required=False, default=False, type='bool'),
            wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
            wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
            drain=dict(default=False, type='bool'),
        ),
    )

    # BUGFIX: the original check was "if not socket:", which tested the
    # imported 'socket' *module* (always truthy) and could never trigger.
    # Validate the socket path parameter instead.
    if not module.params['socket']:
        module.fail_json(msg="unable to locate haproxy socket")

    ansible_haproxy = HAProxy(module)
    ansible_haproxy.act()


if __name__ == '__main__':
    main()
| gpl-3.0 |
byzvulture/android_kernel_zte_nx503a | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """build_ext variant that redirects build output into the directories
    supplied through the PYTHON_EXTBUILD_* environment variables (module-level
    build_lib / build_tmp below)."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """install_lib variant that installs from the externally supplied
    build directory (module-level build_lib)."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Base compiler flags, extended with any CFLAGS from the environment.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Output directories are injected by the caller via environment variables
# (presumably the perf Makefile — see the build_ext/install_lib overrides).
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Each non-empty, non-comment line of 'util/python-ext-sources' names one
# C source file.  NOTE: 'file' is the Python 2 builtin open; this script
# targets python2 (see the shebang).
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
kubeflow/pipelines | components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_launch_template.py | 1 | 3920 | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import time
from google.cloud import storage
from kfp_component.core import KfpExecutionContext
from ._client import DataflowClient
from ._common_ops import (wait_and_dump_job, get_staging_location,
read_job_id_and_location, upload_job_id_and_location)
def launch_template(project_id, gcs_path, launch_parameters,
    location=None, validate_only=None, staging_dir=None,
    wait_interval=30,
    job_id_output_path='/tmp/kfp/output/dataflow/job_id.txt',
    job_object_output_path='/tmp/kfp/output/dataflow/job.json',
):
    """Launches a dataflow job from template.

    Args:
        project_id (str): Required. The ID of the Cloud Platform project
            that the job belongs to.
        gcs_path (str): Required. A Cloud Storage path to the template
            from which to create the job. Must be valid Cloud
            Storage URL, beginning with 'gs://'.
        launch_parameters (dict): Parameters to provide to the template
            being launched. Schema defined in
            https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters.
            `jobName` will be replaced by generated name. The caller's dict
            is not modified.
        location (str): The regional endpoint to which to direct the
            request.
        validate_only (boolean): If true, the request is validated but
            not actually executed. Defaults to false.
        staging_dir (str): Optional. The GCS directory for keeping staging files.
            A random subdirectory will be created under the directory to keep job info
            for resuming the job in case of failure.
        wait_interval (int): The wait seconds between polling.

    Returns:
        The completed job, or None in validate-only mode.
    """
    storage_client = storage.Client()
    df_client = DataflowClient()
    job_id = None

    def cancel():
        # on_cancel hook for KfpExecutionContext: best-effort cancel of the
        # launched job, if one was started.
        if job_id:
            df_client.cancel_job(
                project_id,
                job_id,
                location
            )

    with KfpExecutionContext(on_cancel=cancel) as ctx:
        staging_location = get_staging_location(staging_dir, ctx.context_id())
        job_id, _ = read_job_id_and_location(storage_client, staging_location)
        # Continue waiting for the job if it has been uploaded to the staging
        # location by a previous (failed/retried) run.
        if job_id:
            job = df_client.get_job(project_id, job_id, location)
            return wait_and_dump_job(df_client, project_id, location, job,
                wait_interval,
                job_id_output_path=job_id_output_path,
                job_object_output_path=job_object_output_path,
            )

        # BUGFIX: copy the parameters so the caller's dict is not mutated
        # when the generated job name is injected.
        launch_parameters = dict(launch_parameters) if launch_parameters else {}
        launch_parameters['jobName'] = 'job-' + ctx.context_id()

        response = df_client.launch_template(project_id, gcs_path,
            location, validate_only, launch_parameters)
        job = response.get('job', None)
        if not job:
            # Validate only mode
            return job
        job_id = job.get('id')
        upload_job_id_and_location(storage_client, staging_location, job_id, location)
        return wait_and_dump_job(df_client, project_id, location, job,
            wait_interval,
            job_id_output_path=job_id_output_path,
            job_object_output_path=job_object_output_path,
        )
| apache-2.0 |
ecino/compassion-switzerland | hr_switzerland/__manifest__.py | 3 | 1954 | # -*- coding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2017 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
# Odoo addon manifest: declares metadata, data files and dependencies for
# the Compassion CH HR customizations.
{
    'name': 'Compassion CH - HR Management',
    'version': '10.0.1.2.0',  # Odoo 10.0 series
    'license': 'AGPL-3',
    'category': 'HR',
    'author': 'Emanuel Cino',
    'website': 'http://www.compassion.ch',
    # XML data/views loaded at module install/update time.
    'data': [
        'views/hr_expense_custom.xml',
        'views/res_users_view.xml',
        'views/hr_payslip_view.xml',
        'data/hr_config.xml'
    ],
    # Modules that must be installed before this one.
    'depends': [
        'hr_expense', 'hr_attendance_management', 'asterisk_click2dial',
        'web_notify', 'hr_payroll'],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
s3nk4s/flaskTutorials | FlaskApp/FlaskApp/venv/local/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
    """Charset prober that detects UTF-8 encoded byte streams.

    Bytes are driven through a UTF-8 coding state machine; every complete
    multi-byte sequence seen increases the confidence that the input
    really is UTF-8.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(UTF8SMModel)
        self.reset()

    def reset(self):
        """Reset the prober and its state machine for a fresh buffer."""
        CharSetProber.reset(self)
        self._mCodingSM.reset()
        self._mNumOfMBChar = 0

    def get_charset_name(self):
        return "utf-8"

    def feed(self, aBuf):
        """Consume a buffer of bytes and return the detection state."""
        for octet in aBuf:
            sm_state = self._mCodingSM.next_state(octet)
            if sm_state == constants.eError:
                # Byte sequence is illegal in UTF-8 — rule this charset out.
                self._mState = constants.eNotMe
                break
            if sm_state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if sm_state == constants.eStart:
                # A full character was just completed; count it when it
                # spanned more than one byte.
                if self._mCodingSM.get_current_charlen() >= 2:
                    self._mNumOfMBChar += 1

        if self.get_state() == constants.eDetecting and \
                self.get_confidence() > constants.SHORTCUT_THRESHOLD:
            # Confident enough to shortcut the rest of the input.
            self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Confidence grows with each multi-byte character observed."""
        unlike = 0.99
        if self._mNumOfMBChar >= 6:
            return unlike
        # ONE_CHAR_PROB is 0.5, so the power form below is bit-identical
        # to multiplying `unlike` by ONE_CHAR_PROB in a loop.
        return 1.0 - unlike * (ONE_CHAR_PROB ** self._mNumOfMBChar)
| mit |
newrocknj/horizon | openstack_dashboard/dashboards/project/data_processing/cluster_templates/forms.py | 43 | 2206 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project.data_processing. \
utils import workflow_helpers
LOG = logging.getLogger(__name__)
class UploadFileForm(forms.SelfHandlingForm,
                     workflow_helpers.PluginAndVersionMixin):
    """Horizon form that uploads a file and converts it into a Sahara
    cluster template for the chosen plugin/version."""

    template_name = forms.CharField(max_length=80,
                                    label=_("Cluster Template Name"))

    def __init__(self, request, *args, **kwargs):
        """Build the form, adding plugin/version choice fields and the
        file upload field dynamically."""
        super(UploadFileForm, self).__init__(request, *args, **kwargs)

        sahara = saharaclient.client(request)
        # Mixin helper: populates plugin_name and per-plugin version fields.
        self._generate_plugin_version_fields(sahara)

        self.fields['template_file'] = forms.FileField(label=_("Template"))

    def handle(self, request, data):
        """Send the uploaded template to Sahara for conversion.

        Returns True on success, False (with an error message surfaced
        to the user) on failure.
        """
        try:
            # we can set a limit on file size, but should we?
            filecontent = self.files['template_file'].read()

            plugin_name = data['plugin_name']
            # Version field name is derived from the selected plugin
            # (see PluginAndVersionMixin).
            hadoop_version = data.get(plugin_name + "_version")

            saharaclient.plugin_convert_to_template(request,
                                                    plugin_name,
                                                    hadoop_version,
                                                    data['template_name'],
                                                    filecontent)
            return True
        except Exception:
            exceptions.handle(request,
                              _("Unable to upload cluster template file"))
            return False
| apache-2.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/ftplib.py | 50 | 28528 | """An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
#
import os
import sys
# Import SOCKS module if it exists, else standard socket module socket
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
# Public API of this module.
__all__ = ["FTP","Netrc"]

# Magic number from <socket.h>
MSG_OOB = 0x1                           # Process data out of band


# The standard FTP server control port
FTP_PORT = 21


# Exception raised when an error or invalid response is received
class Error(Exception): pass
class error_reply(Error): pass          # unexpected [123]xx reply
class error_temp(Error): pass           # 4xx errors
class error_perm(Error): pass           # 5xx errors
class error_proto(Error): pass          # response does not begin with [1-5]


# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)


# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
# The class itself
class FTP:

    '''An FTP client class.

    To create a connection, call the class using these arguments:
            host, user, passwd, acct, timeout

    The first four arguments are all strings, and have default value ''.
    timeout must be numeric and defaults to None if not passed,
    meaning that no timeout will be set on any ftp socket(s)
    If a timeout is passed, then this is now the default timeout for all ftp
    socket operations for this instance.

    Then use self.connect() with optional host and port argument.

    To download a file, use ftp.retrlines('RETR ' + filename),
    or ftp.retrbinary() with slightly different arguments.
    To upload a file, use ftp.storlines() or ftp.storbinary(),
    which have an open file as argument (see their definitions
    below for details).
    The download/upload functions first issue appropriate TYPE
    and PORT or PASV commands.
    '''

    # Class-level defaults; instances override these as needed.
    debugging = 0
    host = ''
    port = FTP_PORT
    sock = None          # control-connection socket
    file = None          # buffered file wrapper around sock, for readline()
    welcome = None       # server greeting captured by connect()
    passiveserver = 1    # use PASV (passive) data connections by default

    # Initialization method (called by class instantiation).
    # Initialize host to localhost, port to standard ftp port
    # Optional arguments are host (for connect()),
    # and user, passwd, acct (for login())
    def __init__(self, host='', user='', passwd='', acct='',
                 timeout=_GLOBAL_DEFAULT_TIMEOUT):
        self.timeout = timeout
        if host:
            self.connect(host)
            if user:
                self.login(user, passwd, acct)

    def connect(self, host='', port=0, timeout=-999):
        '''Connect to host.  Arguments are:
         - host: hostname to connect to (string, default previous host)
         - port: port to connect to (integer, default previous port)
        '''
        if host != '':
            self.host = host
        if port > 0:
            self.port = port
        # -999 is a sentinel meaning "keep the timeout set in __init__".
        if timeout != -999:
            self.timeout = timeout
        self.sock = socket.create_connection((self.host, self.port), self.timeout)
        self.af = self.sock.family
        self.file = self.sock.makefile('rb')
        self.welcome = self.getresp()
        return self.welcome

    def getwelcome(self):
        '''Get the welcome message from the server.
        (this is read and squirreled away by connect())'''
        if self.debugging:
            print '*welcome*', self.sanitize(self.welcome)
        return self.welcome

    def set_debuglevel(self, level):
        '''Set the debugging level.
        The required argument level means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF'''
        self.debugging = level
    debug = set_debuglevel  # alias

    def set_pasv(self, val):
        '''Use passive or active mode for data transfers.
        With a false argument, use the normal PORT mode,
        With a true argument, use the PASV command.'''
        self.passiveserver = val

    # Internal: "sanitize" a string for printing (masks the password in
    # a PASS command so it never reaches the debug output).
    def sanitize(self, s):
        if s[:5] == 'pass ' or s[:5] == 'PASS ':
            i = len(s)
            while i > 5 and s[i-1] in '\r\n':
                i = i-1
            s = s[:5] + '*'*(i-5) + s[i:]
        return repr(s)

    # Internal: send one line to the server, appending CRLF
    def putline(self, line):
        line = line + CRLF
        if self.debugging > 1: print '*put*', self.sanitize(line)
        self.sock.sendall(line)

    # Internal: send one command to the server (through putline())
    def putcmd(self, line):
        if self.debugging: print '*cmd*', self.sanitize(line)
        self.putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # Raise EOFError if the connection is closed
    def getline(self):
        line = self.file.readline()
        if self.debugging > 1:
            print '*get*', self.sanitize(line)
        if not line: raise EOFError
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line

    # Internal: get a response from the server, which may possibly
    # consist of multiple lines.  Return a single string with no
    # trailing CRLF.  If the response consists of multiple lines,
    # these are separated by '\n' characters in the string
    def getmultiline(self):
        line = self.getline()
        if line[3:4] == '-':
            # Multi-line reply: read until a line starting with the same
            # 3-digit code NOT followed by '-' (per RFC 959).
            code = line[:3]
            while 1:
                nextline = self.getline()
                line = line + ('\n' + nextline)
                if nextline[:3] == code and \
                   nextline[3:4] != '-':
                    break
        return line

    # Internal: get a response from the server.
    # Raise various errors if the response indicates an error
    def getresp(self):
        resp = self.getmultiline()
        if self.debugging: print '*resp*', self.sanitize(resp)
        self.lastresp = resp[:3]
        # Dispatch on the first digit of the reply code.
        c = resp[:1]
        if c in ('1', '2', '3'):
            return resp
        if c == '4':
            raise error_temp, resp
        if c == '5':
            raise error_perm, resp
        raise error_proto, resp

    def voidresp(self):
        """Expect a response beginning with '2'."""
        resp = self.getresp()
        if resp[:1] != '2':
            raise error_reply, resp
        return resp

    def abort(self):
        '''Abort a file transfer.  Uses out-of-band data.
        This does not follow the procedure from the RFC to send Telnet
        IP and Synch; that doesn't seem to work with the servers I've
        tried.  Instead, just send the ABOR command as OOB data.'''
        line = 'ABOR' + CRLF
        if self.debugging > 1: print '*put urgent*', self.sanitize(line)
        self.sock.sendall(line, MSG_OOB)
        resp = self.getmultiline()
        if resp[:3] not in ('426', '226'):
            raise error_proto, resp

    def sendcmd(self, cmd):
        '''Send a command and return the response.'''
        self.putcmd(cmd)
        return self.getresp()

    def voidcmd(self, cmd):
        """Send a command and expect a response beginning with '2'."""
        self.putcmd(cmd)
        return self.voidresp()

    def sendport(self, host, port):
        '''Send a PORT command with the current host and the given
        port number.
        '''
        # PORT takes the address and port as six comma-separated bytes.
        hbytes = host.split('.')
        pbytes = [repr(port//256), repr(port%256)]
        bytes = hbytes + pbytes
        cmd = 'PORT ' + ','.join(bytes)
        return self.voidcmd(cmd)

    def sendeprt(self, host, port):
        '''Send a EPRT command with the current host and the given port number.'''
        # EPRT (RFC 2428) address-family codes: 1 = IPv4, 2 = IPv6.
        af = 0
        if self.af == socket.AF_INET:
            af = 1
        if self.af == socket.AF_INET6:
            af = 2
        if af == 0:
            raise error_proto, 'unsupported address family'
        fields = ['', repr(af), host, repr(port), '']
        cmd = 'EPRT ' + '|'.join(fields)
        return self.voidcmd(cmd)

    def makeport(self):
        '''Create a new socket and send a PORT command for it.'''
        msg = "getaddrinfo returns an empty list"
        sock = None
        # Bind to a wildcard address in the same family as the control
        # connection; first address that binds wins.
        for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
            af, socktype, proto, canonname, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
                sock.bind(sa)
            except socket.error, msg:
                if sock:
                    sock.close()
                sock = None
                continue
            break
        if not sock:
            raise socket.error, msg
        sock.listen(1)
        port = sock.getsockname()[1] # Get proper port
        host = self.sock.getsockname()[0] # Get proper host
        if self.af == socket.AF_INET:
            resp = self.sendport(host, port)
        else:
            resp = self.sendeprt(host, port)
        return sock

    def makepasv(self):
        # PASV for IPv4, EPSV (RFC 2428) otherwise.
        if self.af == socket.AF_INET:
            host, port = parse227(self.sendcmd('PASV'))
        else:
            host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
        return host, port

    def ntransfercmd(self, cmd, rest=None):
        """Initiate a transfer over the data connection.

        If the transfer is active, send a port command and the
        transfer command, and accept the connection.  If the server is
        passive, send a pasv command, connect to it, and start the
        transfer command.  Either way, return the socket for the
        connection and the expected size of the transfer.  The
        expected size may be None if it could not be determined.

        Optional `rest' argument can be a string that is sent as the
        argument to a REST command.  This is essentially a server
        marker used to tell the server to skip over any data up to the
        given marker.
        """
        size = None
        if self.passiveserver:
            host, port = self.makepasv()
            conn = socket.create_connection((host, port), self.timeout)
            if rest is not None:
                self.sendcmd("REST %s" % rest)
            resp = self.sendcmd(cmd)
            # Some servers apparently send a 200 reply to
            # a LIST or STOR command, before the 150 reply
            # (and way before the 226 reply). This seems to
            # be in violation of the protocol (which only allows
            # 1xx or error messages for LIST), so we just discard
            # this response.
            if resp[0] == '2':
                resp = self.getresp()
            if resp[0] != '1':
                raise error_reply, resp
        else:
            sock = self.makeport()
            if rest is not None:
                self.sendcmd("REST %s" % rest)
            resp = self.sendcmd(cmd)
            # See above.
            if resp[0] == '2':
                resp = self.getresp()
            if resp[0] != '1':
                raise error_reply, resp
            conn, sockaddr = sock.accept()
        if resp[:3] == '150':
            # this is conditional in case we received a 125
            size = parse150(resp)
        return conn, size

    def transfercmd(self, cmd, rest=None):
        """Like ntransfercmd() but returns only the socket."""
        return self.ntransfercmd(cmd, rest)[0]

    def login(self, user = '', passwd = '', acct = ''):
        '''Login, default anonymous.'''
        if not user: user = 'anonymous'
        if not passwd: passwd = ''
        if not acct: acct = ''
        if user == 'anonymous' and passwd in ('', '-'):
            # If there is no anonymous ftp password specified
            # then we'll just use anonymous@
            # We don't send any other thing because:
            # - We want to remain anonymous
            # - We want to stop SPAM
            # - We don't want to let ftp sites to discriminate by the user,
            #   host or country.
            passwd = passwd + 'anonymous@'
        resp = self.sendcmd('USER ' + user)
        # 3xx means the server wants more credentials (PASS, then ACCT).
        if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
        if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
        if resp[0] != '2':
            raise error_reply, resp
        return resp

    def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
        """Retrieve data in binary mode.  A new port is created for you.

        Args:
          cmd: A RETR command.
          callback: A single parameter callable to be called on each
                    block of data read.
          blocksize: The maximum number of bytes to read from the
                     socket at one time.  [default: 8192]
          rest: Passed to transfercmd().  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        conn = self.transfercmd(cmd, rest)
        while 1:
            data = conn.recv(blocksize)
            if not data:
                break
            callback(data)
        conn.close()
        return self.voidresp()

    def retrlines(self, cmd, callback = None):
        """Retrieve data in line mode.  A new port is created for you.

        Args:
          cmd: A RETR, LIST, NLST, or MLSD command.
          callback: An optional single parameter callable that is called
                    for each line with the trailing CRLF stripped.
                    [default: print_line()]

        Returns:
          The response code.
        """
        if callback is None: callback = print_line
        resp = self.sendcmd('TYPE A')
        conn = self.transfercmd(cmd)
        fp = conn.makefile('rb')
        while 1:
            line = fp.readline()
            if self.debugging > 2: print '*retr*', repr(line)
            if not line:
                break
            if line[-2:] == CRLF:
                line = line[:-2]
            elif line[-1:] == '\n':
                line = line[:-1]
            callback(line)
        fp.close()
        conn.close()
        return self.voidresp()

    def storbinary(self, cmd, fp, blocksize=8192, callback=None):
        """Store a file in binary mode.  A new port is created for you.

        Args:
          cmd: A STOR command.
          fp: A file-like object with a read(num_bytes) method.
          blocksize: The maximum data size to read from fp and send over
                     the connection at once.  [default: 8192]
          callback: An optional single parameter callable that is called on
                    on each block of data after it is sent.  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        conn = self.transfercmd(cmd)
        while 1:
            buf = fp.read(blocksize)
            if not buf: break
            conn.sendall(buf)
            if callback: callback(buf)
        conn.close()
        return self.voidresp()

    def storlines(self, cmd, fp, callback=None):
        """Store a file in line mode.  A new port is created for you.

        Args:
          cmd: A STOR command.
          fp: A file-like object with a readline() method.
          callback: An optional single parameter callable that is called on
                    on each line after it is sent.  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE A')
        conn = self.transfercmd(cmd)
        while 1:
            buf = fp.readline()
            if not buf: break
            # Normalize line endings to CRLF before sending (FTP text mode).
            if buf[-2:] != CRLF:
                if buf[-1] in CRLF: buf = buf[:-1]
                buf = buf + CRLF
            conn.sendall(buf)
            if callback: callback(buf)
        conn.close()
        return self.voidresp()

    def acct(self, password):
        '''Send new account name.'''
        cmd = 'ACCT ' + password
        return self.voidcmd(cmd)

    def nlst(self, *args):
        '''Return a list of files in a given directory (default the current).'''
        cmd = 'NLST'
        for arg in args:
            cmd = cmd + (' ' + arg)
        files = []
        self.retrlines(cmd, files.append)
        return files

    def dir(self, *args):
        '''List a directory in long form.
        By default list current directory to stdout.
        Optional last argument is callback function; all
        non-empty arguments before it are concatenated to the
        LIST command.  (This *should* only be used for a pathname.)'''
        cmd = 'LIST'
        func = None
        # A trailing non-string argument is treated as the callback.
        if args[-1:] and type(args[-1]) != type(''):
            args, func = args[:-1], args[-1]
        for arg in args:
            if arg:
                cmd = cmd + (' ' + arg)
        self.retrlines(cmd, func)

    def rename(self, fromname, toname):
        '''Rename a file.'''
        resp = self.sendcmd('RNFR ' + fromname)
        if resp[0] != '3':
            raise error_reply, resp
        return self.voidcmd('RNTO ' + toname)

    def delete(self, filename):
        '''Delete a file.'''
        resp = self.sendcmd('DELE ' + filename)
        if resp[:3] in ('250', '200'):
            return resp
        else:
            raise error_reply, resp

    def cwd(self, dirname):
        '''Change to a directory.'''
        if dirname == '..':
            try:
                return self.voidcmd('CDUP')
            except error_perm, msg:
                # Fall through to a plain CWD only when CDUP is
                # unimplemented (500); other 5xx errors propagate.
                if msg.args[0][:3] != '500':
                    raise
        elif dirname == '':
            dirname = '.'  # does nothing, but could return error
        cmd = 'CWD ' + dirname
        return self.voidcmd(cmd)

    def size(self, filename):
        '''Retrieve the size of a file.'''
        # The SIZE command is defined in RFC-3659
        resp = self.sendcmd('SIZE ' + filename)
        if resp[:3] == '213':
            s = resp[3:].strip()
            try:
                return int(s)
            except (OverflowError, ValueError):
                return long(s)  # Python 2 fallback for very large sizes

    def mkd(self, dirname):
        '''Make a directory, return its full pathname.'''
        resp = self.sendcmd('MKD ' + dirname)
        return parse257(resp)

    def rmd(self, dirname):
        '''Remove a directory.'''
        return self.voidcmd('RMD ' + dirname)

    def pwd(self):
        '''Return current working directory.'''
        resp = self.sendcmd('PWD')
        return parse257(resp)

    def quit(self):
        '''Quit, and close the connection.'''
        resp = self.voidcmd('QUIT')
        self.close()
        return resp

    def close(self):
        '''Close the connection without assuming anything about it.'''
        if self.file:
            self.file.close()
            self.sock.close()
            self.file = self.sock = None
# Lazily-compiled regex for the optional size field in a 150 reply.
_150_re = None

def parse150(resp):
    '''Parse the '150' response for a RETR request.
    Returns the expected transfer size or None; size is not guaranteed to
    be present in the 150 message.
    '''
    if not resp.startswith('150'):
        raise error_reply(resp)
    global _150_re
    if _150_re is None:
        # Compile once on first use and cache at module level.
        import re
        _150_re = re.compile(r"150 .* \((\d+) bytes\)", re.IGNORECASE)
    match = _150_re.match(resp)
    if match is None:
        # The "(N bytes)" annotation is optional.
        return None
    digits = match.group(1)
    try:
        return int(digits)
    except (OverflowError, ValueError):
        # Python 2 fallback for sizes beyond the platform int range.
        return long(digits)
# Lazily-compiled regex for the six address/port bytes in a 227 reply.
_227_re = None

def parse227(resp):
    '''Parse the '227' response for a PASV request.
    Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
    Return ('host.addr.as.numbers', port#) tuple.'''
    if not resp.startswith('227'):
        raise error_reply(resp)
    global _227_re
    if _227_re is None:
        import re
        _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
    found = _227_re.search(resp)
    if found is None:
        raise error_proto(resp)
    fields = found.groups()
    # First four numbers form the dotted-quad host, last two the port.
    host = '.'.join(fields[:4])
    port = (int(fields[4]) << 8) + int(fields[5])
    return host, port
def parse229(resp, peer):
    '''Parse the '229' response for a EPSV request.
    Raises error_proto if it does not contain '(|||port|)'
    Return ('host.addr.as.numbers', port#) tuple.'''
    if not resp.startswith('229'):
        raise error_reply(resp)
    start = resp.find('(')
    if start < 0:
        raise error_proto(resp)
    end = resp.find(')', start + 1)
    if end < 0:
        raise error_proto(resp)  # should contain '(|||port|)'
    # The first and last characters inside the parens must be the same
    # delimiter (usually '|').
    delim = resp[start + 1]
    if delim != resp[end - 1]:
        raise error_proto(resp)
    parts = resp[start + 1:end].split(delim)
    if len(parts) != 5:
        raise error_proto(resp)
    # EPSV replies carry only the port; the host is the control
    # connection's peer address.
    return peer[0], int(parts[3])
def parse257(resp):
    '''Parse the '257' response for a MKD or PWD request.
    This is a response to a MKD or PWD request: a directory name.
    Returns the directoryname in the 257 reply.'''
    if not resp.startswith('257'):
        raise error_reply(resp)
    if resp[3:5] != ' "':
        return ''  # Not compliant to RFC 959, but UNIX ftpd does this
    # Directory name is quoted; an embedded '"' is escaped by doubling it.
    chars = []
    pos = 5
    end = len(resp)
    while pos < end:
        ch = resp[pos]
        pos = pos + 1
        if ch == '"':
            if pos >= end or resp[pos] != '"':
                break  # closing quote
            pos = pos + 1  # doubled quote -> literal '"'
        chars.append(ch)
    return ''.join(chars)
def print_line(line):
    '''Default retrlines callback to print a line.'''
    # Parenthesized form prints identically on Python 2 (single argument).
    print(line)
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
    '''Copy file from one FTP-instance to another.

    NOTE: the `type` parameter (transfer type, 'I' or 'A') shadows the
    builtin of the same name; kept for backward compatibility.
    '''
    if not targetname: targetname = sourcename
    type = 'TYPE ' + type
    source.voidcmd(type)
    target.voidcmd(type)
    # Put the source in passive mode and point the target's data
    # connection directly at it (server-to-server transfer).
    sourcehost, sourceport = parse227(source.sendcmd('PASV'))
    target.sendport(sourcehost, sourceport)
    # RFC 959: the user must "listen" [...] BEFORE sending the
    # transfer request.
    # So: STOR before RETR, because here the target is a "user".
    treply = target.sendcmd('STOR ' + targetname)
    if treply[:3] not in ('125', '150'): raise error_proto  # RFC 959
    sreply = source.sendcmd('RETR ' + sourcename)
    if sreply[:3] not in ('125', '150'): raise error_proto  # RFC 959
    source.voidresp()
    target.voidresp()
class Netrc:
    """Class to parse & provide access to 'netrc' format files.

    See the netrc(4) man page for information on the file format.

    WARNING: This class is obsolete -- use module netrc instead.

    """
    # Credentials from a 'default' entry, shared across instances unless set.
    __defuser = None
    __defpasswd = None
    __defacct = None

    def __init__(self, filename=None):
        """Parse `filename` (default: ~/.netrc) into host and macro maps."""
        if filename is None:
            if "HOME" in os.environ:
                filename = os.path.join(os.environ["HOME"],
                                        ".netrc")
            else:
                raise IOError, \
                      "specify file to load or set $HOME"
        self.__hosts = {}
        self.__macros = {}
        fp = open(filename, "r")
        in_macro = 0
        while 1:
            line = fp.readline()
            if not line: break
            # Inside a 'macdef': accumulate lines until a blank line
            # terminates the macro.
            if in_macro and line.strip():
                macro_lines.append(line)
                continue
            elif in_macro:
                self.__macros[macro_name] = tuple(macro_lines)
                in_macro = 0
            # Token scan: keyword/value pairs may share a line.
            words = line.split()
            host = user = passwd = acct = None
            default = 0
            i = 0
            while i < len(words):
                w1 = words[i]
                if i+1 < len(words):
                    w2 = words[i + 1]
                else:
                    w2 = None
                if w1 == 'default':
                    default = 1
                elif w1 == 'machine' and w2:
                    host = w2.lower()
                    i = i + 1
                elif w1 == 'login' and w2:
                    user = w2
                    i = i + 1
                elif w1 == 'password' and w2:
                    passwd = w2
                    i = i + 1
                elif w1 == 'account' and w2:
                    acct = w2
                    i = i + 1
                elif w1 == 'macdef' and w2:
                    # Macro body starts on the following lines.
                    macro_name = w2
                    macro_lines = []
                    in_macro = 1
                    break
                i = i + 1
            if default:
                # 'default' entry: fill in whichever fields were given.
                self.__defuser = user or self.__defuser
                self.__defpasswd = passwd or self.__defpasswd
                self.__defacct = acct or self.__defacct
            if host:
                # Merge with any earlier entry for the same host.
                if host in self.__hosts:
                    ouser, opasswd, oacct = \
                           self.__hosts[host]
                    user = user or ouser
                    passwd = passwd or opasswd
                    acct = acct or oacct
                self.__hosts[host] = user, passwd, acct
        fp.close()

    def get_hosts(self):
        """Return a list of hosts mentioned in the .netrc file."""
        return self.__hosts.keys()

    def get_account(self, host):
        """Returns login information for the named host.

        The return value is a triple containing userid,
        password, and the accounting field.

        """
        host = host.lower()
        user = passwd = acct = None
        if host in self.__hosts:
            user, passwd, acct = self.__hosts[host]
        # Fall back to the 'default' entry for any missing field.
        user = user or self.__defuser
        passwd = passwd or self.__defpasswd
        acct = acct or self.__defacct
        return user, passwd, acct

    def get_macros(self):
        """Return a list of all defined macro names."""
        return self.__macros.keys()

    def get_macro(self, macro):
        """Return a sequence of lines which define a named macro."""
        return self.__macros[macro]
def test():
    '''Test program.
    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...

    -d dir
    -l list
    -p password
    '''

    if len(sys.argv) < 2:
        print test.__doc__
        sys.exit(0)

    debugging = 0
    rcfile = None
    # Each leading -d bumps the debug level by one.
    while sys.argv[1] == '-d':
        debugging = debugging+1
        del sys.argv[1]
    if sys.argv[1][:2] == '-r':
        # get name of alternate ~/.netrc file:
        rcfile = sys.argv[1][2:]
        del sys.argv[1]
    host = sys.argv[1]
    ftp = FTP(host)
    ftp.set_debuglevel(debugging)
    userid = passwd = acct = ''
    try:
        netrc = Netrc(rcfile)
    except IOError:
        # Only complain when the user explicitly named an account file.
        if rcfile is not None:
            sys.stderr.write("Could not open account file"
                             " -- using anonymous login.")
    else:
        try:
            userid, passwd, acct = netrc.get_account(host)
        except KeyError:
            # no account for host
            sys.stderr.write(
                    "No account -- using anonymous login.")
    ftp.login(userid, passwd, acct)
    # Remaining arguments: -l<dir> list, -d<dir> change dir,
    # -p toggle passive mode, anything else is retrieved as a file.
    for file in sys.argv[2:]:
        if file[:2] == '-l':
            ftp.dir(file[2:])
        elif file[:2] == '-d':
            cmd = 'CWD'
            if file[2:]: cmd = cmd + ' ' + file[2:]
            resp = ftp.sendcmd(cmd)
        elif file == '-p':
            ftp.set_pasv(not ftp.passiveserver)
        else:
            ftp.retrbinary('RETR ' + file, \
                           sys.stdout.write, 1024)
    ftp.quit()
# Allow running this module directly as a minimal command-line FTP client.
if __name__ == '__main__':
    test()
| apache-2.0 |
airodactyl/qutebrowser | qutebrowser/browser/webengine/webengineinspector.py | 2 | 2192 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Customized QWebInspector for QtWebEngine."""
import os
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings
from qutebrowser.browser import inspector
class WebEngineInspector(inspector.AbstractWebInspector):

    """A web inspector for QtWebEngine."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # NOTE(review): self.port looks unused within this class — possibly
        # read by callers elsewhere; confirm before removing.
        self.port = None
        view = QWebEngineView()
        settings = view.settings()
        settings.setAttribute(QWebEngineSettings.JavascriptEnabled, True)
        self._set_widget(view)

    def _inspect_old(self, page):
        """Set up the inspector for Qt < 5.11.

        Older Qt exposes the inspector only through the remote-debugging
        HTTP endpoint, whose port comes from the environment.
        """
        try:
            port = int(os.environ['QTWEBENGINE_REMOTE_DEBUGGING'])
        except KeyError:
            raise inspector.WebInspectorError(
                "QtWebEngine inspector is not enabled. See "
                "'qutebrowser --help' for details.")
        url = QUrl('http://localhost:{}/'.format(port))
        if page is None:
            # No page to inspect: show a blank view instead.
            self._widget.load(QUrl('about:blank'))
        else:
            self._widget.load(url)

    def _inspect_new(self, page):
        """Set up the inspector for Qt >= 5.11."""
        self._widget.page().setInspectedPage(page)

    def inspect(self, page):
        # Prefer the native API; fall back to the remote-debugging URL
        # when setInspectedPage is unavailable (AttributeError on old Qt).
        try:
            self._inspect_new(page)
        except AttributeError:
            self._inspect_old(page)
| gpl-3.0 |
wimmuskee/mangrove | test/test_interface.py | 1 | 1296 | from unittest import TestCase
from mangrove_libs import common
from mangrove_libs.interface import Interface
from storage.mysql import Database
import re
class InterfaceTestCase(TestCase):
    """Integration tests for mangrove's Interface against a live MySQL DB."""

    @classmethod
    # NOTE(review): first argument named 'self' but this is a classmethod
    # (receives the class); conventionally it would be 'cls'.
    def setUpClass(self):
        # Shared fixtures: one DB schema and one Interface for all tests.
        config = common.getConfig("mangrove-crawler-config.json.test", "default_collection")
        self.DB = Database(config["db_host"],config["db_user"],config["db_passwd"],config["db_name"])
        self.DB.initDB()
        self.interface = Interface(config)

    @classmethod
    def tearDownClass(self):
        # Remove filesystem artifacts, then drop the test schema.
        self.interface.FS.cleanupFS()
        del self.interface
        self.DB.cleanupDB()

    def test_collection_id(self):
        self.assertEqual(self.interface.DB.collection_id, 1)

    def test_collection_updated(self):
        self.assertEqual(self.interface.DB.collection_updated, 0)

    def test_collection_pushed(self):
        self.assertEqual(self.interface.DB.collection_pushed, 0)

    def test_startts(self):
        # Start timestamp should be a recent epoch value.
        self.assertGreater(self.interface.startts,1490000000)

    def test_requests_proxy(self):
        self.interface.handleRequestsProxy()
        self.assertEqual(self.interface.httpProxy["http"], "localhost:3128")

    def test_new_identifier_uuid(self):
        # New identifiers must be well-formed UUIDs.
        identifier = self.interface.getNewIdentifier()
        self.assertTrue(re.match(r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$', identifier))
| gpl-3.0 |
adammaikai/OmicsPipe2.0 | build/lib.linux-x86_64-2.7/omics_pipe/modules/GATK_BQSR.py | 3 | 1560 | #!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def GATK_BQSR(sample, extension, GATK_BQSR_flag):
    '''Recalibrate base quality scores.

    Submits a GATK_BQSR.sh scheduler job for the sample's realigned BAM and
    blocks until the recalibrated BAM exists (or the job fails).

    input:
        _WES_realigned_sorted.bam
    output:
        _gatk_recal.bam
    citation:
    link:
    parameters from parameters file:
        TEMP_DIR:
        GENOME:
        GATK_VERSION:
        R_VERSION:
        CAPTURE_KIT_BED:
        ALIGNMENT_DIR:
        DBSNP:
        MILLS:
        G1000:
    '''
    # NOTE(review): GATK_BQSR_flag is accepted but never used — confirm it
    # is only present for pipeline-signature uniformity.
    # Full sample name = bare sample id + file-type extension.
    sample = sample + extension
    # Positional args_list order must match the expectations of
    # GATK_BQSR.sh exactly — do not reorder.
    spawn_job(jobname = 'GATK_BQSR', SAMPLE = sample, LOG_PATH = p.OMICSPIPE["LOG_PATH"], RESULTS_EMAIL = p.OMICSPIPE["EMAIL"], SCHEDULER = p.OMICSPIPE["SCHEDULER"], walltime = p.BQSR["WALLTIME"], queue = p.OMICSPIPE["QUEUE"], nodes = p.BQSR["NODES"], ppn = p.BQSR["CPU"], memory = p.BQSR["MEMORY"], script = "/GATK_BQSR.sh", args_list = [sample, p.OMICSPIPE["TEMP_DIR"], p.SAMTOOLS["GENOME"], p.BQSR["VERSION"], p.VARSCAN["R_VERSION"], p.CAPTURE_KIT_BED, p.BQSR["ALIGNMENT_DIR"], p.BQSR["DBSNP"], p.BQSR["MILLS"], p.BQSR["G1000"]])
    # Wait for the scheduler job and record a completion flag.
    job_status(jobname = 'GATK_BQSR', resultspath = p.BQSR["ALIGNMENT_DIR"] + "/" + sample, SAMPLE = sample, outputfilename = sample + "_gatk_recal.bam", FLAG_PATH = p.OMICSPIPE["FLAG_PATH"])
    return
if __name__ == '__main__':
    # Bug fix: `sys` was used below without ever being imported, so the
    # exit call raised NameError instead of exiting cleanly.
    import sys
    # NOTE(review): `sample`, `extension` and `GATK_BQSR_flag` are not
    # defined at module scope, so running this file directly raises
    # NameError — confirm these are expected to be injected by the
    # omics_pipe runner before execution.
    GATK_BQSR(sample, extension, GATK_BQSR_flag)
    sys.exit(0)
| mit |
tdtrask/ansible | lib/ansible/modules/clustering/znode.py | 46 | 7699 | #!/usr/bin/python
# Copyright 2015 WP Engine, Inc. All rights reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: znode
version_added: "2.0"
short_description: Create, delete, retrieve, and update znodes using ZooKeeper
description:
- Create, delete, retrieve, and update znodes using ZooKeeper.
options:
hosts:
description:
- A list of ZooKeeper servers (format '[server]:[port]').
required: true
name:
description:
- The path of the znode.
required: true
value:
description:
- The value assigned to the znode.
default: None
required: false
op:
description:
- An operation to perform. Mutually exclusive with state.
default: None
required: false
state:
description:
- The state to enforce. Mutually exclusive with op.
default: None
required: false
timeout:
description:
- The amount of time to wait for a node to appear.
default: 300
required: false
recursive:
description:
- Recursively delete node and all its children.
default: False
required: false
version_added: "2.1"
requirements:
- kazoo >= 2.1
- python >= 2.6
author: "Trey Perry (@treyperry)"
"""
EXAMPLES = """
# Creating or updating a znode with a given value
- znode:
hosts: 'localhost:2181'
name: /mypath
value: myvalue
state: present
# Getting the value and stat structure for a znode
- znode:
hosts: 'localhost:2181'
name: /mypath
op: get
# Listing a particular znode's children
- znode:
hosts: 'localhost:2181'
name: /zookeeper
op: list
# Waiting 20 seconds for a znode to appear at path /mypath
- znode:
hosts: 'localhost:2181'
name: /mypath
op: wait
timeout: 20
# Deleting a znode at path /mypath
- znode:
hosts: 'localhost:2181'
name: /mypath
state: absent
# Creating or updating a znode with a given value on a remote Zookeeper
- znode:
hosts: 'my-zookeeper-node:2181'
name: /mypath
value: myvalue
state: present
delegate_to: 127.0.0.1
"""
import time
# kazoo is an optional third-party dependency; record its availability so
# main() can emit a helpful failure message instead of an ImportError
# traceback when it is missing.
try:
    from kazoo.client import KazooClient
    from kazoo.handlers.threading import KazooTimeoutError
    KAZOO_INSTALLED = True
except ImportError:
    KAZOO_INSTALLED = False
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: parse arguments, dispatch to the requested
    ZooKeeper operation or state handler, and report the outcome."""
    module = AnsibleModule(
        argument_spec=dict(
            hosts=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            value=dict(required=False, default=None, type='str'),
            op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
            state=dict(choices=['present', 'absent']),
            timeout=dict(required=False, default=300, type='int'),
            recursive=dict(required=False, default=False, type='bool')
        ),
        supports_check_mode=False
    )

    if not KAZOO_INSTALLED:
        module.fail_json(msg='kazoo >= 2.1 is required to use this module. Use pip to install it.')

    validation = check_params(module.params)
    if not validation['success']:
        module.fail_json(msg=validation['msg'])

    proxy = KazooCommandProxy(module)
    try:
        proxy.start()
    except KazooTimeoutError:
        module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')

    # Map the two mutually exclusive parameter families onto proxy methods.
    dispatch = {
        'op': {
            'get': proxy.get,
            'list': proxy.list,
            'wait': proxy.wait
        },
        'state': {
            'present': proxy.present,
            'absent': proxy.absent
        }
    }

    mode = 'op' if module.params.get('op') is not None else 'state'
    succeeded, payload = dispatch[mode][module.params[mode]]()
    proxy.shutdown()

    if succeeded:
        module.exit_json(**payload)
    else:
        module.fail_json(**payload)
def check_params(params):
    """Validate that exactly one of ``op`` / ``state`` was supplied.

    Returns a dict with a ``success`` flag and, on failure, a ``msg``
    explaining which constraint was violated.
    """
    has_op = bool(params['op'])
    has_state = bool(params['state'])
    if not has_op and not has_state:
        return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
    if has_op and has_state:
        return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
    return {'success': True}
class KazooCommandProxy():
    """Thin wrapper around a kazoo ``KazooClient`` that maps the module's
    operations/states onto ZooKeeper calls.

    Every public handler returns a ``(success, result_dict)`` tuple which
    main() turns into ``exit_json``/``fail_json``.
    """

    def __init__(self, module):
        self.module = module
        self.zk = KazooClient(module.params['hosts'])

    def absent(self):
        """Ensure the configured znode does not exist."""
        return self._absent(self.module.params['name'])

    def exists(self, znode):
        """Return the znode's stat (truthy) if it exists, else None."""
        return self.zk.exists(znode)

    def list(self):
        """List the children of the configured znode."""
        children = self.zk.get_children(self.module.params['name'])
        return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
                      'znode': self.module.params['name']}

    def present(self):
        """Ensure the configured znode exists with the configured value."""
        return self._present(self.module.params['name'], self.module.params['value'])

    def get(self):
        """Fetch the configured znode's value and stat structure."""
        return self._get(self.module.params['name'])

    def shutdown(self):
        """Stop and close the ZooKeeper connection."""
        self.zk.stop()
        self.zk.close()

    def start(self):
        """Open the connection to the ZooKeeper ensemble."""
        self.zk.start()

    def wait(self):
        """Block until the configured znode appears or the timeout expires."""
        return self._wait(self.module.params['name'], self.module.params['timeout'])

    def _absent(self, znode):
        # Delete only when present so 'changed' is reported accurately.
        if self.exists(znode):
            self.zk.delete(znode, recursive=self.module.params['recursive'])
            return True, {'changed': True, 'msg': 'The znode was deleted.'}
        else:
            return True, {'changed': False, 'msg': 'The znode does not exist.'}

    def _get(self, path):
        if self.exists(path):
            value, zstat = self.zk.get(path)
            stat_dict = {}
            # Copy the public scalar fields of the ZnodeStat into a plain
            # dict so the result can be JSON-serialized.
            for i in dir(zstat):
                if not i.startswith('_'):
                    attr = getattr(zstat, i)
                    if isinstance(attr, (int, str)):
                        stat_dict[i] = attr
            result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
                            'stat': stat_dict}
        else:
            result = False, {'msg': 'The requested node does not exist.'}
        return result

    def _present(self, path, value):
        if self.exists(path):
            (current_value, zstat) = self.zk.get(path)
            # NOTE(review): kazoo returns the stored value as bytes; on
            # Python 3 this comparison with the str parameter may always
            # differ and re-set the node — verify against the kazoo docs.
            if value != current_value:
                self.zk.set(path, value)
                return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
                              'value': value}
            else:
                return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
        else:
            self.zk.create(path, value, makepath=True)
            return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}

    def _wait(self, path, timeout, interval=5):
        # Poll every `interval` seconds until the node exists or `timeout`
        # seconds have elapsed.
        lim = time.time() + timeout
        while time.time() < lim:
            if self.exists(path):
                return True, {'msg': 'The node appeared before the configured timeout.',
                              'znode': path, 'timeout': timeout}
            else:
                time.sleep(interval)
        return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
                       'znode': path}
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
rismalrv/edx-platform | openedx/core/djangoapps/course_groups/management/commands/tests/test_remove_users_from_multiple_cohorts.py | 91 | 3951 | """
Tests for cleanup of users which are added in multiple cohorts of a course
"""
from django.core.exceptions import MultipleObjectsReturned
from django.core.management import call_command
from django.test.client import RequestFactory
from openedx.core.djangoapps.course_groups.views import cohort_handler
from openedx.core.djangoapps.course_groups.cohorts import get_cohort, get_cohort_by_name
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestMultipleCohortUsers(ModuleStoreTestCase):
    """
    Tests that users erroneously placed in multiple cohorts of a course are
    cleaned up by the remove_users_from_multiple_cohorts management command.
    """
    def setUp(self):
        """
        setup courses, users and a request for the tests
        """
        super(TestMultipleCohortUsers, self).setUp()
        self.course1 = CourseFactory.create()
        self.course2 = CourseFactory.create()
        self.user1 = UserFactory(is_staff=True)
        self.user2 = UserFactory(is_staff=True)
        self.request = RequestFactory().get("dummy_url")
        self.request.user = self.user1

    def test_users_with_multiple_cohorts_cleanup(self):
        """
        Test that user which have been added in multiple cohorts of a course,
        can get cohorts without error after running cohorts cleanup command
        """
        # set two auto_cohort_groups for both courses
        config_course_cohorts(
            self.course1, is_cohorted=True, auto_cohorts=["Course1AutoGroup1", "Course1AutoGroup2"]
        )
        config_course_cohorts(
            self.course2, is_cohorted=True, auto_cohorts=["Course2AutoGroup1", "Course2AutoGroup2"]
        )
        # get the cohorts from the courses, which will cause auto cohorts to be created
        cohort_handler(self.request, unicode(self.course1.id))
        cohort_handler(self.request, unicode(self.course2.id))
        course_1_auto_cohort_1 = get_cohort_by_name(self.course1.id, "Course1AutoGroup1")
        course_1_auto_cohort_2 = get_cohort_by_name(self.course1.id, "Course1AutoGroup2")
        course_2_auto_cohort_1 = get_cohort_by_name(self.course2.id, "Course2AutoGroup1")
        # forcefully add user1 in two auto cohorts of the SAME course
        # (the inconsistent state the command must repair)
        course_1_auto_cohort_1.users.add(self.user1)
        course_1_auto_cohort_2.users.add(self.user1)
        # forcefully add user2 in auto cohorts of both courses
        # (one cohort per course: a legal state that must be preserved)
        course_1_auto_cohort_1.users.add(self.user2)
        course_2_auto_cohort_1.users.add(self.user2)
        # now check that when user1 goes on discussion page and tries to get
        # cohorts 'MultipleObjectsReturned' exception is returned
        with self.assertRaises(MultipleObjectsReturned):
            get_cohort(self.user1, self.course1.id)
        # also check that user 2 can go on discussion page of both courses
        # without any exception
        get_cohort(self.user2, self.course1.id)
        get_cohort(self.user2, self.course2.id)
        # call command so users added in multiple cohorts of a course
        # are removed from all cohort groups
        call_command('remove_users_from_multiple_cohorts')
        # check that only user1 (with multiple cohorts) is removed from cohorts
        # and user2 is still in auto cohorts of both courses after running
        # 'remove_users_from_multiple_cohorts' management command
        self.assertEqual(self.user1.course_groups.count(), 0)
        self.assertEqual(self.user2.course_groups.count(), 2)
        user2_cohorts = list(self.user2.course_groups.values_list('name', flat=True))
        self.assertEqual(user2_cohorts, ['Course1AutoGroup1', 'Course2AutoGroup1'])
        # now check that user1 can get cohorts in which he is added
        response = cohort_handler(self.request, unicode(self.course1.id))
        self.assertEqual(response.status_code, 200)
| agpl-3.0 |
tayfun/django | tests/check_framework/test_templates.py | 288 | 1403 | from copy import deepcopy
from django.core.checks.templates import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
    """Tests for the E001 system check: TEMPLATES must not combine
    APP_DIRS=True with an explicit 'loaders' option."""

    # A TEMPLATES setting that is invalid because it sets both APP_DIRS
    # and explicit loaders.
    TEMPLATES_APP_DIRS_AND_LOADERS = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'APP_DIRS': True,
            'OPTIONS': {
                'loaders': ['django.template.loaders.filesystem.Loader'],
            },
        },
    ]

    @property
    def func(self):
        # Resolved lazily at access time, inside each test method.
        from django.core.checks.templates import check_setting_app_dirs_loaders
        return check_setting_app_dirs_loaders

    @override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
    def test_app_dirs_and_loaders(self):
        """
        Error if template loaders are specified and APP_DIRS is True.
        """
        self.assertEqual(self.func(None), [E001])

    def test_app_dirs_removed(self):
        """No error once APP_DIRS is removed, even with explicit loaders."""
        TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
        del TEMPLATES[0]['APP_DIRS']
        with self.settings(TEMPLATES=TEMPLATES):
            self.assertEqual(self.func(None), [])

    def test_loaders_removed(self):
        """No error once the 'loaders' option is removed with APP_DIRS True."""
        TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
        del TEMPLATES[0]['OPTIONS']['loaders']
        with self.settings(TEMPLATES=TEMPLATES):
            self.assertEqual(self.func(None), [])
| bsd-3-clause |
gale320/sync-engine | migrations/versions/123_remove_gmail_inbox_syncs.py | 8 | 1697 | """Remove gmail inbox syncs
Revision ID: 3c743bd31ee2
Revises: 476c5185121b
Create Date: 2014-12-08 03:53:36.829238
"""
# revision identifiers, used by Alembic.
revision = '3c743bd31ee2'       # this migration
down_revision = '476c5185121b'  # immediate predecessor in the chain
def upgrade():
    """Delete per-inbox-folder sync state (folder sync status rows, UID
    rows and the Redis heartbeat entry) for every Gmail account."""
    # Remove UIDs and sync status for inbox IMAP syncs -- otherwise
    # archives/deletes may not be synced correctly.
    # Imports are local because Alembic migrations must not import app
    # models at module-import time.
    from inbox.models.backends.imap import ImapFolderSyncStatus, ImapUid
    from inbox.models.backends.gmail import GmailAccount
    from inbox.models.session import session_scope
    from inbox.heartbeat.config import STATUS_DATABASE, get_redis_client
    from inbox.heartbeat.status import HeartbeatStatusKey
    redis_client = get_redis_client(STATUS_DATABASE)
    with session_scope(versioned=False) as db_session:
        for account in db_session.query(GmailAccount):
            if account.inbox_folder is None:
                # May be the case for accounts that we can't sync, e.g. due to
                # All Mail being disabled in IMAP.
                continue
            q = db_session.query(ImapFolderSyncStatus).filter(
                ImapFolderSyncStatus.account_id == account.id,
                ImapFolderSyncStatus.folder_id == account.inbox_folder.id)
            q.delete()
            q = db_session.query(ImapUid).filter(
                ImapUid.account_id == account.id,
                ImapUid.folder_id == account.inbox_folder.id)
            q.delete()
            # Commit per account so a failure mid-way keeps earlier
            # accounts' cleanup.
            db_session.commit()
            # Also remove the corresponding status entry from Redis.
            key = HeartbeatStatusKey(account.id, account.inbox_folder.id)
            redis_client.delete(key)
def downgrade():
    # Irreversible data cleanup: the deleted sync state cannot be restored.
    pass
| agpl-3.0 |
saiamrit/dora-s-nos | vpythonex.py | 1 | 4401 | """
This is an example for vpython
you need to install vpython to run this code.
"""
from vpython import *
# Bruce Sherwood
N = 3  # N by N by N array of atoms
# Surrounding the N**3 atoms is another layer of invisible fixed-position atoms
# that provide stability to the lattice.
k = 1  # spring constant (used in the force k*dt*(1-spacing/L))
m = 1  # atom mass (divides momentum to get velocity)
spacing = 1  # lattice spacing between neighboring atoms
atom_radius = 0.3*spacing
L0 = spacing-1.8*atom_radius  # rest length of a spring between two atoms
V0 = pi*(0.5*atom_radius)**2*L0  # initial volume of spring
scene.center = 0.5*(N-1)*vector(1,1,1)
scene.range = 2.5
# Time step: a small fraction of the natural oscillation period 2*pi*sqrt(m/k).
dt = 0.04*(2*pi*sqrt(m/k))
axes = [vector(1,0,0), vector(0,1,0), vector(0,0,1)]
scene.caption= """A model of a solid represented as atoms connected by interatomic bonds.
Right button drag or Ctrl-drag to rotate "camera" to view scene.
To zoom, drag with middle button or Alt/Option depressed, or use scroll wheel.
On a two-button mouse, middle is left + right.
Touch screen: pinch/extend to zoom, swipe or two-finger rotate."""
class crystal:
    """An (N+2)^3 grid of atoms joined by springs; the outer shell is
    invisible and motionless to anchor the lattice."""

    def __init__(self, N, atom_radius, spacing, momentumRange ):
        self.atoms = []
        self.springs = []
        # Create (N+2)^3 atoms in a grid; the outermost atoms are fixed and invisible
        for z in range(-1,N+1,1):
            for y in range(-1,N+1,1):
                for x in range(-1,N+1,1):
                    visible = True
                    if 0 <= x < N and 0 <= y < N and 0 <= z < N:
                        # Interior atom: random initial momentum.
                        p = momentumRange*vector.random()
                    else:
                        # Boundary atom: fixed anchor, not drawn.
                        p = vec(0,0,0)
                        visible = False
                    atom = sphere(pos=vector(x,y,z)*spacing,
                                  radius=atom_radius, visible=visible,
                                  color=vector(0,0.58,0.69), momentum=p)
                    # Linear index into self.atoms; neighbors are at
                    # index+-1 (x), +-(N+2) (y) and +-(N+2)**2 (z).
                    atom.index = len(self.atoms)
                    self.atoms.append( atom )
        # Create a grid of springs linking each atom to the adjacent atoms
        # in each dimension, or to invisible motionless atoms.
        # NOTE(review): the pos comparisons below assume spacing == 1 so
        # that atom.pos coordinates equal the integer grid indices — verify
        # before changing `spacing`.
        for atom in self.atoms:
            if atom.visible:
                if atom.pos.x == 0:
                    self.make_spring(self.atoms[atom.index-1], atom, False)
                    self.make_spring(atom, self.atoms[atom.index+1], True)
                elif atom.pos.x == N-1:
                    self.make_spring(atom, self.atoms[atom.index+1], False)
                else:
                    self.make_spring(atom, self.atoms[atom.index+1], True)

                if atom.pos.y == 0:
                    self.make_spring(self.atoms[atom.index-(N+2)], atom, False)
                    self.make_spring(atom, self.atoms[atom.index+(N+2)], True)
                elif atom.pos.y == N-1:
                    self.make_spring(atom, self.atoms[atom.index+(N+2)], False)
                else:
                    self.make_spring(atom, self.atoms[atom.index+(N+2)], True)

                if atom.pos.z == 0:
                    self.make_spring(self.atoms[atom.index-(N+2)**2], atom, False)
                    self.make_spring(atom, self.atoms[atom.index+(N+2)**2], True)
                elif atom.pos.z == N-1:
                    self.make_spring(atom, self.atoms[atom.index+(N+2)**2], False)
                else:
                    self.make_spring(atom, self.atoms[atom.index+(N+2)**2], True)

    def make_spring(self, start, end, visible):
        """Create a helix spring from atom *start* to atom *end*."""
        spring = helix(pos=start.pos, axis=end.pos-start.pos, visible=visible,
                       thickness=0.05, radius=0.5*atom_radius, length=spacing,
                       up=vector(1,1,1), # prevent fibrillation of vertical springs
                       color=color.orange)
        spring.start = start
        spring.end = end
        self.springs.append(spring)
c = crystal(N, atom_radius, spacing, 0.1*spacing*sqrt(k/m))

# Animation loop: symplectic-Euler style update at 60 frames per second.
while True:
    rate(60)
    # Move visible atoms according to their momentum.
    for atom in c.atoms:
        if atom.visible:
            atom.pos = atom.pos + atom.momentum/m*dt
    # Re-draw each spring and apply the spring impulse to its endpoints.
    for spring in c.springs:
        spring.axis = spring.end.pos - spring.start.pos
        L = mag(spring.axis)
        spring.axis = spring.axis.norm()
        spring.pos = spring.start.pos+0.5*atom_radius*spring.axis
        Ls = L-atom_radius
        spring.length = Ls
        # Hooke impulse along the spring axis: k*dt*(1 - spacing/L).
        Fdt = spring.axis * (k*dt * (1-spacing/L))
        if spring.start.visible:
            spring.start.momentum = spring.start.momentum + Fdt
        if spring.end.visible:
            spring.end.momentum = spring.end.momentum - Fdt
| mpl-2.0 |
krafczyk/spack | var/spack/repos/builtin/packages/gnu-prolog/package.py | 5 | 1762 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class GnuProlog(Package):
    """A free Prolog compiler with constraint solving over finite domains."""

    homepage = "http://www.gprolog.org/"
    url = "http://www.gprolog.org/gprolog-1.4.4.tar.gz"

    version('1.4.4', '37009da471e5217ff637ad1c516448c8')

    # The build is not parallel-make safe.
    parallel = False

    def install(self, spec, prefix):
        """Configure, build and install gprolog from its src/ subdirectory."""
        with working_dir('src'):
            configure('--with-install-dir=%s' % prefix,
                      '--without-links-dir')
            make()
            make('install')
| lgpl-2.1 |
chango/inferno | test/lib/test_reduce.py | 4 | 4567 | import types
from nose.tools import eq_
from nose.tools import ok_
from disco.worker.classic.worker import Params
from inferno.lib.reduce import keyset_reduce
class TestKeysetReduce(object):
    """Tests for inferno's keyset_reduce: grouping of adjacent keys, value
    coercion, bad-row handling and the parts_postprocess hooks."""

    def test_reduce(self):
        # Adjacent rows sharing the same JSON key are summed element-wise.
        data = [
            ('["keyset_BBB", "key1", "key2"]', [1, 10]),
            ('["keyset_BBB", "key1", "key2"]', [2, 20]),
            ('["keyset_ZZZ", "key1", "key2"]', [1, 100]),
            ('["keyset_ZZZ", "key1", "key2"]', [2, 200]),
            ('["keyset_AAA", "key1", "key2"]', [1, 1000]),
            ('["keyset_AAA", "key1", "key2"]', [2, 2000])]
        expected = [
            (['keyset_BBB', 'key1', 'key2'], [3, 30]),
            (['keyset_ZZZ', 'key1', 'key2'], [3, 300]),
            (['keyset_AAA', 'key1', 'key2'], [3, 3000])]
        self._assert_reduce(data, expected)

    def test_reduce_assumes_input_keys_are_grouped_together(self):
        # Interleaved keys are NOT merged: the reducer only combines runs.
        data = [
            ('["keyset_BBB", "key1", "key2"]', [1, 10]),
            ('["keyset_ZZZ", "key1", "key2"]', [1, 100]),
            ('["keyset_AAA", "key1", "key2"]', [1, 1000]),
            ('["keyset_BBB", "key1", "key2"]', [2, 20]),
            ('["keyset_ZZZ", "key1", "key2"]', [2, 200]),
            ('["keyset_AAA", "key1", "key2"]', [2, 2000])]
        expected = [
            (['keyset_BBB', 'key1', 'key2'], [1, 10]),
            (['keyset_ZZZ', 'key1', 'key2'], [1, 100]),
            (['keyset_AAA', 'key1', 'key2'], [1, 1000]),
            (['keyset_BBB', 'key1', 'key2'], [2, 20]),
            (['keyset_ZZZ', 'key1', 'key2'], [2, 200]),
            (['keyset_AAA', 'key1', 'key2'], [2, 2000])]
        # expected equals input b/c the input keys weren't grouped together
        self._assert_reduce(data, expected)

    def test_null_value(self):
        # None values are coerced to 0.
        data = [
            ('["keyset", "key1", "key2"]', [1, None])]
        expected = [
            (['keyset', 'key1', 'key2'], [1, 0])]
        self._assert_reduce(data, expected)

    def test_float_value(self):
        # Floats pass through unchanged.
        data = [
            ('["keyset", "key1", "key2"]', [1, 1.234])]
        expected = [
            (['keyset', 'key1', 'key2'], [1, 1.234])]
        self._assert_reduce(data, expected)

    def test_long_value(self):
        # Large integers (e.g. millisecond timestamps) pass through.
        data = [
            ('["keyset", "key1", "key2"]', [1, 1326664799000])]
        expected = [
            (['keyset', 'key1', 'key2'], [1, 1326664799000])]
        self._assert_reduce(data, expected)

    def test_string_value(self):
        # Non-numeric values are coerced to 0.
        data = [
            ('["keyset", "key1", "key2"]', [1, 'some_string'])]
        expected = [
            (['keyset', 'key1', 'key2'], [1, 0])]
        self._assert_reduce(data, expected)

    def test_should_throw_away_bad_data_and_continue(self):
        # A key that is not valid JSON is dropped; later rows still reduce.
        data = [
            ('["keyset", "key1", "key2"]', [1, 1]),
            ('key_not_json', [1, 2]),
            ('["keyset", "key1", "key2"]', [1, 3])]
        expected = [
            (['keyset', 'key1', 'key2'], [1, 1]),
            (['keyset', 'key1', 'key2'], [1, 3])]
        self._assert_reduce(data, expected)

    def test_parts_postprocess(self):
        # Post-process hooks are applied in order: filter, then expand the
        # value, then duplicate each row; keysets without hooks are untouched.
        def filt(key, value, params):
            # Drop rows whose second value component is <= 1.
            if value[1] > 1:
                yield key, value

        def expand(key, value, params):
            # Append the sum of the first two components to the value.
            new_value = list(value)
            new_value.append(value[0] + value[1])
            yield key, new_value

        def grow(key, value, params):
            # Emit each row twice.
            yield key, value
            yield key, value

        data = [
            ('["keyset", "key1", "key2"]', [1, 1]),
            ('["keyset", "key1", "key2"]', [1, 3]),
            ('["keyset", "key3", "key4"]', [1, 1]),
            ('["keyset", "key3", "key5"]', [1, 3]),
            ('["keyset2", "key3", "key5"]', [1, 3]),
        ]
        expected = [
            (['keyset', 'key1', 'key2'], [2, 4, 6]),
            (['keyset', 'key1', 'key2'], [2, 4, 6]),
            (['keyset', 'key3', 'key5'], [1, 3, 4]),
            (['keyset', 'key3', 'key5'], [1, 3, 4]),
            (['keyset2', 'key3', 'key5'], [1, 3]),
        ]
        keysets = {
            'keyset': {
                'parts_postprocess': [filt, expand, grow]
            }
        }
        self._assert_reduce(data, expected, keysets=keysets)

    def _assert_reduce(self, data, expected, **kwargs):
        # turn disco_debug on for more code coverage
        # NOTE(review): **kwargs can never be None — this guard is dead code.
        if kwargs is None:
            kwargs = dict()
        kwargs['disco_debug'] = True
        params = Params(**kwargs)
        actual = keyset_reduce(data, params)
        # The reducer must be lazy (a generator), and its materialized
        # output must match exactly.
        ok_(isinstance(actual, types.GeneratorType))
        eq_(list(actual), expected)
| mit |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/deteccao-de-faces-com-python-e-opencv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/base.py | 56 | 14579 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
# Sentinel inserted into the active formatting elements list when entering
# scope-establishing elements; plain None works because real entries are
# element nodes.
Marker = None

# Maps a scope variant name to (set of boundary element (namespace, name)
# tuples, invert flag).  The invert flag (used only by "select") flips the
# boundary test in elementInScope.
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
                                              (namespaces["html"], "ul")])), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}
class Node(object):
    """A single item in the tree.

    Concrete tree implementations subclass this and provide the abstract
    operations; the base class only stores common state and supplies the
    string representations.
    """

    def __init__(self, name):
        """Create a node for tag *name* with no parent, value, attributes,
        children or flags."""
        self.name = name          # tag name associated with this node
        self.parent = None        # parent node (None for the document node)
        self.value = None         # payload for text nodes and comments
        self.attributes = {}      # attribute name -> value mapping
        self.childNodes = []      # child nodes (all elements at minimum)
        self._flags = []          # miscellaneous per-node flags

    def __str__(self):
        rendered = " ".join(["%s=\"%s\"" % (attr, val)
                             for attr, val in self.attributes.items()])
        if rendered:
            return "<%s %s>" % (self.name, rendered)
        return "<%s>" % (self.name)

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Insert *node* as a child of this node (abstract).

        :arg node: the node to insert
        """
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert *data* as text in this node, positioned before the node
        *insertBefore* or at the end of the node's text (abstract).

        :arg data: the text to insert
        :arg insertBefore: node to insert before, or None for the end
        """
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert *node* as a child before the existing child *refNode*
        (abstract).  Raises ValueError if *refNode* is not a child.

        :arg node: the node to insert
        :arg refNode: the child node to insert before
        """
        raise NotImplementedError

    def removeChild(self, node):
        """Remove the child *node* from this node (abstract).

        :arg node: the child node to remove
        """
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all of this node's children onto *newParent*.

        Exists as a method (rather than a loop in callers) so trees that
        store text out-of-band can move it correctly.

        :arg newParent: the node that receives the children
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy: same name and attributes, but no parent
        or children (abstract)."""
        raise NotImplementedError

    def hasContent(self):
        """Return True if the node has children or text (abstract)."""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements implementing the HTML5
    "Noah's Ark" clause: at most three equal entries per Marker segment."""

    def append(self, node):
        """Append *node*, first evicting the earliest of any three equal
        entries found after the most recent Marker."""
        if node != Marker:
            matches = 0
            for candidate in reversed(self):
                if candidate == Marker:
                    break
                if self.nodesEqual(candidate, node):
                    matches += 1
                    if matches == 3:
                        self.remove(candidate)
                        break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        """Two entries are equal when both their (namespace, name) tuple
        and their attributes match."""
        return (node1.nameTuple == node2.nameTuple and
                node1.attributes == node2.attributes)
class TreeBuilder(object):
"""Base treebuilder implementation
* documentClass - the class to use for the bottommost node of a document
* elementClass - the class to use for HTML Elements
* commentClass - the class to use for comments
* doctypeClass - the class to use for doctypes
"""
# pylint:disable=not-callable
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
    def __init__(self, namespaceHTMLElements):
        """Create a TreeBuilder

        :arg namespaceHTMLElements: whether or not to namespace HTML elements
        """
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        # All per-parse state lives in reset() so the builder is reusable.
        self.reset()
    def reset(self):
        """Reset all per-parse state so the builder can parse a new input."""
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()

        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None

        self.insertFromTable = False

        self.document = self.documentClass()
    def elementInScope(self, target, variant=None):
        """Return True if *target* is "in scope" on the stack of open
        elements.

        :arg target: a node (matched by identity) or a tag-name string
            (treated as an HTML-namespace (namespace, name) tuple)
        :arg variant: scope definition to use — None, "button", "list",
            "table" or "select" (see listElementsMap)
        """
        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")
        if not exactNode:
            if isinstance(target, text_type):
                target = (namespaces["html"], target)
            assert isinstance(target, tuple)

        listElements, invert = listElementsMap[variant]

        # Walk the stack from the current node outwards.
        for node in reversed(self.openElements):
            if exactNode and node == target:
                return True
            elif not exactNode and node.nameTuple == target:
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                # Hit a scope boundary; "select" inverts the boundary test.
                return False

        assert False  # We should never reach this point
    def reconstructActiveFormattingElements(self):
        """Reopen formatting elements that are in the active formatting
        list but no longer on the stack of open elements (HTML5
        "reconstruct the active formatting elements" algorithm)."""
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.

        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return

        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return

        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]

        while True:
            # Step 7
            i += 1

            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes

            # Step 9: re-open the formatting element via a synthetic token.
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})

            # Step 10
            self.activeFormattingElements[i] = element

            # Step 11
            if element == self.activeFormattingElements[-1]:
                break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
    def insertRoot(self, token):
        """Create the root element from *token*, attach it to the document
        and push it onto the stack of open elements."""
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
    def _getInsertFromTable(self):
        return self._insertFromTable

    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal

    # Toggles between normal insertion and foster-parenting insertion;
    # setting it rebinds self.insertElement.
    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and
name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
    def getDocument(self):
        """Return the final tree (the document node that was built up)."""
        return self.document
def getFragment(self):
"""Return the final fragment"""
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests
        :arg node: the node from which to start serializing
        """
        # Abstract hook: concrete tree builders must override this.
        raise NotImplementedError
| gpl-3.0 |
romanornr/viacoin | test/functional/p2p-fullblocktest.py | 3 | 52871 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing.
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
import struct
class PreviousSpendableOutput(object):
    """A reference to a spendable output: the transaction plus the index
    of the output being spent."""
    def __init__(self, tx=None, n=-1):
        # Bug fix: the original default `tx = CTransaction()` was a mutable
        # default argument, evaluated once at class-definition time and
        # shared by every instance constructed without an explicit tx.
        # Use a None sentinel and build a fresh transaction per call.
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # the output we're spending
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
    """A CBlock whose serialize() writes the transaction count as a
    non-canonical (bloated) varint while keeping the transactions intact."""
    def __init__(self, header=None):
        super(CBrokenBlock, self).__init__(header)
    def initialize(self, base_block):
        # Take a deep copy of base_block's transactions and refresh the
        # merkle root to match them.
        self.vtx = copy.deepcopy(base_block.vtx)
        self.hashMerkleRoot = self.calc_merkle_root()
    def serialize(self):
        r = b""
        # NOTE(review): super(CBlock, self) skips CBlock in the MRO —
        # presumably serializing only the header portion; confirm against
        # the CBlock class hierarchy in mininode.
        r += super(CBlock, self).serialize()
        # 0xff prefix forces the tx count into a full 8-byte varint even
        # though the count is small — the "bloated" encoding under test.
        r += struct.pack("<BQ", 255, len(self.vtx))
        for tx in self.vtx:
            r += tx.serialize()
        return r
    def normal_serialize(self):
        # Canonical serialization, exactly as the parent class produces it.
        r = b""
        r += super(CBrokenBlock, self).serialize()
        return r
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
    def set_test_params(self):
        # Single node on a fresh chain; blocks are built locally and the
        # coinbase outputs are signed with a fixed test key.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.block_heights = {}  # block sha256 -> chain height
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.tip = None  # most recently built block
        self.blocks = {}  # test block number -> block
    def add_options(self, parser):
        # Expose a flag for skipping the more expensive sub-tests.
        super().add_options(parser)
        parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
    def run_test(self):
        # Set up the comparison test manager, connect it to every node,
        # then run all TestInstances yielded by get_tests().
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = create_transaction(spend_tx, n, b"", value, script)
return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend == None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
self.sign_tx(tx, spend.tx, spend.n)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 2000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 65
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 65
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = block(27, spend=out[7])
yield rejected(False)
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = block(29, spend=out[7])
yield rejected(False)
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
# yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Viacoin: this won't fit in a block
yield rejected(RejectResult(16, b'bad-blk-length'))
# same as b40, but one less sigop
tip(39)
b41 = block(41, spend=None)
# update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
# Viacoin: this won't fit in a block
# update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# A block with invalid work
tip(44)
b47 = block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.scrypt256 < target: #changed > to <
b47.nNonce += 1
b47.rehash()
yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
tip(60)
b61 = block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
b61.vtx[0].rehash()
b61 = update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
yield rejected(RejectResult(16, b'bad-txns-BIP30'))
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64 but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 65
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
b65 = block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
b66 = block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
b67 = block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
b68 = block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
#
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# b74/75 - if we push an invalid script element, all prevous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
b83 = block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
b84 = block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 288
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 65
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
# Script entry point: run the full-block test suite when executed directly.
if __name__ == '__main__':
    FullBlockTest().main()
| mit |
lnielsen/invenio-metrics | docs/_ext/ultramock.py | 164 | 2830 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Hijacks `mock` to fake as many non-available modules as possible."""
import sys
import types
try:
import unittest.mock as mock
except ImportError:
import mock
# skip `_is_magic` check.
orig_is_magic = mock._is_magic
def always_false(*args, **kwargs):
    """Stand-in for ``mock._is_magic`` that rejects every candidate.

    Accepts and ignores any positional or keyword arguments so it can
    replace the original predicate regardless of how it is invoked.
    """
    return False
# avoid spec configuration for mocked classes with super classes.
# honestly this does not happen very often and is kind of a tricky case.
orig_mock_add_spec = mock.NonCallableMock._mock_add_spec
def mock_add_spec_fake(self, spec, spec_set):
    """Replacement for ``NonCallableMock._mock_add_spec``.

    Ignores the requested *spec* and *spec_set* and installs an empty
    spec instead, so that mocked classes whose super classes are also
    mocks do not fail during spec configuration.
    """
    orig_mock_add_spec(self, None, None)
# special MagicMock with empty docs
class MyMagicMock(mock.MagicMock):
    # NOTE: the empty docstring is deliberate -- it prevents autodoc from
    # inheriting and rendering MagicMock's own documentation for every
    # mocked object.
    """"""
# set up a fake class-metaclass hierarchy
class SuperMockMetaMeta(MyMagicMock):
    # Root of the fake class/metaclass hierarchy.  ``__metaclass__`` is the
    # Python 2 metaclass hook; it is set to a MagicMock *instance* so any
    # metaclass machinery touched during documentation builds is itself
    # mocked.
    __metaclass__ = MyMagicMock()
class SuperMockMeta(MyMagicMock):
    # Middle tier of the fake hierarchy: its (Python 2) metaclass is the
    # meta-metaclass defined above.
    __metaclass__ = SuperMockMetaMeta
class SuperMock(MyMagicMock):
    # The object handed out for every attribute of a MockedModule; its
    # (Python 2) metaclass chain is fully mocked as well.
    __metaclass__ = SuperMockMeta
class MockedModule(types.ModuleType):
    """Fake module that fabricates a ``SuperMock`` for any attribute.

    Instances register themselves in ``sys.modules`` under their dotted
    name so that subsequent imports of the same name resolve to the fake
    module.
    """

    def __init__(self, name):
        # Initialize ModuleType properly.  The original code called
        # ``super(types.ModuleType, self).__init__(name)`` -- skipping one
        # MRO level -- and then assigned ``super.__name__`` (the literal
        # string 'super') as the module name, which registered every fake
        # module under ``sys.modules['super']`` with ``__file__`` of
        # 'super.py'.
        super(MockedModule, self).__init__(name)
        self.__name__ = name
        # Fabricate a plausible source path from the dotted name.
        self.__file__ = self.__name__.replace('.', '/') + '.py'
        sys.modules[self.__name__] = self

    def __getattr__(self, key):
        # Any missing attribute becomes SuperMock; cache it on the module
        # so repeated lookups return the same object.
        obj = SuperMock
        setattr(self, key, obj)
        return obj
# overwrite imports
orig_import = __import__
def import_mock(name, *args, **kwargs):
    """Drop-in side effect for ``__import__``.

    Delegates to the real import machinery first; only when the genuine
    import fails with ``ImportError`` is a :class:`MockedModule`
    fabricated (and registered in ``sys.modules``) in its place.
    """
    try:
        return orig_import(name, *args, **kwargs)
    except ImportError:
        return MockedModule(name)
# NOTE(review): '__builtin__' is the Python 2 builtins module; on Python 3
# this patch target would need to be 'builtins' -- confirm which
# interpreter runs the docs build.
import_patch = mock.patch('__builtin__.__import__', side_effect=import_mock)
# public methods
def activate():
    """Install all monkey patches: mock internals and ``__import__``."""
    mock._is_magic = always_false
    mock.NonCallableMock._mock_add_spec = mock_add_spec_fake
    import_patch.start()
def deactivate():
    """Undo :func:`activate`, restoring the originals in reverse order."""
    import_patch.stop()
    mock.NonCallableMock._mock_add_spec = orig_mock_add_spec
    mock._is_magic = orig_is_magic
| gpl-2.0 |
GNOME/orca | test/keystrokes/firefox/math_line_nav_torture_test.py | 1 | 17854 | #!/usr/bin/python
from macaroon.playback import *
import utils
# Scripted Orca regression test: drives line navigation through the MathML
# "torture test" page and checks the braille/speech output for each step.
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
# Give the document time to finish loading before the first key press.
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'MathML \"Torture Test\" test cases'",
" VISIBLE: 'MathML \"Torture Test\" test cases', cursor=1",
"SPEECH OUTPUT: 'MathML \"Torture Test\" test cases'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'x.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'y.'",
"SPEECH OUTPUT: 'superscript 2.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'F.'",
"SPEECH OUTPUT: 'pre-subscript 2'",
"SPEECH OUTPUT: 'subscript 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'fraction start.'",
"SPEECH OUTPUT: 'x plus y.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'over k plus 1.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'x plus y.'",
"SPEECH OUTPUT: 'superscript fraction start.'",
"SPEECH OUTPUT: '2 over k plus 1.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'fraction start.'",
"SPEECH OUTPUT: 'a over b slash 2.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'a.'",
"SPEECH OUTPUT: 'subscript 0.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 1.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 2.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 3.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 4.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'a.'",
"SPEECH OUTPUT: 'subscript 0.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 1.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 2.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 3.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: '1 over a.'",
"SPEECH OUTPUT: 'subscript 4.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'left paren fraction without bar, start.'",
"SPEECH OUTPUT: 'n over k slash 2.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'right paren'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"10. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'left paren fraction without bar, start.'",
"SPEECH OUTPUT: 'p over 2.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'right paren x.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'y.'",
"SPEECH OUTPUT: 'superscript p minus 2.'",
"SPEECH OUTPUT: 'minus fraction start.'",
"SPEECH OUTPUT: '1 over 1 minus x.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'fraction start.'",
"SPEECH OUTPUT: '1 over 1 minus x.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"11. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'sum.'",
"SPEECH OUTPUT: 'underscript fraction without bar, start.'",
"SPEECH OUTPUT: '0 less than or equal to i less than or equal to m over 0 less than j less than n.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'P left paren i comma j right paren'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"12. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'x.'",
"SPEECH OUTPUT: 'superscript 2 y.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"13. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'sum.'",
"SPEECH OUTPUT: 'underscript i equals 1.'",
"SPEECH OUTPUT: 'overscript p.'",
"SPEECH OUTPUT: 'sum.'",
"SPEECH OUTPUT: 'underscript j equals 1.'",
"SPEECH OUTPUT: 'overscript q.'",
"SPEECH OUTPUT: 'sum.'",
"SPEECH OUTPUT: 'underscript k equals 1.'",
"SPEECH OUTPUT: 'overscript r.'",
"SPEECH OUTPUT: 'a.'",
"SPEECH OUTPUT: 'subscript i j.'",
"SPEECH OUTPUT: 'b.'",
"SPEECH OUTPUT: 'subscript j k.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript k i.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"14. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'square root of 1 plus square root of 1 plus square root of 1 plus square root of 1 plus square root of 1 plus square root of 1 plus square root of 1 plus x.'",
"SPEECH OUTPUT: 'root end.'",
"SPEECH OUTPUT: 'root end.'",
"SPEECH OUTPUT: 'root end.'",
"SPEECH OUTPUT: 'root end.'",
"SPEECH OUTPUT: 'root end.'",
"SPEECH OUTPUT: 'root end.'",
"SPEECH OUTPUT: 'root end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"15. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'left paren fraction start.'",
"SPEECH OUTPUT: 'partial differential.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'over partial differential x.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'plus fraction start.'",
"SPEECH OUTPUT: 'partial differential.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'over partial differential y.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'fraction end.'",
"SPEECH OUTPUT: 'right paren vertical bar φ left paren x plus i y right paren vertical bar.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'equals 0'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"16. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: '2.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'superscript 2.'",
"SPEECH OUTPUT: 'superscript x.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"17. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'integral.'",
"SPEECH OUTPUT: 'subscript 1.'",
"SPEECH OUTPUT: 'superscript x.'",
"SPEECH OUTPUT: 'fraction start.'",
"SPEECH OUTPUT: 'd t over t.'",
"SPEECH OUTPUT: 'fraction end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"18. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'double integral.'",
"SPEECH OUTPUT: 'subscript D.'",
"SPEECH OUTPUT: 'd x d y'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"19. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'f left paren x right paren equals left brace math table with 3 rows 2 columns.'",
"SPEECH OUTPUT: 'row 1.'",
"SPEECH OUTPUT: '1 slash 3.'",
"SPEECH OUTPUT: 'if 0 less than or equal to x less than or equal to 1 semicolon.'",
"SPEECH OUTPUT: 'row 2.'",
"SPEECH OUTPUT: '2 slash 3.'",
"SPEECH OUTPUT: 'if 3 less than or equal to x less than or equal to 4 semicolon.'",
"SPEECH OUTPUT: 'row 3.'",
"SPEECH OUTPUT: '0.'",
"SPEECH OUTPUT: 'elsewhere.'",
"SPEECH OUTPUT: 'table end.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"20. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'x plus ... plus x.'",
"SPEECH OUTPUT: 'overscript top brace.'",
"SPEECH OUTPUT: 'overscript k times.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"21. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'y.'",
"SPEECH OUTPUT: 'subscript x.'",
"SPEECH OUTPUT: 'superscript 2.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"22. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'sum.'",
"SPEECH OUTPUT: 'underscript p prime.'",
"SPEECH OUTPUT: 'f left paren p right paren equals integral.'",
"SPEECH OUTPUT: 'subscript t greater than 1.'",
"SPEECH OUTPUT: 'f left paren t right paren d π left paren t right paren'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"23. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'left brace a comma ... comma a.'",
"SPEECH OUTPUT: 'overscript top brace.'",
"SPEECH OUTPUT: 'overscript k a 's.'",
"SPEECH OUTPUT: 'comma b comma ... comma b.'",
"SPEECH OUTPUT: 'overscript top brace.'",
"SPEECH OUTPUT: 'overscript l b 's.'",
"SPEECH OUTPUT: 'underscript bottom brace.'",
"SPEECH OUTPUT: 'underscript k plus l elements.'",
"SPEECH OUTPUT: 'right brace'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"24. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'left paren math table with 2 rows 2 columns.'",
"SPEECH OUTPUT: 'row 1.'",
"SPEECH OUTPUT: 'left paren nested math table with 2 rows 2 columns.'",
"SPEECH OUTPUT: 'row 1.'",
"SPEECH OUTPUT: 'a.'",
"SPEECH OUTPUT: 'b.'",
"SPEECH OUTPUT: 'row 2.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'd.'",
"SPEECH OUTPUT: 'nested table end.'",
"SPEECH OUTPUT: 'right paren.'",
"SPEECH OUTPUT: 'left paren nested math table with 2 rows 2 columns.'",
"SPEECH OUTPUT: 'row 1.'",
"SPEECH OUTPUT: 'e.'",
"SPEECH OUTPUT: 'f.'",
"SPEECH OUTPUT: 'row 2.'",
"SPEECH OUTPUT: 'g.'",
"SPEECH OUTPUT: 'h.'",
"SPEECH OUTPUT: 'nested table end.'",
"SPEECH OUTPUT: 'right paren.'",
"SPEECH OUTPUT: 'row 2.'",
"SPEECH OUTPUT: '0.'",
"SPEECH OUTPUT: 'left paren nested math table with 2 rows 2 columns.'",
"SPEECH OUTPUT: 'row 1.'",
"SPEECH OUTPUT: 'i.'",
"SPEECH OUTPUT: 'j.'",
"SPEECH OUTPUT: 'row 2.'",
"SPEECH OUTPUT: 'k.'",
"SPEECH OUTPUT: 'l.'",
"SPEECH OUTPUT: 'nested table end.'",
"SPEECH OUTPUT: 'right paren.'",
"SPEECH OUTPUT: 'table end.'",
"SPEECH OUTPUT: 'right paren'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"25. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'det vertical bar math table with 5 rows 5 columns.'",
"SPEECH OUTPUT: 'row 1.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 0.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 1.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 2.'",
"SPEECH OUTPUT: 'horizontal ellipsis.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript n.'",
"SPEECH OUTPUT: 'row 2.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 1.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 2.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 3.'",
"SPEECH OUTPUT: 'horizontal ellipsis.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript n plus 1.'",
"SPEECH OUTPUT: 'row 3.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 2.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 3.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 4.'",
"SPEECH OUTPUT: 'horizontal ellipsis.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript n plus 2.'",
"SPEECH OUTPUT: 'row 4.'",
"SPEECH OUTPUT: 'vertical ellipsis.'",
"SPEECH OUTPUT: 'vertical ellipsis.'",
"SPEECH OUTPUT: 'vertical ellipsis.'",
"SPEECH OUTPUT: 'vertical ellipsis.'",
"SPEECH OUTPUT: 'row 5.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript n.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript n plus 1.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript n plus 2.'",
"SPEECH OUTPUT: 'horizontal ellipsis.'",
"SPEECH OUTPUT: 'c.'",
"SPEECH OUTPUT: 'subscript 2 n.'",
"SPEECH OUTPUT: 'table end.'",
"SPEECH OUTPUT: 'vertical bar greater than 0'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"26. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'y.'",
"SPEECH OUTPUT: 'subscript x.'",
"SPEECH OUTPUT: 'subscript 2.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"27. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'x.'",
"SPEECH OUTPUT: 'subscript 92.'",
"SPEECH OUTPUT: 'superscript 31415.'",
"SPEECH OUTPUT: 'plus π'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"28. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'x.'",
"SPEECH OUTPUT: 'subscript y.'",
"SPEECH OUTPUT: 'subscript b.'",
"SPEECH OUTPUT: 'superscript a.'",
"SPEECH OUTPUT: 'superscript z.'",
"SPEECH OUTPUT: 'subscript c.'",
"SPEECH OUTPUT: 'superscript d.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"29. Line Down",
["BRAILLE LINE: 'math'",
" VISIBLE: 'math', cursor=0",
"SPEECH OUTPUT: 'y.'",
"SPEECH OUTPUT: 'subscript 3.'",
"SPEECH OUTPUT: 'superscript triple prime.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"30. Line Down",
["BRAILLE LINE: 'End of test'",
" VISIBLE: 'End of test', cursor=1",
"SPEECH OUTPUT: 'End of test'"]))
# Emit a pass/fail summary for all recorded assertions, then run the
# whole scripted sequence.
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 |
TEAM-Gummy/platform_external_chromium_org | chrome/test/mini_installer/launch_chrome.py | 127 | 1647 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Launches Chrome.
This script launches Chrome and waits until its window shows up.
"""
import optparse
import sys
import time
import win32process
import chrome_helper
def WaitForWindow(process_id, class_pattern):
  """Waits until a window specified by |process_id| and class name shows up.

  Polls every 100 ms for up to 30 seconds.

  Args:
    process_id: The ID of the process that owns the window.
    class_pattern: The regular expression pattern of the window class name.

  Returns:
    A boolean value indicating whether the specified window shows up within
    30 seconds.
  """
  deadline = time.time() + 30
  while time.time() < deadline:
    if chrome_helper.WindowExists([process_id], class_pattern):
      return True
    time.sleep(0.1)
  return False
def main():
  """Parses the command line, starts Chrome and waits for its window."""
  parser = optparse.OptionParser('usage: %prog chrome_path',
                                 description='Launch Chrome.')
  _, args = parser.parse_args()
  if len(args) != 1:
    parser.error('Incorrect number of arguments.')
  (chrome_path,) = args

  # Use CreateProcess rather than subprocess.Popen to avoid side effects
  # such as handle inheritance.
  startup_info = win32process.STARTUPINFO()
  _, _, process_id, _ = win32process.CreateProcess(
      None, chrome_path, None, None, 0, 0, None, None, startup_info)
  if not WaitForWindow(process_id, 'Chrome_WidgetWin_'):
    raise Exception('Could not launch Chrome.')
  return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
trac-ja/trac-ja | trac/mimeview/pygments.py | 2 | 8620 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# Author: Matthew Good <matt@matt-good.net>
from datetime import datetime
import os
from pkg_resources import resource_filename
import re
from trac.core import *
from trac.config import ListOption, Option
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.prefs import IPreferencePanelProvider
from trac.util import get_pkginfo
from trac.util.datefmt import http_date, localtz
from trac.util.translation import _
from trac.web.api import IRequestHandler, HTTPNotFound
from trac.web.chrome import add_notice, add_stylesheet
from genshi import QName, Stream
from genshi.core import Attrs, START, END, TEXT
# Kludge to workaround the lack of absolute imports in Python version prior to
# 2.5
pygments = __import__('pygments', {}, {}, ['lexers', 'styles', 'formatters'])
# Module-level aliases for the Pygments entry points used throughout this file.
get_all_lexers = pygments.lexers.get_all_lexers
get_lexer_by_name = pygments.lexers.get_lexer_by_name
HtmlFormatter = pygments.formatters.html.HtmlFormatter
get_all_styles = pygments.styles.get_all_styles
get_style_by_name = pygments.styles.get_style_by_name
__all__ = ['PygmentsRenderer']
class PygmentsRenderer(Component):
    """HTML renderer for syntax highlighting based on Pygments."""
    implements(ISystemInfoProvider, IHTMLPreviewRenderer,
               IPreferencePanelProvider, IRequestHandler)
    default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
        """The default style to use for Pygments syntax highlighting.""")
    pygments_modes = ListOption('mimeviewer', 'pygments_modes',
        '', doc=
        """List of additional MIME types known by Pygments.
        For each, a tuple `mimetype:mode:quality` has to be
        specified, where `mimetype` is the MIME type,
        `mode` is the corresponding Pygments mode to be used
        for the conversion and `quality` is the quality ratio
        associated to this conversion. That can also be used
        to override the default quality ratio used by the
        Pygments render.""")
    # IHTMLPreviewRenderer flags: expand tabs before rendering, and signal
    # that the renderer emits annotated source rather than an opaque preview.
    expand_tabs = True
    returns_source = True
    # Quality ratio reported for every MIME type Pygments knows about.
    QUALITY_RATIO = 7
    # Sample snippet rendered on the preferences panel to preview a style.
    EXAMPLE = """<!DOCTYPE html>
<html lang="en">
  <head>
    <title>Hello, world!</title>
    <script>
      jQuery(document).ready(function($) {
        $("h1").fadeIn("slow");
      });
    </script>
  </head>
  <body>
    <h1>Hello, world!</h1>
  </body>
</html>"""
    def __init__(self):
        # Lazily-built mapping of mimetype -> (lexer name, quality ratio);
        # populated on first use by _init_types().
        self._types = None
    # ISystemInfoProvider methods
    def get_system_info(self):
        version = get_pkginfo(pygments).get('version')
        # if installed from source, fallback to the hardcoded version info
        if not version and hasattr(pygments, '__version__'):
            version = pygments.__version__
        yield 'Pygments', version
    # IHTMLPreviewRenderer methods
    def get_extra_mimetypes(self):
        # Advertise every mimetype Pygments has a lexer for.
        # NOTE(review): `name` is computed but unused in this loop.
        for lexname, aliases, _, mimetypes in get_all_lexers():
            name = aliases[0] if aliases else lexname
            for mimetype in mimetypes:
                yield mimetype, aliases
    def get_quality_ratio(self, mimetype):
        # Extend default MIME type to mode mappings with configured ones
        if self._types is None:
            self._init_types()
        try:
            return self._types[mimetype][1]
        except KeyError:
            # Unknown mimetype: this renderer does not apply.
            return 0
    def render(self, context, mimetype, content, filename=None, rev=None):
        """Render `content` as a highlighted Genshi stream.

        Returns None (implicitly) when `content` is empty; raises a generic
        Exception when no lexer matches the mimetype."""
        req = context.req
        if self._types is None:
            self._init_types()
        # Link the user's chosen (or default) Pygments stylesheet.
        add_stylesheet(req, '/pygments/%s.css' %
                       req.session.get('pygments_style', self.default_style))
        try:
            if len(content) > 0:
                # Strip any charset parameter, e.g. "text/x-python; charset=...".
                mimetype = mimetype.split(';', 1)[0]
                language = self._types[mimetype][0]
                return self._generate(language, content)
        except (KeyError, ValueError):
            raise Exception("No Pygments lexer found for mime-type '%s'."
                            % mimetype)
    # IPreferencePanelProvider methods
    def get_preference_panels(self, req):
        yield ('pygments', _('Syntax Highlighting'))
    def render_preference_panel(self, req, panel):
        """Show the style picker; on POST, store the choice and redirect."""
        styles = list(get_all_styles())
        if req.method == 'POST':
            style = req.args.get('style')
            if style and style in styles:
                req.session['pygments_style'] = style
                add_notice(req, _('Your preferences have been saved.'))
            # redirect() raises RequestDone, so rendering below is skipped.
            req.redirect(req.href.prefs(panel or None))
        output = self._generate('html', self.EXAMPLE)
        return 'prefs_pygments.html', {
            'output': output,
            'selection': req.session.get('pygments_style', self.default_style),
            'styles': styles
        }
    # IRequestHandler methods
    def match_request(self, req):
        # Serve /pygments/<style>.css; returns None (falsy) on no match.
        match = re.match(r'/pygments/(\w+)\.css', req.path_info)
        if match:
            req.args['style'] = match.group(1)
            return True
    def process_request(self, req):
        """Serve the CSS for a named Pygments style, honoring
        If-Modified-Since based on the style module's file mtime."""
        style = req.args['style']
        try:
            style_cls = get_style_by_name(style)
        except ValueError, e:  # Python 2 except syntax (this file is py2-only)
            raise HTTPNotFound(e)
        # Use the style module's .py file mtime for cache validation.
        parts = style_cls.__module__.split('.')
        filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
        mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
        last_modified = http_date(mtime)
        if last_modified == req.get_header('If-Modified-Since'):
            req.send_response(304)
            req.end_headers()
            return
        formatter = HtmlFormatter(style=style_cls)
        # CSS rules for both the <pre>-based and table-based code views.
        content = u'\n\n'.join([
            formatter.get_style_defs('div.code pre'),
            formatter.get_style_defs('table.code td')
        ]).encode('utf-8')
        req.send_response(200)
        req.send_header('Content-Type', 'text/css; charset=utf-8')
        req.send_header('Last-Modified', last_modified)
        req.send_header('Content-Length', len(content))
        req.write(content)
    # Internal methods
    def _init_types(self):
        """Build the mimetype -> (lexer alias, quality) table from Pygments,
        then overlay the site-configured `pygments_modes` mappings."""
        self._types = {}
        for lexname, aliases, _, mimetypes in get_all_lexers():
            name = aliases[0] if aliases else lexname
            for mimetype in mimetypes:
                self._types[mimetype] = (name, self.QUALITY_RATIO)
        # Pygments currently doesn't know application/javascript
        if 'application/javascript' not in self._types:
            js_entry = self._types.get('text/javascript')
            if js_entry:
                self._types['application/javascript'] = js_entry
        # Site configuration wins over the Pygments-derived defaults.
        self._types.update(
            Mimeview(self.env).configured_modes_mapping('pygments')
        )
    def _generate(self, language, content):
        # stripnl=False keeps leading/trailing blank lines in the output.
        lexer = get_lexer_by_name(language, stripnl=False)
        return GenshiHtmlFormatter().generate(lexer.get_tokens(content))
class GenshiHtmlFormatter(HtmlFormatter):
    """A Pygments formatter subclass that generates a Python stream instead
    of writing markup as strings to an output file.
    """
    def _chunk(self, tokens):
        """Groups tokens with the same CSS class in the token stream
        and yields them one by one, along with the CSS class, with the
        values chunked together."""
        last_class = None
        text = []
        for ttype, value in tokens:
            c = self._get_css_class(ttype)
            # 'n' (plain name) gets no span at all; normalize to ''.
            if c == 'n':
                c = ''
            if c == last_class:
                text.append(value)
                continue
            # If no value, leave the old <span> open.
            # NOTE(review): on the very first token this yields an initial
            # (None, '') chunk, which generate() emits as an empty TEXT event.
            if value:
                yield last_class, u''.join(text)
                text = [value]
                last_class = c
        # Flush the trailing run, if any.
        if text:
            yield last_class, u''.join(text)
    def generate(self, tokens):
        """Turn a Pygments token stream into a Genshi event Stream, wrapping
        each classed chunk in <span class="..."> and leaving plain chunks
        as bare text events."""
        # Genshi events carry a (source, line, col) position; unknown here.
        pos = (None, -1, -1)
        span = QName('span')
        class_ = QName('class')
        def _generate():
            for c, text in self._chunk(tokens):
                if c:
                    attrs = Attrs([(class_, c)])
                    yield START, (span, attrs), pos
                    yield TEXT, text, pos
                    yield END, span, pos
                else:
                    yield TEXT, text, pos
        return Stream(_generate())
| bsd-3-clause |
mogproject/artifact-cli | src/artifactcli/artifact/fileinfo.py | 1 | 2185 | import os
import time
import socket
import getpass
from datetime import datetime
import dateutil.parser
from .baseinfo import BaseInfo
from artifactcli.util import *
class FileInfo(BaseInfo):
    """Metadata about an uploaded artifact file: origin host/user, size,
    modification time and MD5 digest.  Serializable to/from a dict for
    storage (see to_dict()/from_dict())."""
    # Attribute names registered with BaseInfo for comparison/serialization.
    keys = ['host', 'user', 'size', 'mtime', 'md5']
    def __init__(self, host, user, size, mtime, md5):
        super(FileInfo, self).__init__(FileInfo.keys)
        self.host = host
        self.user = user
        self.size = size
        self.mtime = mtime
        self.md5 = md5
    def __str__(self):
        # Human-readable multi-line summary for CLI output.
        buf = [
            'File Info:',
            ' User : %s@%s' % (self.user, self.host),
            ' Modified: %s' % self.mtime,
            ' Size : %s (%s)' % (self.size, self.size_format()),
            ' MD5 : %s' % self.md5,
        ]
        return '\n'.join(buf)
    def to_dict(self):
        # Note the asymmetry: the md5 attribute is stored under 'hex_md5',
        # matching what from_dict() reads back.
        return {
            'host': self.host,
            'user': self.user,
            'size': self.size,
            'mtime': self.mtime.isoformat(),
            'hex_md5': self.md5
        }
    @staticmethod
    def from_path(path):
        """Build a FileInfo by inspecting a file on the local filesystem."""
        host = socket.gethostname()
        user = getpass.getuser()
        size = os.path.getsize(path)
        # Local-time mtime truncated to whole seconds (first 6 struct_time
        # fields); sub-second precision is intentionally dropped.
        mtime = datetime(*time.localtime(os.path.getmtime(path))[:6])
        md5 = FileInfo.get_hex_md5(path)
        return FileInfo(host, user, size, mtime, md5)
    @staticmethod
    def from_dict(d):
        """Inverse of to_dict(); parses the ISO-format mtime string."""
        return FileInfo(d['host'], d['user'], d['size'], dateutil.parser.parse(d['mtime']), d['hex_md5'])
    @staticmethod
    def get_hex_md5(path):
        """Return the hex MD5 digest of a file, read in 64 KiB chunks so
        large files are never loaded into memory at once."""
        import hashlib
        hasher = hashlib.md5()
        blocksize = 65536
        with open(path, 'rb') as f:
            buf = f.read(blocksize)
            while len(buf) > 0:
                hasher.update(buf)
                buf = f.read(blocksize)
        return hasher.hexdigest()
    def size_format(self):
        # Human-readable size, e.g. "1.5KiB".
        return self._sizeof_fmt(self.size)
    @classmethod
    def _sizeof_fmt(cls, num, suffix='B'):
        # Scale down by 1024 through binary unit prefixes until < 1024.
        for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
            if abs(num) < 1024.0:
                return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1024.0
        return "%.1f%s%s" % (num, 'Yi', suffix)
| apache-2.0 |
macloo/flasky | manage.py | 76 | 2391 | #!/usr/bin/env python
import os
# Start code coverage as early as possible (before the app is imported) when
# the test command re-executes this script with FLASK_COVERAGE set; see the
# `test` command below.
COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()
# Load environment variables from a local .env file, if present.  Split on
# the first '=' only, so values that themselves contain '=' (e.g. base64
# secrets, connection strings) are imported intact instead of being skipped.
if os.path.exists('.env'):
    print('Importing environment from .env...')
    for line in open('.env'):
        var = line.strip().split('=', 1)
        if len(var) == 2:
            os.environ[var[0]] = var[1]
from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Objects pre-imported into the `manage.py shell` session."""
    return {'app': app, 'db': db, 'User': User, 'Follow': Follow,
            'Role': Role, 'Permission': Permission, 'Post': Post,
            'Comment': Comment}
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
    """Run the unit tests.

    With --coverage, re-executes this script with FLASK_COVERAGE set so
    the coverage collector (started at module import time, above) is
    active before the application code is imported.
    """
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        # Replace the current process; execution restarts from the top of
        # this file with coverage enabled.
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    # COV is only set when FLASK_COVERAGE was present at import time.
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler.

    :param length: number of top function calls shown per request.
    :param profile_dir: if given, raw profiler stats are dumped there.
    """
    from werkzeug.contrib.profiler import ProfilerMiddleware
    # Wrap the WSGI app so every request is profiled.
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()
@manager.command
def deploy():
    """Run deployment tasks (idempotent; safe to re-run on every deploy)."""
    from flask.ext.migrate import upgrade
    from app.models import Role, User
    # migrate database to latest revision
    upgrade()
    # create user roles
    Role.insert_roles()
    # create self-follows for all users
    User.add_self_follows()
if __name__ == '__main__':
manager.run()
| mit |
saddingtonbaynes/rez | src/rez/vendor/schema/test_schema.py | 8 | 16154 | from __future__ import with_statement
import rez.vendor.unittest2 as unittest
import os
import tempfile
from schema import Schema, Use, And, Or, Optional, SchemaError
# REZ: These are some regular expressions used in converting from original
# pytest format to standard unittest format
#
# re search:
# with SE: (\w[^\n]*)\(([^)]*)\)(\s*#[^\n]*)?\n
# re replace:
# self.assertRaises(SchemaError, \1, \2)\3\n
#
# [Note that you still need to de-indent after applying this]
# re search:
# ( ( *))with SE:\n\1 try:\n((?:(?:\1 [^\n]*| *)\n)*?)\1 except SchemaError as e:\n((?:(?:\1 [^\n]*| *)\n)*?)\1 raise\n
# re replace:
# \1 try:\n\3\1 self.fail("SchemaError should have been raised")\n\1 except SchemaError as e:\n\4
#
# [Note that if you apply this after the new setUpClass method is created, you
# need to skip applying this for the first match, where cls.test_file_name is
# assigned!]
# re search:
# '(\./)?LICENSE-MIT'
# re replace:
# self.test_file_name
#
# re search:
# ^\n\n( *)def test_([a-zA-Z0-9_]+)\(\):
# re replace:
# \n\1def test_\2(self):
# Python 2/3 compatibility: make the name `basestring` available on
# Python 3, where str replaces it, so isinstance checks below work on both.
try:
    basestring
except NameError:
    basestring = str  # Python 3 does not have basestring
def ve(_ignored):
    """Test helper: unconditionally raise ValueError, ignoring its argument."""
    raise ValueError()
def se(_ignored):
    """Test helper: raise a SchemaError carrying a preset auto message
    ('first auto') and error message ('first error')."""
    raise SchemaError('first auto', 'first error')
class TestSchema(unittest.TestCase):
    """Unit tests for the vendored `schema` library (Schema, And, Or, Use,
    Optional).  Converted from the upstream pytest suite to unittest style;
    see the conversion notes in the comments at the top of this file."""
    @classmethod
    def setUpClass(cls):
        # Create a small temp file standing in for the upstream LICENSE-MIT
        # fixture; file-based tests below open it and check its first word.
        cls.test_file_fd, cls.test_file_name = tempfile.mkstemp(suffix='LICENSE-MIT')
        os.write(cls.test_file_fd, "Copyright (c) 2012 Vladimir Keleshev, <vladimir@keleshev.com>")
        os.close(cls.test_file_fd)
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.test_file_name):
            os.remove(cls.test_file_name)
    def test_schema(self):
        # Core Schema behaviour: literals, types, Use-conversions, callables.
        assert Schema(1).validate(1) == 1
        self.assertRaises(SchemaError, Schema(1).validate, 9)
        assert Schema(int).validate(1) == 1
        self.assertRaises(SchemaError, Schema(int).validate, '1')
        assert Schema(Use(int)).validate('1') == 1
        self.assertRaises(SchemaError, Schema(int).validate, int)
        assert Schema(str).validate('hai') == 'hai'
        self.assertRaises(SchemaError, Schema(str).validate, 1)
        assert Schema(Use(str)).validate(1) == '1'
        assert Schema(list).validate(['a', 1]) == ['a', 1]
        assert Schema(dict).validate({'a': 1}) == {'a': 1}
        self.assertRaises(SchemaError, Schema(dict).validate, ['a', 1])
        assert Schema(lambda n: 0 < n < 5).validate(3) == 3
        self.assertRaises(SchemaError, Schema(lambda n: 0 < n < 5).validate, -1)
    def test_validate_file(self):
        assert Schema(
            Use(open)).validate(self.test_file_name).read().startswith('Copyright')
        self.assertRaises(SchemaError, Schema(Use(open)).validate, 'NON-EXISTENT')
        assert Schema(os.path.exists).validate('.') == '.'
        self.assertRaises(SchemaError, Schema(os.path.exists).validate, './non-existent/')
        assert Schema(os.path.isfile).validate(self.test_file_name) == self.test_file_name
        self.assertRaises(SchemaError, Schema(os.path.isfile).validate, 'NON-EXISTENT')
    def test_and(self):
        assert And(int, lambda n: 0 < n < 5).validate(3) == 3
        self.assertRaises(SchemaError, And(int, lambda n: 0 < n < 5).validate, 3.33)
        assert And(Use(int), lambda n: 0 < n < 5).validate(3.33) == 3
        self.assertRaises(SchemaError, And(Use(int), lambda n: 0 < n < 5).validate, '3.33')
    def test_or(self):
        assert Or(int, dict).validate(5) == 5
        assert Or(int, dict).validate({}) == {}
        self.assertRaises(SchemaError, Or(int, dict).validate, 'hai')
        assert Or(int).validate(4)
        self.assertRaises(SchemaError, Or().validate, 2)
    def test_validate_list(self):
        assert Schema([1, 0]).validate([1, 0, 1, 1]) == [1, 0, 1, 1]
        assert Schema([1, 0]).validate([]) == []
        self.assertRaises(SchemaError, Schema([1, 0]).validate, 0)
        self.assertRaises(SchemaError, Schema([1, 0]).validate, [2])
        assert And([1, 0], lambda l: len(l) > 2).validate([0, 1, 0]) == [0, 1, 0]
        self.assertRaises(SchemaError, And([1, 0], lambda l: len(l) > 2).validate, [0, 1])
    def test_list_tuple_set_frozenset(self):
        assert Schema([int]).validate([1, 2])
        self.assertRaises(SchemaError, Schema([int]).validate, ['1', 2])
        assert Schema(set([int])).validate(set([1, 2])) == set([1, 2])
        self.assertRaises(SchemaError, Schema(set([int])).validate, [1, 2]) # not a set
        self.assertRaises(SchemaError, Schema(set([int])).validate, ['1', 2])
        assert Schema(tuple([int])).validate(tuple([1, 2])) == tuple([1, 2])
        self.assertRaises(SchemaError, Schema(tuple([int])).validate, [1, 2]) # not a set
    def test_strictly(self):
        assert Schema(int).validate(1) == 1
        self.assertRaises(SchemaError, Schema(int).validate, '1')
    def test_dict(self):
        assert Schema({'key': 5}).validate({'key': 5}) == {'key': 5}
        self.assertRaises(SchemaError, Schema({'key': 5}).validate, {'key': 'x'})
        assert Schema({'key': int}).validate({'key': 5}) == {'key': 5}
        assert Schema({'n': int, 'f': float}).validate(
            {'n': 5, 'f': 3.14}) == {'n': 5, 'f': 3.14}
        self.assertRaises(SchemaError, Schema({'n': int, 'f': float}).validate,
                          {'n': 3.14, 'f': 5})
        # Error messages differ between Python 2 and 3 set reprs, hence
        # the membership checks against both spellings below.
        try:
            Schema({'key': 5}).validate({})
            self.fail("SchemaError should have been raised")
        except SchemaError as e:
            assert e.args[0] in ["missed keys set(['key'])",
                                 "missed keys {'key'}"] # Python 3 style
        try:
            Schema({'key': 5}).validate({'n': 5})
            self.fail("SchemaError should have been raised")
        except SchemaError as e:
            assert e.args[0] in ["missed keys set(['key'])",
                                 "missed keys {'key'}"] # Python 3 style
        try:
            Schema({}).validate({'n': 5})
            self.fail("SchemaError should have been raised")
        except SchemaError as e:
            assert e.args[0] == "wrong keys 'n' in {'n': 5}"
        try:
            Schema({'key': 5}).validate({'key': 5, 'bad': 5})
            self.fail("SchemaError should have been raised")
        except SchemaError as e:
            assert e.args[0] in ["wrong keys 'bad' in {'key': 5, 'bad': 5}",
                                 "wrong keys 'bad' in {'bad': 5, 'key': 5}"]
        try:
            Schema({}).validate({'a': 5, 'b': 5})
            self.fail("SchemaError should have been raised")
        except SchemaError as e:
            assert e.args[0] in ["wrong keys 'a', 'b' in {'a': 5, 'b': 5}",
                                 "wrong keys 'a', 'b' in {'b': 5, 'a': 5}"]
    def test_dict_keys(self):
        assert Schema({str: int}).validate(
            {'a': 1, 'b': 2}) == {'a': 1, 'b': 2}
        self.assertRaises(SchemaError, Schema({str: int}).validate, {1: 1, 'b': 2})
        assert Schema({Use(str): Use(int)}).validate(
            {1: 3.14, 3.14: 1}) == {'1': 3, '3.14': 1}
    def test_dict_optional_keys(self):
        self.assertRaises(SchemaError, Schema({'a': 1, 'b': 2}).validate, {'a': 1})
        assert Schema({'a': 1, Optional('b'): 2}).validate({'a': 1}) == {'a': 1}
        assert Schema({'a': 1, Optional('b'): 2}).validate(
            {'a': 1, 'b': 2}) == {'a': 1, 'b': 2}
        # Make sure Optionals are favored over types:
        assert Schema({basestring: 1,
                       Optional('b'): 2}).validate({'a': 1, 'b': 2}) == {'a': 1, 'b': 2}
    def test_dict_optional_defaults(self):
        # Optionals fill out their defaults:
        assert Schema({Optional('a', default=1): 11,
                       Optional('b', default=2): 22}).validate({'a': 11}) == {'a': 11, 'b': 2}
        # Optionals take precedence over types. Here, the "a" is served by the
        # Optional:
        assert Schema({Optional('a', default=1): 11,
                       basestring: 22}).validate({'b': 22}) == {'a': 1, 'b': 22}
        self.assertRaises(TypeError, Optional, And(str, Use(int)), default=7)
    def test_complex(self):
        # Docopt-style composite schema exercising files, paths and counts.
        s = Schema({'<file>': And([Use(open)], lambda l: len(l)),
                    '<path>': os.path.exists,
                    Optional('--count'): And(int, lambda n: 0 <= n <= 5)})
        data = s.validate({'<file>': [self.test_file_name], '<path>': './'})
        assert len(data) == 2
        assert len(data['<file>']) == 1
        assert data['<file>'][0].read().startswith('Copyright')
        assert data['<path>'] == './'
    def test_nice_errors(self):
        try:
            Schema(int, error='should be integer').validate('x')
        except SchemaError as e:
            assert e.errors == ['should be integer']
        try:
            Schema(Use(float), error='should be a number').validate('x')
        except SchemaError as e:
            assert e.code == 'should be a number'
        try:
            Schema({Optional('i'): Use(int, error='should be a number')}).validate({'i': 'x'})
        except SchemaError as e:
            assert e.code == 'should be a number'
    def test_use_error_handling(self):
        # `autos` are auto-generated messages, `errors` the user-supplied ones.
        try:
            Use(ve).validate('x')
        except SchemaError as e:
            assert e.autos == ["ve('x') raised ValueError()"]
            assert e.errors == [None]
        try:
            Use(ve, error='should not raise').validate('x')
        except SchemaError as e:
            assert e.autos == ["ve('x') raised ValueError()"]
            assert e.errors == ['should not raise']
        try:
            Use(se).validate('x')
        except SchemaError as e:
            assert e.autos == [None, 'first auto']
            assert e.errors == [None, 'first error']
        try:
            Use(se, error='second error').validate('x')
        except SchemaError as e:
            assert e.autos == [None, 'first auto']
            assert e.errors == ['second error', 'first error']
    def test_or_error_handling(self):
        try:
            Or(ve).validate('x')
        except SchemaError as e:
            assert e.autos[0].startswith('Or(')
            assert e.autos[0].endswith(") did not validate 'x'")
            assert e.autos[1] == "ve('x') raised ValueError()"
            assert len(e.autos) == 2
            assert e.errors == [None, None]
        try:
            Or(ve, error='should not raise').validate('x')
        except SchemaError as e:
            assert e.autos[0].startswith('Or(')
            assert e.autos[0].endswith(") did not validate 'x'")
            assert e.autos[1] == "ve('x') raised ValueError()"
            assert len(e.autos) == 2
            assert e.errors == ['should not raise', 'should not raise']
        try:
            Or('o').validate('x')
        except SchemaError as e:
            assert e.autos == ["Or('o') did not validate 'x'",
                               "'o' does not match 'x'"]
            assert e.errors == [None, None]
        try:
            Or('o', error='second error').validate('x')
        except SchemaError as e:
            assert e.autos == ["Or('o') did not validate 'x'",
                               "'o' does not match 'x'"]
            assert e.errors == ['second error', 'second error']
    def test_and_error_handling(self):
        try:
            And(ve).validate('x')
        except SchemaError as e:
            assert e.autos == ["ve('x') raised ValueError()"]
            assert e.errors == [None]
        try:
            And(ve, error='should not raise').validate('x')
        except SchemaError as e:
            assert e.autos == ["ve('x') raised ValueError()"]
            assert e.errors == ['should not raise']
        try:
            And(str, se).validate('x')
        except SchemaError as e:
            assert e.autos == [None, 'first auto']
            assert e.errors == [None, 'first error']
        try:
            And(str, se, error='second error').validate('x')
        except SchemaError as e:
            assert e.autos == [None, 'first auto']
            assert e.errors == ['second error', 'first error']
    def test_schema_error_handling(self):
        try:
            Schema(Use(ve)).validate('x')
        except SchemaError as e:
            assert e.autos == [None, "ve('x') raised ValueError()"]
            assert e.errors == [None, None]
        try:
            Schema(Use(ve), error='should not raise').validate('x')
        except SchemaError as e:
            assert e.autos == [None, "ve('x') raised ValueError()"]
            assert e.errors == ['should not raise', None]
        try:
            Schema(Use(se)).validate('x')
        except SchemaError as e:
            assert e.autos == [None, None, 'first auto']
            assert e.errors == [None, None, 'first error']
        try:
            Schema(Use(se), error='second error').validate('x')
        except SchemaError as e:
            assert e.autos == [None, None, 'first auto']
            assert e.errors == ['second error', None, 'first error']
    def test_use_json(self):
        # Use(json.loads) converts the JSON text before the dict schema runs.
        import json
        gist_schema = Schema(And(Use(json.loads), # first convert from JSON
                                 {Optional('description'): basestring,
                                  'public': bool,
                                  'files': {basestring: {'content': basestring}}}))
        gist = '''{"description": "the description for this gist",
                   "public": true,
                   "files": {
                       "file1.txt": {"content": "String file contents"},
                       "other.txt": {"content": "Another file contents"}}}'''
        assert gist_schema.validate(gist)
    def test_error_reporting(self):
        s = Schema({'<files>': [Use(open, error='<files> should be readable')],
                    '<path>': And(os.path.exists, error='<path> should exist'),
                    '--count': Or(None, And(Use(int), lambda n: 0 < n < 5),
                                  error='--count should be integer 0 < n < 5')},
                   error='Error:')
        s.validate({'<files>': [], '<path>': './', '--count': 3})
        try:
            s.validate({'<files>': [], '<path>': './', '--count': '10'})
        except SchemaError as e:
            assert e.code == 'Error:\n--count should be integer 0 < n < 5'
        try:
            s.validate({'<files>': [], '<path>': './hai', '--count': '2'})
        except SchemaError as e:
            assert e.code == 'Error:\n<path> should exist'
        try:
            s.validate({'<files>': ['hai'], '<path>': './', '--count': '2'})
        except SchemaError as e:
            assert e.code == 'Error:\n<files> should be readable'
    def test_schema_repr(self): # what about repr with `error`s?
        schema = Schema([Or(None, And(str, Use(float)))])
        repr_ = "Schema([Or(None, And(<type 'str'>, Use(<type 'float'>)))])"
        # in Python 3 repr contains <class 'str'>, not <type 'str'>
        assert repr(schema).replace('class', 'type') == repr_
    def test_validate_object(self):
        schema = Schema({object: str})
        assert schema.validate({42: 'str'}) == {42: 'str'}
        self.assertRaises(SchemaError, schema.validate, {42: 777})
    def test_issue_9_prioritized_key_comparison(self):
        # Literal keys must be matched before type-based keys.
        validate = Schema({'key': 42, object: 42}).validate
        assert validate({'key': 42, 777: 42}) == {'key': 42, 777: 42}
    def test_issue_9_prioritized_key_comparison_in_dicts(self):
        # http://stackoverflow.com/questions/14588098/docopt-schema-validation
        s = Schema({'ID': Use(int, error='ID should be an int'),
                    'FILE': Or(None, Use(open, error='FILE should be readable')),
                    Optional(str): object})
        data = {'ID': 10, 'FILE': None, 'other': 'other', 'other2': 'other2'}
        assert s.validate(data) == data
        data = {'ID': 10, 'FILE': None}
        assert s.validate(data) == data
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Bismarrck/tensorflow | tensorflow/contrib/framework/__init__.py | 7 | 4089 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework utilities.
@@assert_same_float_dtype
@@assert_scalar
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@get_graph_from_inputs
@@is_numeric_tensor
@@is_non_decreasing
@@is_strictly_increasing
@@is_tensor
@@reduce_sum_n
@@remove_squeezable_dimensions
@@with_shape
@@with_same_shape
@@deprecated
@@deprecated_args
@@deprecated_arg_values
@@arg_scope
@@add_arg_scope
@@current_arg_scope
@@has_arg_scope
@@arg_scoped_arguments
@@prepend_name_scope
@@strip_name_scope
@@add_model_variable
@@assert_global_step
@@assert_or_get_global_step
@@assign_from_checkpoint
@@assign_from_checkpoint_fn
@@assign_from_values
@@assign_from_values_fn
@@create_global_step
@@filter_variables
@@fuse_op
@@get_global_step
@@get_or_create_global_step
@@get_local_variables
@@get_model_variables
@@get_name_scope
@@get_trainable_variables
@@get_unique_variable
@@get_variables_by_name
@@get_variables_by_suffix
@@get_variable_full_name
@@get_variables_to_restore
@@get_variables
@@global_variable
@@local_variable
@@model_variable
@@variable
@@VariableDeviceChooser
@@convolutional_delta_orthogonal
@@convolutional_orthogonal_1d
@@convolutional_orthogonal_2d
@@convolutional_orthogonal_3d
@@zero_initializer
@@load_checkpoint
@@list_variables
@@load_variable
@@init_from_checkpoint
@@load_and_remap_matrix_initializer
@@load_embedding_initializer
@@load_linear_multiclass_bias_initializer
@@load_variable_slot_initializer
@@argsort
@@py_func
@@sort
@@get_placeholders
@@smart_cond
@@smart_constant_value
@@smart_case
@@CriticalSection
@@BoundedTensorSpec
@@TensorSpec
@@RecordInput
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.framework.python.framework import *
from tensorflow.contrib.framework.python.framework import nest
from tensorflow.contrib.framework.python.ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.framework.ops import prepend_name_scope
from tensorflow.python.framework.ops import strip_name_scope
from tensorflow.python.framework.smart_cond import smart_case
from tensorflow.python.framework.smart_cond import smart_cond
from tensorflow.python.framework.smart_cond import smart_constant_value
from tensorflow.python.framework.tensor_spec import BoundedTensorSpec
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.ops.data_flow_ops import RecordInput
from tensorflow.python.ops.init_ops import convolutional_delta_orthogonal
from tensorflow.python.ops.init_ops import convolutional_orthogonal_1d
from tensorflow.python.ops.init_ops import convolutional_orthogonal_2d
from tensorflow.python.ops.init_ops import convolutional_orthogonal_3d
from tensorflow.python.util.all_util import remove_undocumented
# Symbols exempt from the docstring-presence check applied below; everything
# not documented in the module docstring above (or listed here) is removed
# from the public namespace by remove_undocumented().
_allowed_symbols = ['nest']
# Public surface kept on the re-exported `nest` submodule.
_nest_allowed_symbols = [
    'assert_same_structure',
    'is_sequence',
    'flatten',
    'flatten_dict_items',
    'pack_sequence_as',
    'map_structure',
    'map_structure_with_paths',
    'assert_shallow_structure',
    'flatten_up_to',
    'map_structure_up_to',
    'get_traverse_shallow_structure',
    'yield_flat_paths',
    'flatten_with_joined_string_paths',
]
remove_undocumented(nest.__name__, allowed_exception_list=_nest_allowed_symbols)
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| apache-2.0 |
OpenGazettes/aleph | aleph/model/validate.py | 2 | 1258 | import os
import json
from dalet import is_country_code, is_partial_date, is_language_code
from dalet import is_domain, is_url
from jsonschema import Draft4Validator, FormatChecker, RefResolver
from aleph.core import get_config
# Shared JSON-schema reference resolver, rooted at the core schema id.
resolver = RefResolver('core.json#', {})
# Preload every schema file found under ./validation into the resolver's
# store, keyed by each schema's declared 'id', so $ref lookups resolve
# locally without network access.
SCHEMA_DIR = os.path.join(os.path.dirname(__file__), 'validation')
for (root, dirs, files) in os.walk(SCHEMA_DIR):
    for schema_file in files:
        with open(os.path.join(root, schema_file), 'r') as fh:
            schema = json.load(fh)
            resolver.store[schema['id']] = schema
# Register custom string formats backed by the `dalet` predicate functions
# imported above; validators look these up by format name.
format_checker = FormatChecker()
format_checker.checks('country-code')(is_country_code)
format_checker.checks('partial-date')(is_partial_date)
format_checker.checks('language-code')(is_language_code)
format_checker.checks('url')(is_url)
format_checker.checks('domain')(is_domain)
@format_checker.checks('collection-category')
def is_collection_category(cat):
    """Format check: is `cat` one of the configured collection categories?"""
    return cat in get_config('COLLECTION_CATEGORIES', {})
def validate(data, schema):
    """Validate `data` against the JSON schema registered under the id
    `schema`, using the shared resolver and custom format checkers."""
    _, resolved_schema = resolver.resolve(schema)
    checker = Draft4Validator(resolved_schema, resolver=resolver,
                              format_checker=format_checker)
    return checker.validate(data, resolved_schema)
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/reportlab/lib/styles.py | 27 | 13238 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/styles.py
__version__=''' $Id$ '''
__doc__='''Classes for ParagraphStyle and similar things.
A style is a collection of attributes, but with some extra features
to allow 'inheritance' from a parent, and to ensure nobody makes
changes after construction.
ParagraphStyle shows all the attributes available for formatting
paragraphs.
getSampleStyleSheet() returns a stylesheet you can use for initial
development, with a few basic heading and text styles.
'''
__all__=(
'PropertySet',
'ParagraphStyle',
'LineStyle',
'ListStyle',
'StyleSheet1',
'getSampleStyleSheet',
)
from reportlab.lib.colors import white, black
from reportlab.lib.enums import TA_LEFT, TA_CENTER
from reportlab.lib.fonts import tt2ps
from reportlab.rl_config import canvas_basefontname as _baseFontName, baseUnderlineProportion as _baseUnderlineProportion
# Bold / italic / bold-italic companions of the configured base font,
# resolved through the Type-1 font-family mapping in reportlab.lib.fonts.
_baseFontNameB = tt2ps(_baseFontName,1,0)
_baseFontNameI = tt2ps(_baseFontName,0,1)
_baseFontNameBI = tt2ps(_baseFontName,1,1)
###########################################################
# This class provides an 'instance inheritance'
# mechanism for its descendants, simpler than acquisition
# but not as far-reaching
###########################################################
class PropertySet:
    """Base class for styles: copies class `defaults`, then the parent
    style's attributes, then constructor keywords — a simple one-shot
    'instance inheritance' done entirely in __init__."""
    # Subclasses override with their attribute -> default-value mapping.
    defaults = {}
    def __init__(self, name, parent=None, **kw):
        """When initialized, it copies the class defaults;
        then takes a copy of the attributes of the parent
        if any. All the work is done in init - styles
        should cost little to use at runtime."""
        # step one - validate the hell out of it
        assert 'name' not in self.defaults, "Class Defaults may not contain a 'name' attribute"
        assert 'parent' not in self.defaults, "Class Defaults may not contain a 'parent' attribute"
        if parent:
            assert parent.__class__ == self.__class__, "Parent style %s must have same class as new style %s" % (parent.__class__.__name__,self.__class__.__name__)
        #step two
        self.name = name
        self.parent = parent
        self.__dict__.update(self.defaults)
        #step two - copy from parent if any. Try to be
        # very strict that only keys in class defaults are
        # allowed, so they cannot inherit
        self.refresh()
        self._setKwds(**kw)
    def _setKwds(self,**kw):
        #step three - copy keywords if any
        for key, value in kw.items():
            self.__dict__[key] = value
    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, self.name)
    def refresh(self):
        """re-fetches attributes from the parent on demand;
        use if you have been hacking the styles. This is
        used by __init__"""
        if self.parent:
            for key, value in self.parent.__dict__.items():
                if (key not in ['name','parent']):
                    self.__dict__[key] = value
    def listAttrs(self, indent=''):
        # Debug helper: print every style attribute (sorted), excluding
        # the name/parent bookkeeping fields.
        print(indent + 'name =', self.name)
        print(indent + 'parent =', self.parent)
        keylist = list(self.__dict__.keys())
        keylist.sort()
        keylist.remove('name')
        keylist.remove('parent')
        for key in keylist:
            value = self.__dict__.get(key, None)
            print(indent + '%s = %s' % (key, value))
    def clone(self, name, parent=None, **kwds):
        """Copy this style under a new name; when no parent is supplied the
        clone's parent becomes this style itself (the and/or below is the
        old-style conditional-expression idiom)."""
        r = self.__class__(name,parent)
        r.__dict__ = self.__dict__.copy()
        r.name = name
        r.parent = parent is None and self or parent
        r._setKwds(**kwds)
        return r
class ParagraphStyle(PropertySet):
    """Complete set of default attributes for laying out a paragraph.

    Values are inherited/overridden per instance via PropertySet; sizes
    and indents are in points.
    """
    defaults = {
        'fontName':_baseFontName,
        'fontSize':10,
        'leading':12,
        'leftIndent':0,
        'rightIndent':0,
        'firstLineIndent':0,
        'alignment':TA_LEFT,
        'spaceBefore':0,
        'spaceAfter':0,
        'bulletFontName':_baseFontName,
        'bulletFontSize':10,
        'bulletIndent':0,
        #'bulletColor':black,
        'textColor': black,
        'backColor':None,
        'wordWrap':None, #None means do nothing special
            #CJK use Chinese Line breaking
            #LTR RTL use left to right / right to left
            #with support from pyfribi2 if available
        'borderWidth': 0,
        'borderPadding': 0,
        'borderColor': None,
        'borderRadius': None,
        'allowWidows': 1,
        'allowOrphans': 0,
        'textTransform':None, #uppercase lowercase (captitalize not yet) or None or absent
        'endDots':None, #dots on the last line of left/right justified paras
            #string or object with text and optional fontName, fontSize, textColor & backColor
            #dy
        'splitLongWords':1, #make best efforts to split long words
        'underlineProportion': _baseUnderlineProportion, #set to non-zero to get proportional
        'bulletAnchor': 'start', #where the bullet is anchored ie start, middle, end or numeric
        }
class LineStyle(PropertySet):
    """Style for straight lines: a stroke width (points) and a colour."""
    defaults = {
        'width':1,
        'color': black
        }

    def prepareCanvas(self, canvas):
        """You can ask a LineStyle to set up the canvas for drawing
        the lines."""
        # Fix: honour this style's own width instead of a hard-coded 1,
        # so LineStyle('x', width=2) actually produces 2pt lines.
        # (Default width is 1, so default behaviour is unchanged.)
        canvas.setLineWidth(self.width)
        #etc. etc.
class ListStyle(PropertySet):
    """Defaults used when laying out bulleted / numbered lists."""
    defaults = {
        'leftIndent': 18,
        'rightIndent': 0,
        'bulletAlign': 'left',
        'bulletType': '1',
        'bulletColor': black,
        'bulletFontName': 'Helvetica',
        'bulletFontSize': 12,
        'bulletOffsetY': 0,
        'bulletDedent': 'auto',
        'bulletDir': 'ltr',
        'bulletFormat': None,
        'start': None,  # starting value for a list
        }
# Unique sentinel for StyleSheet1.get(): distinguishes "no default supplied"
# from an explicit default of None (or any other value).
_stylesheet1_undefined = object()
class StyleSheet1:
"""
This may or may not be used. The idea is to:
1. slightly simplify construction of stylesheets;
2. enforce rules to validate styles when added
(e.g. we may choose to disallow having both
'heading1' and 'Heading1' - actual rules are
open to discussion);
3. allow aliases and alternate style lookup
mechanisms
4. Have a place to hang style-manipulation
methods (save, load, maybe support a GUI
editor)
Access is via getitem, so they can be
compatible with plain old dictionaries.
"""
def __init__(self):
self.byName = {}
self.byAlias = {}
def __getitem__(self, key):
try:
return self.byAlias[key]
except KeyError:
try:
return self.byName[key]
except KeyError:
raise KeyError("Style '%s' not found in stylesheet" % key)
def get(self,key,default=_stylesheet1_undefined):
try:
return self[key]
except KeyError:
if default!=_stylesheet1_undefined: return default
raise
def __contains__(self, key):
return key in self.byAlias or key in self.byName
def has_key(self,key):
return key in self
def add(self, style, alias=None):
key = style.name
if key in self.byName:
raise KeyError("Style '%s' already defined in stylesheet" % key)
if key in self.byAlias:
raise KeyError("Style name '%s' is already an alias in stylesheet" % key)
if alias:
if alias in self.byName:
raise KeyError("Style '%s' already defined in stylesheet" % alias)
if alias in self.byAlias:
raise KeyError("Alias name '%s' is already an alias in stylesheet" % alias)
#passed all tests? OK, add it
self.byName[key] = style
if alias:
self.byAlias[alias] = style
def list(self):
styles = list(self.byName.items())
styles.sort()
alii = {}
for (alias, style) in list(self.byAlias.items()):
alii[style] = alias
for (name, style) in styles:
alias = alii.get(style, None)
print(name, alias)
style.listAttrs(' ')
print()
def testStyles():
    """Build a 'Normal' style plus a derived 'Literal' style, dump both
    to stdout, and return them as a pair."""
    normal = ParagraphStyle('Normal', None)
    normal.fontName = _baseFontName
    normal.fontSize = 12
    normal.leading = 14.4
    normal.listAttrs()
    print()

    # 'Literal' inherits everything from Normal except the font.
    literal = ParagraphStyle('Literal', normal)
    literal.fontName = 'Courier'
    literal.listAttrs()
    return normal, literal
def getSampleStyleSheet():
    """Returns a stylesheet object

    The sheet contains 'Normal' plus body, heading (h1-h6), title,
    bullet, definition and code styles; every style is parented
    (directly or indirectly) on 'Normal'.
    """
    stylesheet = StyleSheet1()

    # Root style - everything below derives from it.
    stylesheet.add(ParagraphStyle(name='Normal',
                                  fontName=_baseFontName,
                                  fontSize=10,
                                  leading=12)
                   )

    stylesheet.add(ParagraphStyle(name='BodyText',
                                  parent=stylesheet['Normal'],
                                  spaceBefore=6)
                   )
    stylesheet.add(ParagraphStyle(name='Italic',
                                  parent=stylesheet['BodyText'],
                                  fontName = _baseFontNameI)
                   )

    # Heading hierarchy (aliases 'h1'..'h6') plus a centred 'Title'.
    stylesheet.add(ParagraphStyle(name='Heading1',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameB,
                                  fontSize=18,
                                  leading=22,
                                  spaceAfter=6),
                   alias='h1')

    stylesheet.add(ParagraphStyle(name='Title',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameB,
                                  fontSize=18,
                                  leading=22,
                                  alignment=TA_CENTER,
                                  spaceAfter=6),
                   alias='title')

    stylesheet.add(ParagraphStyle(name='Heading2',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameB,
                                  fontSize=14,
                                  leading=18,
                                  spaceBefore=12,
                                  spaceAfter=6),
                   alias='h2')

    stylesheet.add(ParagraphStyle(name='Heading3',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameBI,
                                  fontSize=12,
                                  leading=14,
                                  spaceBefore=12,
                                  spaceAfter=6),
                   alias='h3')

    stylesheet.add(ParagraphStyle(name='Heading4',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameBI,
                                  fontSize=10,
                                  leading=12,
                                  spaceBefore=10,
                                  spaceAfter=4),
                   alias='h4')

    stylesheet.add(ParagraphStyle(name='Heading5',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameB,
                                  fontSize=9,
                                  leading=10.8,
                                  spaceBefore=8,
                                  spaceAfter=4),
                   alias='h5')

    stylesheet.add(ParagraphStyle(name='Heading6',
                                  parent=stylesheet['Normal'],
                                  fontName = _baseFontNameB,
                                  fontSize=7,
                                  leading=8.4,
                                  spaceBefore=6,
                                  spaceAfter=2),
                   alias='h6')

    # Special-purpose styles: bullets, definitions and literal code.
    stylesheet.add(ParagraphStyle(name='Bullet',
                                  parent=stylesheet['Normal'],
                                  firstLineIndent=0,
                                  spaceBefore=3),
                   alias='bu')

    stylesheet.add(ParagraphStyle(name='Definition',
                                  parent=stylesheet['Normal'],
                                  firstLineIndent=0,
                                  leftIndent=36,
                                  bulletIndent=0,
                                  spaceBefore=6,
                                  bulletFontName=_baseFontNameBI),
                   alias='df')

    stylesheet.add(ParagraphStyle(name='Code',
                                  parent=stylesheet['Normal'],
                                  fontName='Courier',
                                  fontSize=8,
                                  leading=8.8,
                                  firstLineIndent=0,
                                  leftIndent=36))

    return stylesheet
| agpl-3.0 |
adviti/melange | app/soc/views/helper/decorators.py | 1 | 1144 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views decorators.
"""
from functools import wraps
from django import http
def mutation(func):
  """This decorator indicates that the view is a mutation operation and is
  therefore restricted to POST requests.

  XSRF checking is performed automatically by the xsrf middleware.
  """
  @wraps(func)
  def wrapper(self, request, *args, **kwargs):
    # Only POST may mutate; every other method is rejected with 403.
    if request.method == "POST":
      return func(self, request, *args, **kwargs)
    return http.HttpResponse("Invoked a mutation view w/o POST.", status=403)
  return wrapper
| apache-2.0 |
ryanGT/sympy | sympy/thirdparty/__init__.py | 10 | 1047 | """Thirdparty Packages for internal use.
"""
import sys
import os
def import_thirdparty(lib):
    """
    Imports a thirdparty package "lib" by setting all paths correctly.

    At the moment, there is only the "pyglet" library, so we just put
    pyglet to sys.path temporarily, then import "lib" and then restore the path.
    With more packages, we'll just put them to sys.path as well.
    """
    # Names already routed through new_import once; subsequent imports of the
    # same name skip the path manipulation.
    seen = set()
    def new_import(name, globals={}, locals={}, fromlist=[]):
        # Mirrors the builtin __import__ signature.  The mutable defaults are
        # never mutated here, only passed through to the real __import__.
        if name in seen:
            return old_import(name, globals, locals, fromlist)
        seen.add(name)
        # Put the bundled pyglet directory first on sys.path so that nested
        # imports triggered by this one resolve against it too.
        sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname( \
            __file__)), "pyglet"))
        try:
            m = old_import(name, globals, locals, fromlist)
        finally:
            del sys.path[0]
        return m
    # NOTE(review): __builtin__ exists only on Python 2 (renamed 'builtins'
    # in Python 3); this function cannot run under Python 3 as written.
    # The global import hook is swapped only for the duration of the
    # __import__(lib) call below and always restored.
    import __builtin__
    old_import = __builtin__.__import__
    __builtin__.__import__ = new_import
    try:
        m = __import__(lib)
    finally:
        __builtin__.__import__ = old_import
    return m
| bsd-3-clause |
tdtrask/ansible | lib/ansible/utils/module_docs_fragments/ironware.py | 25 | 3932 | #
# (c) 2017, Paul Baker <@paulquack>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for Brocade IronWare network modules.

    Only the DOCUMENTATION string is consumed (by Ansible's doc-fragment
    loader); its YAML is merged into each module's own documentation.
    """

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  authorize:
    description:
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands. If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
    default: no
    choices: ['yes', 'no']
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device. This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
      authorize:
        description:
          - Instructs the module to enter privileged mode on the remote device
            before sending any commands. If not specified, the device will
            attempt to execute all commands in non-privileged mode. If the value
            is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
        default: no
        choices: ['yes', 'no']
      auth_pass:
        description:
          - Specifies the password to use if required to enter privileged mode
            on the remote device. If I(authorize) is false, then this argument
            does nothing. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
        default: none
      timeout:
        description:
          - Specifies idle timeout in seconds for the connection, in seconds. Useful
            if the console freezes before continuing. For example when saving
            configurations.
        default: 10
"""
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/tables/azure-data-tables/tests/_shared/asynctestcase.py | 1 | 4937 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from __future__ import division
from datetime import datetime
from dateutil.tz import tzutc
import uuid
from azure.core.credentials import AccessToken
from azure.core.exceptions import ResourceExistsError
from azure.data.tables import (
EntityProperty,
EdmType,
)
from azure.data.tables.aio import TableServiceClient
from azure.identity.aio import DefaultAzureCredential
from devtools_testutils import is_live
from .testcase import TableTestCase, SLEEP_DELAY
# Prefix for generated per-test table names in the async test suite.
TEST_TABLE_PREFIX = "pytableasync"
class AsyncFakeTokenCredential(object):
    """Protocol for classes able to provide OAuth tokens.

    :param str scopes: Lets you specify the type of access needed.
    """
    def __init__(self):
        # Playback (non-live) runs never contact AAD, so any token body works.
        # Second positional arg is presumably expires_on - confirm against
        # azure.core.credentials.AccessToken.
        self.token = AccessToken("YOU SHALL NOT PASS", 0)

    async def get_token(self, *args):
        # Always returns the same static token; scopes are ignored.
        return self.token
class AsyncTableTestCase(TableTestCase):
    """Async counterpart of TableTestCase: helpers to create, populate and
    tear down tables via the aio clients during live or recorded runs."""

    def get_token_credential(self):
        # Real AAD credential only when recording against live services;
        # otherwise a static fake so playback needs no network.
        if is_live():
            return DefaultAzureCredential()
        return self.generate_fake_token()

    def generate_fake_token(self):
        return AsyncFakeTokenCredential()

    def _get_table_reference(self, prefix=TEST_TABLE_PREFIX):
        # get_resource_name presumably adds a unique per-test suffix - provided
        # by the devtools_testutils base class.
        table_name = self.get_resource_name(prefix)
        return table_name

    async def _create_table(self, ts, prefix=TEST_TABLE_PREFIX, table_list=None):
        """Create a uniquely named table (reuse it if it already exists);
        append the created table to *table_list* when given."""
        table_name = self._get_table_reference(prefix)
        try:
            table = await ts.create_table(table_name)
            if table_list is not None:
                table_list.append(table)
        except ResourceExistsError:
            table = ts.get_table_client(table_name)
        return table

    async def _delete_all_tables(self, account_name, key):
        # NOTE(review): hard-codes the "cosmos" endpoint flavour, so this
        # helper only targets Cosmos table accounts.
        client = TableServiceClient(self.account_url(account_name, "cosmos"), credential=key)
        async for table in client.list_tables():
            await client.delete_table(table.name)

        if self.is_live:
            # Give the service time to finish the deletes before the next test.
            self.sleep(10)

    async def _tear_down(self):
        """Delete every table in the account (live runs only) and close the client."""
        if is_live():
            async for table in self.ts.list_tables():
                await self.ts.delete_table(table.name)
            self.test_tables = []
            await self.ts.close()

    async def _create_query_table(self, entity_count):
        """
        Creates a table with the specified name and adds entities with the
        default set of values. PartitionKey is set to 'MyPartition' and RowKey
        is set to a unique counter value starting at 1 (as a string).
        """
        table_name = self.get_resource_name("querytable")
        table = await self.ts.create_table(table_name)
        self.query_tables.append(table_name)
        client = self.ts.get_table_client(table_name)
        entity = self._create_random_entity_dict()
        for i in range(1, entity_count + 1):
            # NOTE(review): RowKey grows cumulatively ("rk1", "rk12", ...)
            # because the suffix is appended to the previous value.
            entity["RowKey"] = entity["RowKey"] + str(i)
            await client.create_entity(entity=entity)
        return client

    async def _insert_two_opposite_entities(self, pk=None, rk=None):
        """Insert one random entity plus a second with contrasting property
        values; returns the first entity and its create response."""
        entity1 = self._create_random_entity_dict()
        resp = await self.table.create_entity(entity1)

        partition, row = self._create_pk_rk(pk, rk)
        properties = {
            "PartitionKey": partition + u"1",
            "RowKey": row + u"1",
            "age": 49,
            "sex": u"female",
            "married": False,
            "deceased": True,
            "optional": None,
            "ratio": 5.2,
            "evenratio": 6.0,
            "large": 39999011,
            "Birthday": datetime(1993, 4, 1, tzinfo=tzutc()),
            "birthday": datetime(1990, 4, 1, tzinfo=tzutc()),
            "binary": b"binary-binary",
            "other": EntityProperty(40, EdmType.INT32),
            "clsid": uuid.UUID("c8da6455-213e-42d9-9b79-3f9149a57833"),
        }
        await self.table.create_entity(properties)
        return entity1, resp

    async def _insert_random_entity(self, pk=None, rk=None):
        """Insert a random entity into self.table; returns (entity, etag)."""
        entity = self._create_random_entity_dict(pk, rk)
        metadata = await self.table.create_entity(entity=entity)
        return entity, metadata["etag"]

    async def _set_up(self, account_name, credential, url="table"):
        """Bind self.ts/self.table to a fresh uniquely named table; the table
        is only actually created when running live."""
        account_url = self.account_url(account_name, url)
        self.ts = TableServiceClient(account_url, credential=credential)
        self.table_name = self.get_resource_name("uttable")
        self.table = self.ts.get_table_client(self.table_name)
        if self.is_live:
            try:
                await self.ts.create_table(table_name=self.table_name)
            except ResourceExistsError:
                pass

        self.query_tables = []
| mit |
iqas/e2gui | mytest.py | 2 | 27994 | import sys
import os
from time import time
# Make the zipped enigma python package importable when present.
if os.path.isfile("/usr/lib/enigma2/python/enigma.zip"):
	sys.path.append("/usr/lib/enigma2/python/enigma.zip")
from Tools.Profile import profile, profile_final
profile("PYTHON_START")
import Tools.RedirectOutput
import enigma
from boxbranding import getBoxType, getBrandOEM
import eConsoleImpl
import eBaseImpl
# Replace the C++ timer/notifier/console bindings with the python-side
# implementations (eBaseImpl/eConsoleImpl wrappers).
enigma.eTimer = eBaseImpl.eTimer
enigma.eSocketNotifier = eBaseImpl.eSocketNotifier
enigma.eConsoleAppContainer = eConsoleImpl.eConsoleAppContainer
boxtype = getBoxType()
# MediaPortal ships a pyo that needs patching on DM7080/DM820 only.
if os.path.isfile("/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/plugin.pyo") and boxtype in ('dm7080','dm820'):
	import pyo_patcher
# Some code expects /etc/chipset to exist; seed it with a default.
if not os.path.isfile("/etc/chipset"):
	os.system('echo "7400" > /etc/chipset');
from traceback import print_exc
profile("SimpleSummary")
from Screens import InfoBar
from Screens.SimpleSummary import SimpleSummary
from sys import stdout, exc_info
profile("Bouquets")
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, NoSave
# Propagate the "load unlinked userbouquets" setting into the DVB database,
# then (re)load all bouquets with it applied.
config.misc.load_unlinked_userbouquets = ConfigYesNo(default=False)
def setLoadUnlinkedUserbouquets(configElement):
	enigma.eDVBDB.getInstance().setLoadUnlinkedUserbouquets(configElement.value)
config.misc.load_unlinked_userbouquets.addNotifier(setLoadUnlinkedUserbouquets)
enigma.eDVBDB.getInstance().reloadBouquets()
profile("ParentalControl")
import Components.ParentalControl
Components.ParentalControl.InitParentalControl()
profile("LOAD:Navigation")
from Navigation import Navigation
profile("LOAD:skin")
from skin import readSkin
profile("LOAD:Tools")
from Tools.Directories import InitFallbackFiles, resolveFilename, SCOPE_PLUGINS, SCOPE_ACTIVE_SKIN, SCOPE_CURRENT_SKIN, SCOPE_CONFIG
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, ConfigSelection, NoSave
import Components.RecordingConfig
InitFallbackFiles()
profile("config.misc")
# Misc runtime settings: box identity, radio background pictures (a user
# copy in SCOPE_CONFIG overrides the skin default), wakeup bookkeeping,
# time-sync source and counters.
config.misc.boxtype = ConfigText(default = boxtype)
config.misc.blackradiopic = ConfigText(default = resolveFilename(SCOPE_ACTIVE_SKIN, "black.mvi"))
radiopic = resolveFilename(SCOPE_ACTIVE_SKIN, "radio.mvi")
if os.path.exists(resolveFilename(SCOPE_CONFIG, "radio.mvi")):
	radiopic = resolveFilename(SCOPE_CONFIG, "radio.mvi")
config.misc.radiopic = ConfigText(default = radiopic)
#config.misc.isNextRecordTimerAfterEventActionAuto = ConfigYesNo(default=False)
#config.misc.isNextPowerTimerAfterEventActionAuto = ConfigYesNo(default=False)
config.misc.nextWakeup = ConfigText(default = "-1,-1,0,0,-1,0") #wakeup time, timer begins, set by (0=rectimer,1=zaptimer, 2=powertimer or 3=plugin), go in standby, next rectimer, force rectimer
config.misc.SyncTimeUsing = ConfigSelection(default = "0", choices = [("0", "Transponder Time"), ("1", _("NTP"))])
config.misc.NTPserver = ConfigText(default = 'pool.ntp.org', fixed_size=False)
config.misc.startCounter = ConfigInteger(default=0) # number of e2 starts...
config.misc.standbyCounter = NoSave(ConfigInteger(default=0)) # number of standby
config.misc.DeepStandby = NoSave(ConfigYesNo(default=False)) # detect deepstandby
#demo code for use of standby enter leave callbacks
#def leaveStandby():
#	print "!!!!!!!!!!!!!!!!!leave standby"
#def standbyCountChanged(configelement):
#	print "!!!!!!!!!!!!!!!!!enter standby num", configelement.value
#	from Screens.Standby import inStandby
#	inStandby.onClose.append(leaveStandby)
#config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call = False)
####################################################
def useSyncUsingChanged(configelement):
	# Switch the clock source: "0" = DVB transponder time, otherwise NTP.
	# Either way the EPG cache is told the time base changed.
	if config.misc.SyncTimeUsing.value == "0":
		print "[Time By]: Transponder"
		enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(True)
		enigma.eEPGCache.getInstance().timeUpdated()
	else:
		print "[Time By]: NTP"
		enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(False)
		enigma.eEPGCache.getInstance().timeUpdated()
config.misc.SyncTimeUsing.addNotifier(useSyncUsingChanged)

def NTPserverChanged(configelement):
	# Persist a non-default NTP server for the ntpdate script, then sync once.
	if config.misc.NTPserver.value == "pool.ntp.org":
		return
	print "[NTPDATE] save /etc/default/ntpdate"
	f = open("/etc/default/ntpdate", "w")
	f.write('NTPSERVERS="' + config.misc.NTPserver.value + '"')
	f.close()
	os.chmod("/etc/default/ntpdate", 0755)
	from Components.Console import Console
	Console = Console()
	Console.ePopen('/usr/bin/ntpdate-sync')
config.misc.NTPserver.addNotifier(NTPserverChanged, immediate_feedback = True)
profile("Twisted")
# Prefer the twisted reactor (via the enigma-aware e2reactor) as the main
# loop; fall back to the plain enigma mainloop when twisted is missing.
try:
	import e2reactor
	e2reactor.install()
	from twisted.internet import reactor
	def runReactor():
		# Signal handlers stay with enigma, not twisted.
		reactor.run(installSignalHandlers=False)
except ImportError:
	print "twisted not available"
	def runReactor():
		enigma.runMainloop()
profile("LOAD:Plugin")
# initialize autorun plugins and plugin menu entries
from Components.PluginComponent import plugins
profile("LOAD:Wizard")
from Screens.StartWizard import *
import Screens.Rc
from Tools.BoundFunction import boundFunction
from Plugins.Plugin import PluginDescriptor
profile("misc")
# Objects already visited by dump(), keyed by str(value) to break cycles.
had = dict()
def dump(dir, p = ""):
	"""Debug helper: recursively print the attribute tree of *dir*.

	NOTE(review): parameter shadows the builtin dir(); has_key() is
	Python 2 only.
	"""
	if isinstance(dir, dict):
		for (entry, val) in dir.items():
			dump(val, p + "(dict)/" + entry)
	if hasattr(dir, "__dict__"):
		for name, value in dir.__dict__.items():
			if not had.has_key(str(value)):
				had[str(value)] = 1
				dump(value, p + "/" + str(name))
			else:
				# Already seen - print a cycle marker instead of recursing.
				print p + "/" + str(name) + ":" + str(dir.__class__) + "(cycle)"
	else:
		print p + ":" + str(dir)
#		 + ":" + str(dir.__class__)
# display
profile("LOAD:ScreenGlobals")
from Screens.Globals import Globals
from Screens.SessionGlobals import SessionGlobals
from Screens.Screen import Screen
profile("Screen")
# Single Globals instance shared by all screens (session-independent state).
Screen.global_screen = Globals()
# Session.open:
# * push current active dialog ('current_dialog') onto stack
# * call execEnd for this dialog
# * clear in_exec flag
# * hide screen
# * instantiate new dialog into 'current_dialog'
# * create screens, components
# * read, apply skin
# * create GUI for screen
# * call execBegin for new dialog
# * set in_exec
# * show gui screen
# * call components' / screen's onExecBegin
# ... screen is active, until it calls 'close'...
# Session.close:
# * assert in_exec
# * save return value
# * start deferred close handler ('onClose')
# * execEnd
# * clear in_exec
# * hide screen
# .. a moment later:
# Session.doClose:
# * destroy screen
class Session:
	"""Dialog manager for one UI session.

	Exactly one 'current_dialog' is active at a time; suspended dialogs
	live on 'dialog_stack' and their LCD summaries on 'summary_stack'.
	See the lifecycle comments above this class for open/close details.
	"""
	def __init__(self, desktop = None, summary_desktop = None, navigation = None):
		self.desktop = desktop
		self.summary_desktop = summary_desktop
		self.nav = navigation
		# one-shot timer used to defer dialog destruction (see close()).
		self.delay_timer = enigma.eTimer()
		self.delay_timer.callback.append(self.processDelay)
		self.current_dialog = None
		self.dialog_stack = [ ]
		self.summary_stack = [ ]
		self.summary = None
		self.in_exec = False
		self.screen = SessionGlobals(self)
		for p in plugins.getPlugins(PluginDescriptor.WHERE_SESSIONSTART):
			try:
				p(reason=0, session=self)
			except:
				# NOTE(review): bare except keeps one broken plugin from
				# aborting the whole session start.
				print "Plugin raised exception at WHERE_SESSIONSTART"
				import traceback
				traceback.print_exc()
	def processDelay(self):
		"""Deferred tail of close(): destroy the dialog, then run its callback."""
		callback = self.current_dialog.callback
		retval = self.current_dialog.returnValue
		if self.current_dialog.isTmp:
			self.current_dialog.doClose()
#			dump(self.current_dialog)
			del self.current_dialog
		else:
			del self.current_dialog.callback
		self.popCurrent()
		if callback is not None:
			callback(*retval)
	def execBegin(self, first=True, do_show = True):
		"""Activate current_dialog; on first activation also build its summary."""
		assert not self.in_exec
		self.in_exec = True
		c = self.current_dialog
		# when this is an execbegin after a execend of a "higher" dialog,
		# popSummary already did the right thing.
		if first:
			self.instantiateSummaryDialog(c)
		c.saveKeyboardMode()
		c.execBegin()
		# when execBegin opened a new dialog, don't bother showing the old one.
		if c == self.current_dialog and do_show:
			c.show()
	def execEnd(self, last=True):
		"""Deactivate current_dialog; drop its summary when leaving for good."""
		assert self.in_exec
		self.in_exec = False
		self.current_dialog.execEnd()
		self.current_dialog.restoreKeyboardMode()
		self.current_dialog.hide()
		if last:
			self.current_dialog.removeSummary(self.summary)
			self.popSummary()
	def instantiateDialog(self, screen, *arguments, **kwargs):
		"""Create (but neither show nor activate) a dialog on the main desktop."""
		return self.doInstantiateDialog(screen, arguments, kwargs, self.desktop)
	def deleteDialog(self, screen):
		screen.hide()
		screen.doClose()
	def deleteDialogWithCallback(self, callback, screen, *retval):
		screen.hide()
		screen.doClose()
		if callback is not None:
			callback(*retval)
	def instantiateSummaryDialog(self, screen, **kwargs):
		"""Create and show the LCD summary of *screen* on the summary desktop."""
		self.pushSummary()
		summary = screen.createSummary() or SimpleSummary
		arguments = (screen,)
		self.summary = self.doInstantiateDialog(summary, arguments, kwargs, self.summary_desktop)
		self.summary.show()
		screen.addSummary(self.summary)
	def doInstantiateDialog(self, screen, arguments, kwargs, desktop):
		"""Instantiate *screen*, apply its skin and attach it to *desktop*."""
		# create dialog
		dlg = screen(self, *arguments, **kwargs)
		if dlg is None:
			return
		# read skin data
		readSkin(dlg, None, dlg.skinName, desktop)
		# create GUI view of this dialog
		dlg.setDesktop(desktop)
		dlg.applySkin()
		return dlg
	def pushCurrent(self):
		# Suspend the active dialog; it stays hidden on the stack.
		if self.current_dialog is not None:
			self.dialog_stack.append((self.current_dialog, self.current_dialog.shown))
			self.execEnd(last=False)
	def popCurrent(self):
		# Resume the most recently suspended dialog, if any.
		if self.dialog_stack:
			(self.current_dialog, do_show) = self.dialog_stack.pop()
			self.execBegin(first=False, do_show=do_show)
		else:
			self.current_dialog = None
	def execDialog(self, dialog):
		"""Run an externally created, non-temporary dialog."""
		self.pushCurrent()
		self.current_dialog = dialog
		self.current_dialog.isTmp = False
		self.current_dialog.callback = None # would cause re-entrancy problems.
		self.execBegin()
	def openWithCallback(self, callback, screen, *arguments, **kwargs):
		"""open(), plus callback(*retval) once the dialog closes."""
		dlg = self.open(screen, *arguments, **kwargs)
		dlg.callback = callback
		return dlg
	def open(self, screen, *arguments, **kwargs):
		"""Open a new modal dialog of class *screen* on top of the stack."""
		if self.dialog_stack and not self.in_exec:
			raise RuntimeError("modal open are allowed only from a screen which is modal!")
			# ...unless it's the very first screen.
		self.pushCurrent()
		dlg = self.current_dialog = self.instantiateDialog(screen, *arguments, **kwargs)
		dlg.isTmp = True
		dlg.callback = None
		self.execBegin()
		return dlg
	def close(self, screen, *retval):
		"""Close *screen*; actual destruction is deferred to processDelay()."""
		if not self.in_exec:
			print "close after exec!"
			return
		# be sure that the close is for the right dialog!
		# if it's not, you probably closed after another dialog
		# was opened. this can happen if you open a dialog
		# onExecBegin, and forget to do this only once.
		# after close of the top dialog, the underlying will
		# gain focus again (for a short time), thus triggering
		# the onExec, which opens the dialog again, closing the loop.
		assert screen == self.current_dialog
		self.current_dialog.returnValue = retval
		self.delay_timer.start(0, 1)
		self.execEnd()
	def pushSummary(self):
		# Hide and stash the current summary (may be None).
		if self.summary is not None:
			self.summary.hide()
		self.summary_stack.append(self.summary)
		self.summary = None
	def popSummary(self):
		# Destroy the current summary and restore the previous one.
		if self.summary is not None:
			self.summary.doClose()
		self.summary = self.summary_stack.pop()
		if self.summary is not None:
			self.summary.show()
profile("Standby,PowerKey")
import Screens.Standby
from Screens.Menu import MainMenu, mdom
from GlobalActions import globalActionMap
class PowerKey:
	""" PowerKey stuff - handles the powerkey press and powerkey release actions"""
	def __init__(self, session):
		self.session = session
		# Bind all power-related global actions to this instance.
		globalActionMap.actions["power_down"]=self.powerdown
		globalActionMap.actions["power_up"]=self.powerup
		globalActionMap.actions["power_long"]=self.powerlong
		globalActionMap.actions["deepstandby"]=self.shutdown # frontpanel long power button press
		globalActionMap.actions["discrete_off"]=self.standby
		globalActionMap.actions["sleeptimer"]=self.openSleepTimer
		globalActionMap.actions["powertimer_standby"]=self.sleepStandby
		globalActionMap.actions["powertimer_deepstandby"]=self.sleepDeepStandby
		# 1 blocks the short-press action until a fresh power_down arrives.
		self.standbyblocked = 1
	def MenuClosed(self, *val):
		self.session.infobar = None
	def shutdown(self):
		"""Shut down now, or enter standby first when a recording is running
		or about to start (< 6 minutes away)."""
		wasRecTimerWakeup = False
		recordings = self.session.nav.getRecordings(False,Components.RecordingConfig.recType(config.recording.warn_box_restart_rec_types.getValue()))
		if not recordings:
			next_rec_time = self.session.nav.RecordTimer.getNextRecordingTime()
		if recordings or (next_rec_time > 0 and (next_rec_time - time()) < 360):
			if os.path.exists("/tmp/was_rectimer_wakeup") and not self.session.nav.RecordTimer.isRecTimerWakeup():
				f = open("/tmp/was_rectimer_wakeup", "r")
				file = f.read()
				f.close()
				wasRecTimerWakeup = int(file) and True or False
			if self.session.nav.RecordTimer.isRecTimerWakeup() or wasRecTimerWakeup or self.session.nav.RecordTimer.isRecording():
				print "PowerOff (timer wakewup) - Recording in progress or a timer about to activate, entering standby!"
				# Mark the chain of back-to-back recordings (gap < 15 min)
				# to deep-standby after their last one ends.
				lastrecordEnd = 0
				for timer in self.session.nav.RecordTimer.timer_list:
					if lastrecordEnd == 0 or lastrecordEnd >= timer.begin:
						print "Set after-event for recording %s to DEEP-STANDBY." % timer.name
						timer.afterEvent = 2
						if timer.end > lastrecordEnd:
							lastrecordEnd = timer.end + 900
				from Screens.MessageBox import MessageBox
				self.session.openWithCallback(self.gotoStandby,MessageBox,_("PowerOff while Recording in progress!\nEntering standby, after recording the box will shutdown."), type = MessageBox.TYPE_INFO, timeout = 10)
			else:
				print "PowerOff - Now!"
				self.session.open(Screens.Standby.TryQuitMainloop, 1)
		elif not Screens.Standby.inTryQuitMainloop and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND:
			print "PowerOff - Now!"
			self.session.open(Screens.Standby.TryQuitMainloop, 1)
	def powerlong(self):
		# Long press: run the configured long-press action unless a
		# non-suspendable dialog (or the quit screen) is up.
		if Screens.Standby.inTryQuitMainloop or (self.session.current_dialog and not self.session.current_dialog.ALLOW_SUSPEND):
			return
		self.doAction(action = config.usage.on_long_powerpress.value)
	def doAction(self, action):
		"""Dispatch one of the configured power-button actions."""
		self.standbyblocked = 1
		if action == "shutdown":
			self.shutdown()
		elif action == "show_menu":
			print "Show shutdown Menu"
			# Find the "shutdown" submenu in the menu XML and open it.
			root = mdom.getroot()
			for x in root.findall("menu"):
				y = x.find("id")
				if y is not None:
					id = y.get("val")
					if id and id == "shutdown":
						self.session.infobar = self
						menu_screen = self.session.openWithCallback(self.MenuClosed, MainMenu, x)
						menu_screen.setTitle(_("Standby / restart"))
						return
		elif action == "standby":
			self.standby()
		elif action == "powertimerStandby":
			val = 3
			self.setSleepTimer(val)
		elif action == "powertimerDeepStandby":
			val = 4
			self.setSleepTimer(val)
		elif action == "sleeptimer":
			self.openSleepTimer()
	def powerdown(self):
		self.standbyblocked = 0
	def powerup(self):
		# Short press fires only if no long press consumed it in between.
		if self.standbyblocked == 0:
			self.doAction(action = config.usage.on_short_powerpress.value)
	def gotoStandby(self, ret):
		self.standby()
	def standby(self):
		if not Screens.Standby.inStandby and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND and self.session.in_exec:
			self.session.open(Screens.Standby.Standby)
	def openSleepTimer(self):
		from Screens.SleepTimerEdit import SleepTimerEdit
		self.session.open(SleepTimerEdit)
	def setSleepTimer(self, val):
		"""Create a power timer of type *val* (3=standby, 4=deep standby)
		and hand it to the edit dialog."""
		from PowerTimer import PowerTimerEntry
		sleeptime = 15
		data = (int(time() + 60), int(time() + 120))
		self.addSleepTimer(PowerTimerEntry(checkOldTimers = True, *data, timerType = val, autosleepdelay = sleeptime))
	def addSleepTimer(self, timer):
		from Screens.PowerTimerEntry import TimerEntry
		self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
	def finishedAdd(self, answer):
		if answer[0]:
			entry = answer[1]
			simulTimerList = self.session.nav.PowerTimer.record(entry)
	def sleepStandby(self):
		self.doAction(action = "powertimerStandby")
	def sleepDeepStandby(self):
		self.doAction(action = "powertimerDeepStandby")
profile("Scart")
from Screens.Scart import Scart
class AutoScartControl:
	"""Watches the VCR slow-blanking signal and drives the Scart dialog."""

	def __init__(self, session):
		self.force = False
		avswitch = enigma.eAVSwitch.getInstance()
		self.current_vcr_sb = avswitch.getVCRSlowBlanking()
		# The dialog starts "armed" only when a SB level is present and the
		# user enabled automatic VCR switching.
		allow_switch = bool(self.current_vcr_sb and config.av.vcrswitch.value)
		self.scartDialog = session.instantiateDialog(Scart, allow_switch)
		config.av.vcrswitch.addNotifier(self.recheckVCRSb)
		avswitch.vcr_sb_notifier.get().append(self.VCRSbChanged)

	def recheckVCRSb(self, configelement):
		# Config toggled: re-evaluate against the last known SB level.
		self.VCRSbChanged(self.current_vcr_sb)

	def VCRSbChanged(self, value):
		self.current_vcr_sb = value
		if not (config.av.vcrswitch.value or value > 2):
			return
		if value:
			self.scartDialog.showMessageBox()
		else:
			self.scartDialog.switchToTV()
profile("Load:CI")
from Screens.Ci import CiHandler
profile("Load:VolumeControl")
from Components.VolumeControl import VolumeControl
from time import time, localtime, strftime
from Tools.StbHardware import setFPWakeuptime, setRTCtime
def autorestoreLoop():
	"""Return True if the automatic settings restore should (re)run.

	A counter file tracks how many restore attempts have been made; after
	three attempts we give up and return False so the setup wizard runs
	instead (this avoids an endless restore loop on a broken backup).

	The counter file is best-effort: a corrupt counter is treated as the
	first attempt, and a failed write never aborts the restore itself.
	"""
	flagfile = "/media/hdd/images/config/autorestore"
	count = 0
	if os.path.exists(flagfile):
		try:
			with open(flagfile, "r") as f:
				count = int(f.read().strip())
		except (IOError, OSError, ValueError):
			# Unreadable or corrupt counter: treat as the first attempt
			# instead of crashing during startup.
			count = 0
		if count >= 3:
			return False
	count += 1
	try:
		with open(flagfile, "w") as f:
			f.write(str(count))
	except (IOError, OSError):
		pass  # best effort; the restore should still proceed
	return True
def runScreenTest():
config.misc.startCounter.value += 1
profile("readPluginList")
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
profile("Init:Session")
nav = Navigation(config.misc.nextWakeup.value)
session = Session(desktop = enigma.getDesktop(0), summary_desktop = enigma.getDesktop(1), navigation = nav)
CiHandler.setSession(session)
profile("wizards")
screensToRun = []
RestoreSettings = None
if os.path.exists("/media/hdd/images/config/settings") and config.misc.firstrun.value:
if autorestoreLoop():
RestoreSettings = True
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import RestoreScreen
session.open(RestoreScreen, runRestore = True)
else:
screensToRun = [ p.__call__ for p in plugins.getPlugins(PluginDescriptor.WHERE_WIZARD) ]
screensToRun += wizardManager.getWizards()
else:
if os.path.exists("/media/hdd/images/config/autorestore"):
os.system('rm -f /media/hdd/images/config/autorestore')
screensToRun = [ p.__call__ for p in plugins.getPlugins(PluginDescriptor.WHERE_WIZARD) ]
screensToRun += wizardManager.getWizards()
screensToRun.append((100, InfoBar.InfoBar))
screensToRun.sort()
print screensToRun
enigma.ePythonConfigQuery.setQueryFunc(configfile.getResolvedKey)
def runNextScreen(session, screensToRun, *result):
if result:
print "[mytest.py] quitMainloop #3"
enigma.quitMainloop(*result)
return
screen = screensToRun[0][1]
args = screensToRun[0][2:]
if screensToRun:
session.openWithCallback(boundFunction(runNextScreen, session, screensToRun[1:]), screen, *args)
else:
session.open(screen, *args)
if not RestoreSettings:
runNextScreen(session, screensToRun)
profile("Init:VolumeControl")
vol = VolumeControl(session)
profile("Init:PowerKey")
power = PowerKey(session)
if boxtype in ('sf3038', 'spycat', 'e4hd', 'e4hdhybrid', 'mbmicro', 'et7500', 'mixosf5', 'mixosf7', 'mixoslumi', 'gi9196m', 'maram9', 'ixussone', 'ixussone', 'uniboxhd1', 'uniboxhd2', 'uniboxhd3', 'sezam5000hd', 'mbtwin', 'sezam1000hd', 'mbmini', 'atemio5x00', 'beyonwizt3') or getBrandOEM() in ('fulan'):
profile("VFDSYMBOLS")
import Components.VfdSymbols
Components.VfdSymbols.SymbolsCheck(session)
# we need session.scart to access it from within menu.xml
session.scart = AutoScartControl(session)
profile("Init:Trashcan")
import Tools.Trashcan
Tools.Trashcan.init(session)
profile("Init:AutoVideoMode")
import Screens.VideoMode
Screens.VideoMode.autostart(session)
profile("RunReactor")
profile_final()
if boxtype in ('sf8', 'classm', 'axodin', 'axodinc', 'starsatlx', 'genius', 'evo'):
f = open("/dev/dbox/oled0", "w")
f.write('-E2-')
f.close()
print "lastshutdown=%s (True = last shutdown was OK)" % config.usage.shutdownOK.value
print "NOK shutdown action=%s" % config.usage.shutdownNOK_action.value
print "bootup action=%s" % config.usage.boot_action.value
if not config.usage.shutdownOK.value and not config.usage.shutdownNOK_action.value == 'normal' or not config.usage.boot_action.value == 'normal':
print "last shutdown = %s" % config.usage.shutdownOK.value
import Screens.PowerLost
Screens.PowerLost.PowerLost(session)
config.usage.shutdownOK.setValue(False)
config.usage.shutdownOK.save()
if not RestoreSettings:
configfile.save()
runReactor()
print "[mytest.py] normal shutdown"
config.misc.startCounter.save()
config.usage.shutdownOK.setValue(True)
config.usage.shutdownOK.save()
profile("wakeup")
#get currentTime
nowTime = time()
# if not config.misc.SyncTimeUsing.value == "0" or getBrandOEM() == 'gigablue':
if not config.misc.SyncTimeUsing.value == "0" or boxtype.startswith('gb') or getBrandOEM().startswith('ini'):
print "dvb time sync disabled... so set RTC now to current linux time!", strftime("%Y/%m/%d %H:%M", localtime(nowTime))
setRTCtime(nowTime)
#recordtimer
if session.nav.isRecordTimerImageStandard: #check RecordTimer instance
tmp = session.nav.RecordTimer.getNextRecordingTime(getNextStbPowerOn = True)
nextRecordTime = tmp[0]
nextRecordTimeInStandby = tmp[1]
else:
nextRecordTime = session.nav.RecordTimer.getNextRecordingTime()
nextRecordTimeInStandby = session.nav.RecordTimer.isNextRecordAfterEventActionAuto()
#zaptimer
nextZapTime = session.nav.RecordTimer.getNextZapTime()
nextZapTimeInStandby = 0
#powertimer
tmp = session.nav.PowerTimer.getNextPowerManagerTime(getNextStbPowerOn = True)
nextPowerTime = tmp[0]
nextPowerTimeInStandby = tmp[1]
#plugintimer
tmp = plugins.getNextWakeupTime(getPluginIdent = True)
nextPluginTime = tmp[0]
nextPluginIdent = tmp[1] #"pluginname | pluginfolder"
tmp = tmp[1].lower()
#start in standby, depending on plugin type
if "epgrefresh" in tmp:
nextPluginName = "EPGRefresh"
nextPluginTimeInStandby = 1
elif "vps" in tmp:
nextPluginName = "VPS"
nextPluginTimeInStandby = 1
elif "serienrecorder" in tmp:
nextPluginName = "SerienRecorder"
nextPluginTimeInStandby = 0 # plugin function for deep standby from standby not compatible (not available)
elif "elektro" in tmp:
nextPluginName = "Elektro"
nextPluginTimeInStandby = 1
elif "minipowersave" in tmp:
nextPluginName = "MiniPowersave"
nextPluginTimeInStandby = 1
elif "enhancedpowersave" in tmp:
nextPluginName = "EnhancedPowersave"
nextPluginTimeInStandby = 1
else:
#default for plugins
nextPluginName = nextPluginIdent
nextPluginTimeInStandby = 0
wakeupList = [
x for x in ((nextRecordTime, 0, nextRecordTimeInStandby),
(nextZapTime, 1, nextZapTimeInStandby),
(nextPowerTime, 2, nextPowerTimeInStandby),
(nextPluginTime, 3, nextPluginTimeInStandby))
if x[0] != -1
]
wakeupList.sort()
# individual wakeup time offset
if config.workaround.wakeuptimeoffset.value == "standard":
if boxtype.startswith("gb"):
wpoffset = -120 # Gigaboxes already starts 2 min. before wakeup time
else:
wpoffset = 0
else:
wpoffset = int(config.workaround.wakeuptimeoffset.value)
print "="*100
if wakeupList and wakeupList[0][0] > 0:
startTime = wakeupList[0]
# wakeup time is 5 min before timer starts + offset
wptime = startTime[0] - 300 - wpoffset
if (wptime - nowTime) < 120: # no time to switch box back on
wptime = int(nowTime) + 120 # so switch back on in 120 seconds
#check for plugin-, zap- or power-timer to enable the 'forced' record-timer wakeup
forceNextRecord = 0
setStandby = startTime[2]
if startTime[1] != 0 and nextRecordTime > 0:
#when next record starts in 15 mins
if abs(nextRecordTime - startTime[0]) <= 900:
setStandby = forceNextRecord = 1
#by vps-plugin
elif startTime[1] == 3 and nextPluginName == "VPS":
setStandby = forceNextRecord = 1
if startTime[1] == 3:
nextPluginName = " (%s)" % nextPluginName
else:
nextPluginName = ""
print "[mytest.py] set next wakeup type to '%s'%s %s" % ({0:"record-timer",1:"zap-timer",2:"power-timer",3:"plugin-timer"}[startTime[1]], nextPluginName, {0:"and starts normal",1:"and starts in standby"}[setStandby])
if forceNextRecord:
print "[mytest.py] set from 'vps-plugin' or just before a 'record-timer' starts, set 'record-timer' wakeup flag"
print "[mytest.py] set next wakeup time to", strftime("%a, %Y/%m/%d %H:%M:%S", localtime(wptime))
#set next wakeup
setFPWakeuptime(wptime)
#set next standby only after shutdown in deep standby
if Screens.Standby.quitMainloopCode != 1:
setStandby = 2 # 0=no standby, but get in standby if wakeup to timer start > 60 sec (not for plugin-timer, here is no standby), 1=standby, 2=no standby, when before was not in deep-standby
config.misc.nextWakeup.value = "%d,%d,%d,%d,%d,%d" % (wptime,startTime[0],startTime[1],setStandby,nextRecordTime,forceNextRecord)
else:
config.misc.nextWakeup.value = "-1,-1,0,0,-1,0"
setFPWakeuptime(int(nowTime) - 3600) #minus one hour -> overwrite old wakeup time
print "[mytest.py] no set next wakeup time"
config.misc.nextWakeup.save()
print "="*100
profile("stopService")
session.nav.stopService()
profile("nav shutdown")
session.nav.shutdown()
profile("configfile.save")
configfile.save()
from Screens import InfoBarGenerics
InfoBarGenerics.saveResumePoints()
return 0
profile("Init:skin")
import skin
skin.loadSkinData(enigma.getDesktop(0))
profile("InputDevice")
import Components.InputDevice
Components.InputDevice.InitInputDevices()
import Components.InputHotplug
profile("SetupDevices")
import Components.SetupDevices
Components.SetupDevices.InitSetupDevices()
profile("AVSwitch")
import Components.AVSwitch
Components.AVSwitch.InitAVSwitch()
Components.AVSwitch.InitiVideomodeHotplug()
profile("RecordingConfig")
import Components.RecordingConfig
Components.RecordingConfig.InitRecordingConfig()
profile("UsageConfig")
import Components.UsageConfig
Components.UsageConfig.InitUsageConfig()
profile("Init:DebugLogCheck")
import Screens.LogManager
Screens.LogManager.AutoLogManager()
#profile("Init:OnlineCheckState")
#import Components.OnlineUpdateCheck
#Components.OnlineUpdateCheck.OnlineUpdateCheck()
profile("Init:NTPSync")
import Components.NetworkTime
Components.NetworkTime.AutoNTPSync()
profile("keymapparser")
import keymapparser
keymapparser.readKeymap(config.usage.keymap.value)
profile("Network")
import Components.Network
Components.Network.InitNetwork()
profile("LCD")
import Components.Lcd
Components.Lcd.InitLcd()
Components.Lcd.IconCheck()
# Disable internal clock vfd for ini5000 until we can adjust it for standby
if boxtype in ('uniboxhd1', 'uniboxhd2', 'uniboxhd3', 'sezam5000hd', 'mbtwin', 'beyonwizt3'):
try:
f = open("/proc/stb/fp/enable_clock", "r").readline()[:-1]
if f != '0':
f = open("/proc/stb/fp/enable_clock", "w")
f.write('0')
f.close()
except:
print "Error disable enable_clock for ini5000 boxes"
profile("UserInterface")
import Screens.UserInterfacePositioner
Screens.UserInterfacePositioner.InitOsd()
profile("EpgCacheSched")
import Components.EpgLoadSave
Components.EpgLoadSave.EpgCacheSaveCheck()
Components.EpgLoadSave.EpgCacheLoadCheck()
profile("RFMod")
import Components.RFmod
Components.RFmod.InitRFmod()
profile("Init:CI")
import Screens.Ci
Screens.Ci.InitCiConfig()
profile("RcModel")
import Components.RcModel
#from enigma import dump_malloc_stats
#t = eTimer()
#t.callback.append(dump_malloc_stats)
#t.start(1000)
# first, setup a screen
try:
runScreenTest()
plugins.shutdown()
Components.ParentalControl.parentalControl.save()
except:
print 'EXCEPTION IN PYTHON STARTUP CODE:'
print '-'*60
print_exc(file=stdout)
print "[mytest.py] quitMainloop #4"
enigma.quitMainloop(5)
print '-'*60
| gpl-2.0 |
bossino/dra-kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
zasdfgbnm/tensorflow | tensorflow/python/estimator/export/export_output_test.py | 26 | 10163 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
class ExportOutputTest(test.TestCase):
def test_regress_value_must_be_float(self):
value = array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
with self.assertRaises(ValueError) as e:
export_output_lib.RegressionOutput(value)
self.assertEqual('Regression output value must be a float32 Tensor; got '
'Tensor("output-tensor-1:0", shape=(1,), dtype=string)',
str(e.exception))
def test_classify_classes_must_be_strings(self):
classes = array_ops.placeholder(dtypes.float32, 1, name="output-tensor-1")
with self.assertRaises(ValueError) as e:
export_output_lib.ClassificationOutput(classes=classes)
self.assertEqual('Classification classes must be a string Tensor; got '
'Tensor("output-tensor-1:0", shape=(1,), dtype=float32)',
str(e.exception))
def test_classify_scores_must_be_float(self):
scores = array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
with self.assertRaises(ValueError) as e:
export_output_lib.ClassificationOutput(scores=scores)
self.assertEqual('Classification scores must be a float32 Tensor; got '
'Tensor("output-tensor-1:0", shape=(1,), dtype=string)',
str(e.exception))
def test_classify_requires_classes_or_scores(self):
with self.assertRaises(ValueError) as e:
export_output_lib.ClassificationOutput()
self.assertEqual("At least one of scores and classes must be set.",
str(e.exception))
def test_build_standardized_signature_def_regression(self):
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.string, 1, name="input-tensor-1")
}
value = array_ops.placeholder(dtypes.float32, 1, name="output-tensor-1")
export_output = export_output_lib.RegressionOutput(value)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.REGRESS_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.REGRESS_OUTPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name="output-tensor-1:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classify_classes_only(self):
"""Tests classification with one output tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.string, 1, name="input-tensor-1")
}
classes = array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
export_output = export_output_lib.ClassificationOutput(classes=classes)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(name="output-tensor-1:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classify_both(self):
"""Tests multiple output tensors that include classes and scores."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.string, 1, name="input-tensor-1")
}
classes = array_ops.placeholder(dtypes.string, 1,
name="output-tensor-classes")
scores = array_ops.placeholder(dtypes.float32, 1,
name="output-tensor-scores")
export_output = export_output_lib.ClassificationOutput(
scores=scores, classes=classes)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(name="output-tensor-classes:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(name="output-tensor-scores:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classify_scores_only(self):
"""Tests classification without classes tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.string, 1, name="input-tensor-1")
}
scores = array_ops.placeholder(dtypes.float32, 1,
name="output-tensor-scores")
export_output = export_output_lib.ClassificationOutput(
scores=scores)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(name="output-tensor-scores:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_predict_outputs_valid(self):
"""Tests that no errors are raised when provided outputs are valid."""
outputs = {
"output0": constant_op.constant([0]),
u"output1": constant_op.constant(["foo"]),
}
export_output_lib.PredictOutput(outputs)
# Single Tensor is OK too
export_output_lib.PredictOutput(constant_op.constant([0]))
def test_predict_outputs_invalid(self):
with self.assertRaisesRegexp(
ValueError,
"Prediction output key must be a string"):
export_output_lib.PredictOutput({1: constant_op.constant([0])})
with self.assertRaisesRegexp(
ValueError,
"Prediction output value must be a Tensor"):
export_output_lib.PredictOutput({
"prediction1": sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
})
if __name__ == "__main__":
test.main()
| apache-2.0 |
ssanderson/docker-py | docker/errors.py | 3 | 2372 | # Copyright 2014 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
class APIError(requests.exceptions.HTTPError):
    """HTTP error returned by the Docker daemon, with an optional explanation
    extracted from the response body."""

    def __init__(self, message, response, explanation=None):
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 doesn't
        super(APIError, self).__init__(message)
        self.response = response
        if explanation is None and response.content:
            explanation = response.content.strip()
        self.explanation = explanation

    def __str__(self):
        message = super(APIError, self).__str__()
        status = self.response.status_code
        if self.is_client_error():
            message = '{0} Client Error: {1}'.format(status, self.response.reason)
        elif self.is_server_error():
            message = '{0} Server Error: {1}'.format(status, self.response.reason)
        if self.explanation:
            message = '{0} ("{1}")'.format(message, self.explanation)
        return message

    def is_client_error(self):
        """True when the response status is a 4xx code."""
        return 400 <= self.response.status_code < 500

    def is_server_error(self):
        """True when the response status is a 5xx code."""
        return 500 <= self.response.status_code < 600
class DockerException(Exception):
    """Base class for all docker-py specific errors."""
class InvalidVersion(DockerException):
    """Raised when an unsupported API version is requested."""
class InvalidRepository(DockerException):
    """Raised for malformed repository names."""
class InvalidConfigFile(DockerException):
    """Raised when the Docker config file cannot be parsed."""
class DeprecatedMethod(DockerException):
    """Raised when a removed/deprecated client method is invoked."""
class TLSParameterError(DockerException):
    """Raised for invalid or inconsistent TLS configuration parameters."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        hint = (". TLS configurations should map the Docker CLI "
                "client configurations. See "
                "http://docs.docker.com/examples/https/ for "
                "API details.")
        return self.msg + hint
| apache-2.0 |
leiserfg/flask-restplus | tests/legacy/test_api_with_blueprint.py | 3 | 7165 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import flask
import six
from flask import Blueprint, request
import flask_restplus as restplus
from nose.tools import assert_true, assert_false # you need it for tests in form of continuations
from .. import TestCase, Mock
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(restplus.Resource):
    """Dummy resource used to verify that the app is properly set."""

    def get(self):
        return {}
class GoodbyeWorld(restplus.Resource):
    """Resource that always aborts with a configurable HTTP error code."""

    def __init__(self, err):
        self.err = err

    def get(self):
        flask.abort(self.err)
class APIWithBlueprintTestCase(TestCase):
def test_api_base(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
self.app.register_blueprint(blueprint)
self.assertEquals(api.urls, {})
self.assertEquals(api.prefix, '')
self.assertEquals(api.default_mediatype, 'application/json')
def test_api_delayed_initialization(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api()
api.init_app(blueprint)
self.app.register_blueprint(blueprint)
api.add_resource(HelloWorld, '/', endpoint="hello")
def test_add_resource_endpoint(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
view = Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
self.app.register_blueprint(blueprint)
view.as_view.assert_called_with('bar', api)
def test_add_resource_endpoint_after_registration(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
self.app.register_blueprint(blueprint)
view = Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
view.as_view.assert_called_with('bar', api)
def test_url_with_api_prefix(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
self.app.register_blueprint(blueprint)
with self.app.test_request_context('/api/hi'):
self.assertEquals(request.endpoint, 'test.hello')
def test_url_with_blueprint_prefix(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
self.app.register_blueprint(blueprint)
with self.app.test_request_context('/bp/hi'):
self.assertEquals(request.endpoint, 'test.hello')
def test_url_with_registration_prefix(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
self.app.register_blueprint(blueprint, url_prefix='/reg')
with self.app.test_request_context('/reg/hi'):
self.assertEquals(request.endpoint, 'test.hello')
def test_registration_prefix_overrides_blueprint_prefix(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
self.app.register_blueprint(blueprint, url_prefix='/reg')
with self.app.test_request_context('/reg/hi'):
self.assertEquals(request.endpoint, 'test.hello')
def test_url_with_api_and_blueprint_prefix(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restplus.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
self.app.register_blueprint(blueprint)
with self.app.test_request_context('/bp/api/hi'):
self.assertEquals(request.endpoint, 'test.hello')
def test_error_routing(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
self.app.register_blueprint(blueprint)
with self.app.test_request_context('/hi', method='POST'):
assert_true(api._should_use_fr_error_handler())
assert_true(api._has_fr_route())
with self.app.test_request_context('/bye'):
api._should_use_fr_error_handler = Mock(return_value=False)
assert_true(api._has_fr_route())
def test_non_blueprint_rest_error_routing(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
self.app.register_blueprint(blueprint, url_prefix='/blueprint')
api2 = restplus.Api(self.app)
api2.add_resource(HelloWorld(api), '/hi', endpoint="hello")
api2.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
with self.app.test_request_context('/hi', method='POST'):
assert_false(api._should_use_fr_error_handler())
assert_true(api2._should_use_fr_error_handler())
assert_false(api._has_fr_route())
assert_true(api2._has_fr_route())
with self.app.test_request_context('/blueprint/hi', method='POST'):
assert_true(api._should_use_fr_error_handler())
assert_false(api2._should_use_fr_error_handler())
assert_true(api._has_fr_route())
assert_false(api2._has_fr_route())
api._should_use_fr_error_handler = Mock(return_value=False)
api2._should_use_fr_error_handler = Mock(return_value=False)
with self.app.test_request_context('/bye'):
assert_false(api._has_fr_route())
assert_true(api2._has_fr_route())
with self.app.test_request_context('/blueprint/bye'):
assert_true(api._has_fr_route())
assert_false(api2._has_fr_route())
def test_non_blueprint_non_rest_error_routing(self):
blueprint = Blueprint('test', __name__)
api = restplus.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
self.app.register_blueprint(blueprint, url_prefix='/blueprint')
@self.app.route('/hi')
def hi():
return 'hi'
@self.app.route('/bye')
def bye():
flask.abort(404)
with self.app.test_request_context('/hi', method='POST'):
assert_false(api._should_use_fr_error_handler())
assert_false(api._has_fr_route())
with self.app.test_request_context('/blueprint/hi', method='POST'):
assert_true(api._should_use_fr_error_handler())
assert_true(api._has_fr_route())
api._should_use_fr_error_handler = Mock(return_value=False)
with self.app.test_request_context('/bye'):
assert_false(api._has_fr_route())
with self.app.test_request_context('/blueprint/bye'):
assert_true(api._has_fr_route())
| mit |
yury-s/v8-inspector | Source/chrome/build/landmine_utils.py | 76 | 3014 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import shlex
import sys
def memoize(default=None):
"""This decorator caches the return value of a parameterless pure function"""
def memoizer(func):
val = []
@functools.wraps(func)
def inner():
if not val:
ret = func()
val.append(ret if ret is not None else default)
if logging.getLogger().isEnabledFor(logging.INFO):
print '%s -> %r' % (func.__name__, val[0])
return val[0]
return inner
return memoizer
@memoize()
def IsWindows():
  """True when building on Windows (including Cygwin)."""
  return sys.platform in ('win32', 'cygwin')
@memoize()
def IsLinux():
  """True on Linux, FreeBSD and OpenBSD hosts."""
  return sys.platform.startswith(('linux', 'freebsd', 'openbsd'))
@memoize()
def IsMac():
  """True on macOS hosts."""
  return sys.platform == 'darwin'
@memoize()
def gyp_defines():
  """Parses and returns GYP_DEFINES env var as a dictionary."""
  raw = os.environ.get('GYP_DEFINES', '')
  return dict(item.split('=', 1) for item in shlex.split(raw))
@memoize()
def gyp_generator_flags():
  """Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
  raw = os.environ.get('GYP_GENERATOR_FLAGS', '')
  return dict(item.split('=', 1) for item in shlex.split(raw))
@memoize()
def gyp_msvs_version():
  """Returns the GYP_MSVS_VERSION env var, or '' when unset."""
  return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
  """
  Returns a string which is the distributed build engine in use (if any).
  Possible values: 'goma', 'ib', ''
  """
  if 'goma' in gyp_defines():
    return 'goma'
  elif IsWindows():
    if 'CHROME_HEADLESS' in os.environ:
      return 'ib'  # use (win and !goma and headless) as approximation of ib
  # Fix: the docstring promises '' when no distributor is in use, but the
  # original fell off the end and implicitly returned None.
  return ''
@memoize()
def platform():
  """
  Returns a string representing the platform this build is targetted for.
  Possible values: 'win', 'mac', 'linux', 'ios', 'android'
  """
  defines = gyp_defines()
  if 'OS' in defines:
    # An explicit OS define wins; any value containing 'android' maps to it.
    if 'android' in defines['OS']:
      return 'android'
    return defines['OS']
  if IsWindows():
    return 'win'
  if IsLinux():
    return 'linux'
  return 'mac'
@memoize()
def builder():
  """
  Returns a string representing the build engine (not compiler) to use.
  Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
  """
  if 'GYP_GENERATORS' in os.environ:
    # for simplicity, only support the first explicit generator
    generator = os.environ['GYP_GENERATORS'].split(',')[0]
    if generator.endswith('-android'):
      return generator.split('-')[0]
    elif generator.endswith('-ninja'):
      return 'ninja'
    else:
      return generator
  else:
    if platform() == 'android':
      # Good enough for now? Do any android bots use make?
      return 'ninja'
    elif platform() == 'ios':
      return 'xcode'
    elif IsWindows():
      return 'ninja'
    elif IsLinux():
      return 'ninja'
    elif IsMac():
      return 'ninja'
    else:
      # Fix: a bare 'assert False' is stripped under 'python -O', which would
      # silently cache None; raise the same exception type unconditionally.
      raise AssertionError('Don\'t know what builder we\'re using!')
| bsd-3-clause |
alx-eu/django | tests/regressiontests/extra_regress/models.py | 114 | 1365 | from __future__ import unicode_literals
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RevisionableModel(models.Model):
    """Model whose instances form revision chains through the ``base`` FK."""
    base = models.ForeignKey('self', null=True)
    title = models.CharField(blank=True, max_length=255)
    when = models.DateTimeField(default=datetime.datetime.now)

    def __str__(self):
        return "%s (%s, %s)" % (self.title, self.id, self.base.id)

    def save(self, *args, **kwargs):
        super(RevisionableModel, self).save(*args, **kwargs)
        if not self.base:
            # First save: the row becomes the root of its own revision chain,
            # which requires a second save once the pk exists.  The insert-only
            # flags must not be replayed on that second save.
            self.base = self
            kwargs.pop('force_insert', None)
            kwargs.pop('force_update', None)
            super(RevisionableModel, self).save(*args, **kwargs)

    def new_revision(self):
        """Return an unsaved shallow copy to be stored as the next revision."""
        duplicate = copy.copy(self)
        duplicate.pk = None
        return duplicate
class Order(models.Model):
    """Minimal model tying a ``User`` to a free-form text payload."""
    created_by = models.ForeignKey(User)
    text = models.TextField()
@python_2_unicode_compatible
class TestObject(models.Model):
    """Simple three-field model used as a generic query target."""
    first = models.CharField(max_length=20)
    second = models.CharField(max_length=20)
    third = models.CharField(max_length=20)

    def __str__(self):
        return 'TestObject: %s,%s,%s' % (self.first,self.second,self.third)
| bsd-3-clause |
"""A thin wrapper for OpenGL pbuffer objects. For implementation use only"""
import ctypes, sys
if sys.platform=='darwin':
    #### Darwin (Carbon, AGL)
    class Pbuffer (object):
        """Pbuffer stub: AGL backend not implemented."""
        def __init__ (self, *args, **kwargs):
            raise Exception ("AGL Pbuffers not implemented")
elif sys.platform in ('win32', 'cygwin'):
    #### Win32 (WGL)
    class Pbuffer (object):
        """Pbuffer stub: WGL backend not implemented."""
        def __init__ (self, *args, **kwargs):
            raise Exception ("WGL Pbuffers not implemented")
else:
    #### GLX based systems
    from pyglet.gl.glx import *

    class Pbuffer (object):
        """
        Wrapper for OpenGL pbuffer extensions.
        See http://oss.sgi.com/projects/ogl-sample/registry/SGIX/pbuffer.txt for details
        Caveats:
        - Only GLX version implemented
        - not tested, may not be working
        """
        def __init__ (self, window, attrs=[], width=None, height=None):
            """Create new pbuffer with given size, on same display as `window'
            (`width', `height') is the same as the window, if not specified
            attrs is a list of alternating GLX_PBUFFER_<attr>, <value>. For example,
            [GLX_DEPTH, 32, GLX_DOUBLEBUFFER, 1]
            """
            # NOTE(review): mutable default `attrs=[]` is only read here,
            # never mutated, so the shared-default pitfall does not bite.
            self.window = window
            # Get configuration options (zero-terminated attrib list).
            conf_attrs = (ctypes.c_int * (len(attrs)+1))(*(attrs+[0]))
            num = ctypes.c_int (0)
            conf = glXChooseFBConfig (window._x_display, 0, conf_attrs, ctypes.byref(num))
            # Pbuffer configuration: default to the window's dimensions.
            if width is None: width = window.width
            if height is None: height = window.height
            # BUG FIX: the second attribute was GLX_PBUFFER_WIDTH in the
            # original, so the height value was written to the width key
            # and the pbuffer height was never specified.
            pbuf_attrs = (ctypes.c_int * 5)(
                GLX_PBUFFER_WIDTH, width,
                GLX_PBUFFER_HEIGHT, height, 0)
            self.width = width
            self.height = height
            # Create a pbuffer with the first matching configuration.
            self.pbuf = None
            for c in range (num.value):
                pbuf = glXCreatePbuffer (window._x_display, conf[c], pbuf_attrs)
                if pbuf:
                    self.pbuf = pbuf
                    break
            if self.pbuf is None:
                raise Exception ("No valid configuration for pbuffer found")
            # Create a context for the buffer, sharing objects with the
            # window's context (`c` still indexes the successful config).
            self.ctx = glXCreateNewContext (window._x_display, conf[c], GLX_RGBA_TYPE, self.window._context._context, True)
            if not self.ctx:
                raise Exception ("Failed to create context for this buffer")

        def switch_to(self):
            """Set this pbuffer as current GL context"""
            ok = glXMakeContextCurrent (self.window._x_display, self.pbuf, self.pbuf, self.ctx)
            if not ok:
                raise Exception("Failed to switch GL context")
| gpl-3.0 |
ybdesire/apk_sdk_analysis | common/Androguard-2.0/androguard/decompiler/dad/ast.py | 20 | 23793 | # This file is part of Androguard.
#
# Copyright (C) 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This file is a simplified version of writer.py that outputs an AST instead of source code.'''
import struct
from androguard.decompiler.dad import basic_blocks, instruction, opcode_ins
# Constructors for JSON-serializable expression nodes.  Each node is a
# plain list whose first element is the node kind, second the children,
# followed by node-specific payload.

def array_access(arr, ind):
    """``arr[ind]``"""
    return ['ArrayAccess', [arr, ind]]

def array_creation(tn, params, dim):
    """``new tn[...]`` with *dim* dimensions."""
    return ['ArrayCreation', [tn] + params, dim]

def array_initializer(params, tn=None):
    """``{a, b, ...}`` initializer, optionally typed."""
    return ['ArrayInitializer', params, tn]

def assignment(lhs, rhs, op=''):
    """``lhs op= rhs`` (plain ``=`` when *op* is empty)."""
    return ['Assignment', [lhs, rhs], op]

def binary_infix(op, left, right):
    """``left op right``"""
    return ['BinaryInfix', [left, right], op]

def cast(tn, arg):
    """``(tn) arg``"""
    return ['Cast', [tn, arg]]

def field_access(triple, left):
    """``left.field`` where *triple* is (class, name, type)."""
    return ['FieldAccess', [left], triple]

def literal(result, tt):
    """Literal *result* of type *tt*."""
    return ['Literal', result, tt]

def local(name):
    """Reference to a local variable."""
    return ['Local', name]

def method_invocation(triple, name, base, params):
    """``base.name(params)``; a static call when *base* is None."""
    if base is None:
        return ['MethodInvocation', params, triple, name, False]
    return ['MethodInvocation', [base] + params, triple, name, True]

def parenthesis(expr):
    """``(expr)``"""
    return ['Parenthesis', [expr]]

def typen(baset, dim):
    """Type name *baset* with *dim* array dimensions."""
    return ['TypeName', (baset, dim)]

def unary_prefix(op, left):
    """``op left``"""
    return ['Unary', [left], op, False]

def unary_postfix(left, op):
    """``left op``"""
    return ['Unary', [left], op, True]

def var_decl(typen, var):
    """Variable declarator: a (type, variable) pair."""
    return [typen, var]

def dummy(*args):
    """Opaque placeholder node for constructs we cannot express."""
    return ['Dummy', args]
################################################################################
def expression_stmt(expr): return ['ExpressionStatement', expr]
def local_decl_stmt(expr, decl): return ['LocalDeclarationStatement', expr, decl]
def return_stmt(expr): return ['ReturnStatement', expr]
def throw_stmt(expr): return ['ThrowStatement', expr]
def jump_stmt(keyword): return ['JumpStatement', keyword, None]
def loop_stmt(isdo, cond_expr, body):
type_ = 'DoStatement' if isdo else 'WhileStatement'
return [type_, None, cond_expr, body]
def try_stmt(tryb, pairs): return ['TryStatement', None, tryb, pairs]
def if_stmt(cond_expr, scopes): return ['IfStatement', None, cond_expr, scopes]
def switch_stmt(cond_expr, ksv_pairs):
return ['SwitchStatement', None, cond_expr, ksv_pairs]
# Create empty statement block (statements to be appended later)
# Note, the code below assumes this can be modified in place
def statement_block(): return ['BlockStatement', None, []]
# Add a statement to the end of a statement block
def _append(sb, stmt):
assert(sb[0] == 'BlockStatement')
if stmt is not None:
sb[2].append(stmt)
################################################################################
# JVM primitive descriptor character -> Java type name.
TYPE_DESCRIPTOR = {
    'V': 'void',
    'Z': 'boolean',
    'B': 'byte',
    'S': 'short',
    'C': 'char',
    'I': 'int',
    'J': 'long',
    'F': 'float',
    'D': 'double',
}

def parse_descriptor(desc):
    """Convert a JVM type descriptor string into a TypeName node.

    Leading '[' characters count array dimensions.  Primitive names are
    prefixed with '.'; class descriptors ``Lfoo/Bar;`` keep their internal
    name.  Anything else yields a Dummy node.
    """
    dim = 0
    while desc and desc.startswith('['):
        dim += 1
        desc = desc[1:]
    if desc in TYPE_DESCRIPTOR:
        return typen('.' + TYPE_DESCRIPTOR[desc], dim)
    if desc and desc.startswith('L') and desc.endswith(';'):
        return typen(desc[1:-1], dim)
    # invalid descriptor (probably None)
    return dummy(str(desc))
# Note: the literal_foo functions (and dummy) are also imported by decompile.py
def literal_string(s):
    """Return a Literal node holding *s* as a quoted, escaped Java string.

    NOTE(review): Python 2 code — *s* is expected to be a UTF-8 encoded
    byte string (it is ``.decode('utf8')``-ed below).
    """
    escapes = {
        '\0':'\\0',
        '\t':'\\t',
        '\r':'\\r',
        '\n':'\\n',
        '"':'\\"',
        '\\':'\\\\'
    }
    buf = ['"']
    for c in s.decode('utf8'):
        if c in escapes:
            buf.append(escapes[c])
        elif ' ' <= c < '\x7f':
            # Printable ASCII passes through unchanged.
            buf.append(c)
        else:
            # Everything else becomes a Java \\uXXXX escape.  Under
            # Python 2 the '\u' in this literal is a literal backslash-u
            # (it would be a syntax error under Python 3).
            buf.append('\u{:04x}'.format(ord(c)))
    buf.append('"')
    return literal(''.join(buf), ('java/lang/String', 0))

def literal_class(desc):
    # Class literal, e.g. ``Foo.class``; payload is the parsed type node.
    return literal(parse_descriptor(desc), ('java/lang/Class', 0))

# Primitive literals: the payload is the Java source spelling.
def literal_bool(b): return literal(str(b).lower(), ('.boolean', 0))
def literal_int(b): return literal(str(b), ('.int', 0))
def literal_hex_int(b): return literal(hex(b), ('.int', 0))
def literal_long(b): return literal(str(b)+'L', ('.long', 0))
def literal_float(f): return literal(str(f)+'f', ('.float', 0))
def literal_double(f): return literal(str(f), ('.double', 0))
def literal_null(): return literal('null', ('.null', 0))
def visit_decl(var, init_expr=None):
    """Build a local-declaration statement for IR variable *var*,
    optionally initialized with *init_expr*."""
    t = parse_descriptor(var.get_type())
    v = local('v{}'.format(var.name))
    return local_decl_stmt(init_expr, var_decl(t, v))
def visit_arr_data(value):
    """Decode a fill-array-data payload into an ArrayInitializer node.

    Only 4-byte (int) and 1-byte (byte) element widths are decoded; other
    widths fall into the 1-byte branch (see FIXME below).
    """
    data = value.get_data()
    tab = []
    elem_size = value.element_width
    if elem_size == 4:
        # Little-endian 32-bit signed ints, 4 bytes per element.
        for i in range(0, value.size * 4, 4):
            tab.append(struct.unpack('<i', data[i:i + 4])[0])
    else: # FIXME: other cases
        # NOTE(review): Python 2 — ``data[i]`` is a 1-char str, which is
        # what '<b' unpack expects here.
        for i in range(value.size):
            tab.append(struct.unpack('<b', data[i])[0])
    return array_initializer(map(literal_int, tab))
def write_inplace_if_possible(lhs, rhs):
    """Emit ``lhs = rhs``, preferring compact forms.

    When *rhs* is a binary expression whose first operand is *lhs* itself,
    emit ``lhs op= rhs2`` instead; when the operator is +/- with constant 1,
    emit a postfix ``lhs++`` / ``lhs--``.
    """
    if isinstance(rhs, instruction.BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
        exp_rhs = rhs.var_map[rhs.arg2]
        # post increment/decrement
        if rhs.op in '+-' and isinstance(exp_rhs, instruction.Constant) and exp_rhs.get_int_value() == 1:
            # '+' * 2 -> '++', '-' * 2 -> '--'
            return unary_postfix(visit_expr(lhs), rhs.op * 2)
        # compound assignment
        return assignment(visit_expr(lhs), visit_expr(exp_rhs), op=rhs.op)
    return assignment(visit_expr(lhs), visit_expr(rhs))
def visit_expr(op):
    """Recursively convert a DAD IR expression *op* into an AST node.

    One isinstance branch per IR node type; operands are resolved through
    ``op.var_map`` before recursing.  Unknown node types yield a '???'
    Dummy node instead of raising.
    """
    if isinstance(op, instruction.ArrayLengthExpression):
        expr = visit_expr(op.var_map[op.array])
        # ``arr.length`` — synthetic field access with no class/type info.
        return field_access([None, 'length', None], expr)
    if isinstance(op, instruction.ArrayLoadExpression):
        array_expr = visit_expr(op.var_map[op.array])
        index_expr = visit_expr(op.var_map[op.idx])
        return array_access(array_expr, index_expr)
    if isinstance(op, instruction.ArrayStoreInstruction):
        array_expr = visit_expr(op.var_map[op.array])
        index_expr = visit_expr(op.var_map[op.index])
        rhs = visit_expr(op.var_map[op.rhs])
        return assignment(array_access(array_expr, index_expr), rhs)
    if isinstance(op, instruction.AssignExpression):
        lhs = op.var_map.get(op.lhs)
        rhs = op.rhs
        if lhs is None:
            # Result unused: emit the RHS as a bare expression.
            return visit_expr(rhs)
        return write_inplace_if_possible(lhs, rhs)
    if isinstance(op, instruction.BaseClass):
        if op.clsdesc is None:
            assert(op.cls == "super")
            return local(op.cls)
        return parse_descriptor(op.clsdesc)
    if isinstance(op, instruction.BinaryExpression):
        lhs = op.var_map.get(op.arg1)
        rhs = op.var_map.get(op.arg2)
        expr = binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
        # Comparison results are left bare; arithmetic is parenthesized
        # defensively since operand precedence is not tracked.
        if not isinstance(op, instruction.BinaryCompExpression):
            expr = parenthesis(expr)
        return expr
    if isinstance(op, instruction.CheckCastExpression):
        lhs = op.var_map.get(op.arg)
        return parenthesis(cast(parse_descriptor(op.clsdesc), visit_expr(lhs)))
    if isinstance(op, instruction.ConditionalExpression):
        lhs = op.var_map.get(op.arg1)
        rhs = op.var_map.get(op.arg2)
        return binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
    if isinstance(op, instruction.ConditionalZExpression):
        # Comparison against zero/null; the emitted form depends on the
        # operand's declared type.
        arg = op.var_map[op.arg]
        if isinstance(arg, instruction.BinaryCompExpression):
            # Fold the zero-test's operator into the pending comparison.
            arg.op = op.op
            return visit_expr(arg)
        expr = visit_expr(arg)
        atype = arg.get_type()
        if atype == 'Z':
            # boolean: ``x == 0`` becomes ``!x``; ``x != 0`` stays ``x``.
            if op.op == opcode_ins.Op.EQUAL:
                expr = unary_prefix('!', expr)
        elif atype in 'VBSCIJFD':
            # numeric primitives: compare against literal 0
            expr = binary_infix(op.op, expr, literal_int(0))
        else:
            # references: compare against null
            expr = binary_infix(op.op, expr, literal_null())
        return expr
    if isinstance(op, instruction.Constant):
        # NOTE(review): ``cst`` vs ``cst2`` selection mirrors how the
        # Constant IR node stores its value per type — confirm against
        # instruction.Constant.
        if op.type == 'Ljava/lang/String;':
            return literal_string(op.cst)
        elif op.type == 'Z':
            return literal_bool(op.cst == 0)
        elif op.type in 'ISCB':
            return literal_int(op.cst2)
        elif op.type in 'J':
            return literal_long(op.cst2)
        elif op.type in 'F':
            return literal_float(op.cst)
        elif op.type in 'D':
            return literal_double(op.cst)
        elif op.type == 'Ljava/lang/Class;':
            return literal_class(op.clsdesc)
        return dummy('???')
    if isinstance(op, instruction.FillArrayExpression):
        array_expr = visit_expr(op.var_map[op.reg])
        rhs = visit_arr_data(op.value)
        return assignment(array_expr, rhs)
    if isinstance(op, instruction.FilledArrayExpression):
        tn = parse_descriptor(op.type)
        params = [visit_expr(op.var_map[x]) for x in op.args]
        return array_initializer(params, tn)
    if isinstance(op, instruction.InstanceExpression):
        # Instance field read: triple = (class, field, type).
        triple = op.clsdesc[1:-1], op.name, op.ftype
        expr = visit_expr(op.var_map[op.arg])
        return field_access(triple, expr)
    if isinstance(op, instruction.InstanceInstruction):
        # Instance field write.
        triple = op.clsdesc[1:-1], op.name, op.atype
        lhs = field_access(triple, visit_expr(op.var_map[op.lhs]))
        rhs = visit_expr(op.var_map[op.rhs])
        return assignment(lhs, rhs)
    if isinstance(op, instruction.InvokeInstruction):
        base = op.var_map[op.base]
        params = [op.var_map[arg] for arg in op.args]
        params = map(visit_expr, params)
        if op.name == '<init>':
            # Constructor invocation: ``this(...)``, ``new Foo(...)``, or
            # (on an existing variable) fall through to a dummy call.
            if isinstance(base, instruction.ThisParam):
                return method_invocation(op.triple, 'this', None, params)
            elif isinstance(base, instruction.NewInstance):
                return ['ClassInstanceCreation', params, parse_descriptor(base.type)]
            else:
                assert(isinstance(base, instruction.Variable))
                # fallthrough to create dummy <init> call
        return method_invocation(op.triple, op.name, visit_expr(base), params)
    # for unmatched monitor instructions, just create dummy expressions
    if isinstance(op, instruction.MonitorEnterExpression):
        return dummy("monitor enter(", visit_expr(op.var_map[op.ref]), ")")
    if isinstance(op, instruction.MonitorExitExpression):
        return dummy("monitor exit(", visit_expr(op.var_map[op.ref]), ")")
    if isinstance(op, instruction.MoveExpression):
        lhs = op.var_map.get(op.lhs)
        rhs = op.var_map.get(op.rhs)
        return write_inplace_if_possible(lhs, rhs)
    if isinstance(op, instruction.MoveResultExpression):
        lhs = op.var_map.get(op.lhs)
        rhs = op.var_map.get(op.rhs)
        return assignment(visit_expr(lhs), visit_expr(rhs))
    if isinstance(op, instruction.NewArrayExpression):
        # op.type is '[T'; strip one '[' for the element type.
        tn = parse_descriptor(op.type[1:])
        expr = visit_expr(op.var_map[op.size])
        return array_creation(tn, [expr], 1)
    # create dummy expression for unmatched newinstance
    if isinstance(op, instruction.NewInstance):
        return dummy("new ", parse_descriptor(op.type))
    if isinstance(op, instruction.Param):
        if isinstance(op, instruction.ThisParam):
            return local('this')
        return local('p{}'.format(op.v))
    if isinstance(op, instruction.StaticExpression):
        # Static field read.
        triple = op.clsdesc[1:-1], op.name, op.ftype
        return field_access(triple, parse_descriptor(op.clsdesc))
    if isinstance(op, instruction.StaticInstruction):
        # Static field write.
        triple = op.clsdesc[1:-1], op.name, op.ftype
        lhs = field_access(triple, parse_descriptor(op.clsdesc))
        rhs = visit_expr(op.var_map[op.rhs])
        return assignment(lhs, rhs)
    if isinstance(op, instruction.SwitchExpression):
        return visit_expr(op.var_map[op.src])
    if isinstance(op, instruction.UnaryExpression):
        lhs = op.var_map.get(op.arg)
        if isinstance(op, instruction.CastExpression):
            expr = cast(parse_descriptor(op.clsdesc), visit_expr(lhs))
        else:
            expr = unary_prefix(op.op, visit_expr(lhs))
        return parenthesis(expr)
    if isinstance(op, instruction.Variable):
        # assert(op.declared)
        return local('v{}'.format(op.name))
    return dummy('???')
def visit_ins(op, isCtor=False):
    """Convert one IR instruction into a statement node, or None to skip.

    *isCtor* enables suppression of the implicit ``this()`` call at the
    top of constructors.
    """
    if isinstance(op, instruction.ReturnInstruction):
        expr = None if op.arg is None else visit_expr(op.var_map[op.arg])
        return return_stmt(expr)
    elif isinstance(op, instruction.ThrowExpression):
        return throw_stmt(visit_expr(op.var_map[op.ref]))
    elif isinstance(op, instruction.NopExpression):
        return None
    # Local var decl statements
    if isinstance(op, (instruction.AssignExpression, instruction.MoveExpression, instruction.MoveResultExpression)):
        lhs = op.var_map.get(op.lhs)
        rhs = op.rhs if isinstance(op, instruction.AssignExpression) else op.var_map.get(op.rhs)
        if isinstance(lhs, instruction.Variable) and not lhs.declared:
            # First write to this variable: emit a declaration-with-init
            # and mark it declared so later writes are plain assignments.
            lhs.declared = True
            expr = visit_expr(rhs)
            return visit_decl(lhs, expr)
    # skip this() at top of constructors
    if isCtor and isinstance(op, instruction.AssignExpression):
        op2 = op.rhs
        if op.lhs is None and isinstance(op2, instruction.InvokeInstruction):
            if op2.name == '<init>' and len(op2.args) == 0:
                if isinstance(op2.var_map[op2.base], instruction.ThisParam):
                    return None
    # MoveExpression is skipped when lhs = rhs
    if isinstance(op, instruction.MoveExpression):
        if op.var_map.get(op.lhs) is op.var_map.get(op.rhs):
            return None
    return expression_stmt(visit_expr(op))
class JSONWriter(object):
    """Walks a DAD control-flow graph and emits a JSON-serializable AST.

    The various ``*_follow`` / ``latch_node`` lists act as stacks of
    "stop nodes": :meth:`visit_node` returns immediately when it reaches
    the node currently on top of any of them, which is how structured
    regions (ifs, loops, switches, trys) are delimited during traversal.
    """
    def __init__(self, graph, method):
        self.graph = graph
        self.method = method
        self.visited_nodes = set()
        self.loop_follow = [None]
        self.if_follow = [None]
        self.switch_follow = [None]
        self.latch_node = [None]
        self.try_follow = [None]
        self.next_case = None
        self.need_break = True
        self.constructor = False
        # Stack of open BlockStatement nodes; see __enter__/__exit__.
        self.context = []

    # This class is created as a context manager so that it can be used like
    #    with self as foo:
    #        ...
    # which pushes a statement block on to the context stack and assigns it to foo
    # within the with block, all added instructions will be added to foo
    def __enter__(self):
        self.context.append(statement_block())
        return self.context[-1]

    def __exit__(self, *args):
        self.context.pop()
        # Never suppress exceptions.
        return False

    # Add a statement to the current context
    def add(self, val): _append(self.context[-1], val)

    def visit_ins(self, op):
        self.add(visit_ins(op, isCtor=self.constructor))

    # Note: this is a mutating operation
    def get_ast(self):
        """Return the method's AST as a dict (mutates method flags)."""
        m = self.method
        flags = m.access
        if 'constructor' in flags:
            flags.remove('constructor')
            self.constructor = True
        params = m.lparams[:]
        if 'static' not in m.access:
            # Drop the implicit 'this' parameter for instance methods.
            params = params[1:]
        # DAD doesn't create any params for abstract methods
        if len(params) != len(m.params_type):
            assert('abstract' in flags or 'native' in flags)
            assert(not params)
            params = range(len(m.params_type))
        paramdecls = []
        for ptype, name in zip(m.params_type, params):
            t = parse_descriptor(ptype)
            v = local('p{}'.format(name))
            paramdecls.append(var_decl(t, v))
        if self.graph is None:
            body = None
        else:
            with self as body:
                self.visit_node(self.graph.entry)
        return {
            'triple': m.triple,
            'flags': flags,
            'ret': parse_descriptor(m.type),
            'params': paramdecls,
            'comments': [],
            'body': body,
        }

    def _visit_condition(self, cond):
        # Short-circuit condition: combine the two sub-conditions with
        # && or ||, negating the first when the block is marked 'isnot'.
        if cond.isnot:
            cond.cond1.neg()
        left = parenthesis(self.get_cond(cond.cond1))
        right = parenthesis(self.get_cond(cond.cond2))
        op = '&&' if cond.isand else '||'
        res = binary_infix(op, left, right)
        return res

    def get_cond(self, node):
        """Extract the branch condition expression from a cond-like block."""
        if isinstance(node, basic_blocks.ShortCircuitBlock):
            return self._visit_condition(node.cond)
        elif isinstance(node, basic_blocks.LoopBlock):
            return self.get_cond(node.cond)
        else:
            assert(type(node) == basic_blocks.CondBlock)
            assert(len(node.ins) == 1)
            return visit_expr(node.ins[-1])

    def visit_node(self, node):
        # Stop at the follow node of the innermost enclosing construct.
        if node in (self.if_follow[-1], self.switch_follow[-1],
                    self.loop_follow[-1], self.latch_node[-1],
                    self.try_follow[-1]):
            return
        # Return nodes may legitimately be revisited; everything else once.
        if not node.type.is_return and node in self.visited_nodes:
            return
        self.visited_nodes.add(node)
        # Emit pending declarations attached to this node.
        for var in node.var_to_declare:
            if not var.declared:
                self.add(visit_decl(var))
                var.declared = True
        node.visit(self)

    def visit_loop_node(self, loop):
        isDo = cond_expr = body = None
        follow = loop.follow['loop']
        if loop.looptype.is_pretest:
            # Normalize so the loop body is the 'true' branch.
            if loop.true is follow:
                loop.neg()
                loop.true, loop.false = loop.false, loop.true
            isDo = False
            cond_expr = self.get_cond(loop)
        elif loop.looptype.is_posttest:
            isDo = True
            self.latch_node.append(loop.latch)
        elif loop.looptype.is_endless:
            # Rendered as while(true).
            isDo = False
            cond_expr = literal_bool(True)
        with self as body:
            self.loop_follow.append(follow)
            if loop.looptype.is_pretest:
                self.visit_node(loop.true)
            else:
                self.visit_node(loop.cond)
            self.loop_follow.pop()
            if loop.looptype.is_pretest:
                pass
            elif loop.looptype.is_posttest:
                # Condition only becomes known after visiting the body.
                self.latch_node.pop()
                cond_expr = self.get_cond(loop.latch)
            else:
                self.visit_node(loop.latch)
        assert(cond_expr is not None and isDo is not None)
        self.add(loop_stmt(isDo, cond_expr, body))
        if follow is not None:
            self.visit_node(follow)

    def visit_cond_node(self, cond):
        cond_expr = None
        scopes = []
        follow = cond.follow['if']
        if cond.false is cond.true:
            # Degenerate if: both branches identical — emit condition for
            # its side effects, then the single target.
            self.add(expression_stmt(self.get_cond(cond)))
            self.visit_node(cond.true)
            return
        if cond.false is self.loop_follow[-1]:
            # Prefer 'if (!c) break;'-free shape: make the loop exit the
            # true branch.
            cond.neg()
            cond.true, cond.false = cond.false, cond.true
        if self.loop_follow[-1] in (cond.true, cond.false):
            # One branch leaves the loop: emit 'if (c) break; else ...'.
            cond_expr = self.get_cond(cond)
            with self as scope:
                self.add(jump_stmt('break'))
            scopes.append(scope)
            with self as scope:
                self.visit_node(cond.false)
            scopes.append(scope)
            self.add(if_stmt(cond_expr, scopes))
        elif follow is not None:
            if cond.true in (follow, self.next_case) or \
                    cond.num > cond.true.num:
                # or cond.true.num > cond.false.num:
                cond.neg()
                cond.true, cond.false = cond.false, cond.true
            self.if_follow.append(follow)
            if cond.true: # in self.visited_nodes:
                cond_expr = self.get_cond(cond)
                with self as scope:
                    self.visit_node(cond.true)
                scopes.append(scope)
            is_else = not (follow in (cond.true, cond.false))
            if is_else and not cond.false in self.visited_nodes:
                with self as scope:
                    self.visit_node(cond.false)
                scopes.append(scope)
            self.if_follow.pop()
            self.add(if_stmt(cond_expr, scopes))
            self.visit_node(follow)
        else:
            # No follow node: both branches terminate the region.
            cond_expr = self.get_cond(cond)
            with self as scope:
                self.visit_node(cond.true)
            scopes.append(scope)
            with self as scope:
                self.visit_node(cond.false)
            scopes.append(scope)
            self.add(if_stmt(cond_expr, scopes))

    def visit_switch_node(self, switch):
        lins = switch.get_ins()
        # All instructions but the switch itself are plain statements.
        for ins in lins[:-1]:
            self.visit_ins(ins)
        switch_ins = switch.get_ins()[-1]
        cond_expr = visit_expr(switch_ins)
        ksv_pairs = []
        follow = switch.follow['switch']
        cases = switch.cases
        self.switch_follow.append(follow)
        default = switch.default
        for i, node in enumerate(cases):
            if node in self.visited_nodes:
                continue
            cur_ks = switch.node_to_case[node][:]
            # next_case lets fallthrough detection suppress 'break'.
            if i + 1 < len(cases):
                self.next_case = cases[i + 1]
            else:
                self.next_case = None
            if node is default:
                # Default shares a body with this case; None marks it.
                cur_ks.append(None)
                default = None
            with self as body:
                self.visit_node(node)
                if self.need_break:
                    self.add(jump_stmt('break'))
                else:
                    self.need_break = True
            ksv_pairs.append((cur_ks, body))
        if default not in (None, follow):
            with self as body:
                self.visit_node(default)
            ksv_pairs.append(([None], body))
        self.add(switch_stmt(cond_expr, ksv_pairs))
        self.switch_follow.pop()
        self.visit_node(follow)

    def visit_statement_node(self, stmt):
        sucs = self.graph.sucs(stmt)
        for ins in stmt.get_ins():
            self.visit_ins(ins)
        if len(sucs) == 1:
            if sucs[0] is self.loop_follow[-1]:
                self.add(jump_stmt('break'))
            elif sucs[0] is self.next_case:
                # Falls through to the next switch case: no break.
                self.need_break = False
            else:
                self.visit_node(sucs[0])

    def visit_try_node(self, try_node):
        with self as tryb:
            self.try_follow.append(try_node.follow)
            self.visit_node(try_node.try_start)
        pairs = []
        for catch_node in try_node.catch:
            if catch_node.exception_ins:
                # The catch binds the exception to a named variable.
                ins = catch_node.exception_ins
                assert(isinstance(ins, instruction.MoveExceptionExpression))
                var = ins.var_map[ins.ref]
                var.declared = True
                ctype = var.get_type()
                name = 'v{}'.format(var.name)
            else:
                # Exception value unused: bind to placeholder '_'.
                ctype = catch_node.catch_type
                name = '_'
            catch_decl = var_decl(parse_descriptor(ctype), local(name))
            with self as body:
                self.visit_node(catch_node.catch_start)
            pairs.append((catch_decl, body))
        self.add(try_stmt(tryb, pairs))
        self.visit_node(self.try_follow.pop())

    def visit_return_node(self, ret):
        # A return never needs a trailing switch 'break'.
        self.need_break = False
        for ins in ret.get_ins():
            self.visit_ins(ins)

    def visit_throw_node(self, throw):
        for ins in throw.get_ins():
            self.visit_ins(ins)
deNULL/stagger | test/friendly.py | 16 | 15820 | #!/usr/bin/env python3
#
# friendly.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
import os.path
import warnings
import stagger
from stagger.id3 import *
class FriendlyTestCase(unittest.TestCase):
    def testTitle22(self):
        """Friendly ``title`` attribute round-trips through TT2 on v2.2 tags."""
        tag = stagger.Tag22()
        tag[TT2] = "Foobar"
        self.assertEqual(tag.title, "Foobar")
        # Multiple text values are joined with ' / '.
        tag[TT2] = ("Foo", "Bar")
        self.assertEqual(tag.title, "Foo / Bar")
        tag.title = "Baz"
        self.assertEqual(tag[TT2], TT2(text=["Baz"]))
        self.assertEqual(tag.title, "Baz")
        # A ' / ' in the friendly value splits back into multiple strings.
        tag.title = "Quux / Xyzzy"
        self.assertEqual(tag[TT2], TT2(text=["Quux", "Xyzzy"]))
        self.assertEqual(tag.title, "Quux / Xyzzy")
    def testTitle(self):
        """Same as testTitle22, for v2.3/v2.4 tags (TIT2 frame)."""
        for tagcls in stagger.Tag23, stagger.Tag24:
            tag = tagcls()
            tag[TIT2] = "Foobar"
            self.assertEqual(tag.title, "Foobar")
            tag[TIT2] = ("Foo", "Bar")
            self.assertEqual(tag.title, "Foo / Bar")
            tag.title = "Baz"
            self.assertEqual(tag[TIT2], TIT2(text=["Baz"]))
            self.assertEqual(tag.title, "Baz")
            tag.title = "Quux / Xyzzy"
            self.assertEqual(tag[TIT2], TIT2(text=["Quux", "Xyzzy"]))
            self.assertEqual(tag.title, "Quux / Xyzzy")
    def testTextFrames(self):
        """Every friendly text attribute maps to its frame on all tag versions."""
        for tagcls in stagger.Tag22, stagger.Tag23, stagger.Tag24:
            tag = tagcls()
            for attr, frame in (("title", TIT2),
                                ("artist", TPE1),
                                ("album_artist", TPE2),
                                ("album", TALB),
                                ("composer", TCOM),
                                ("genre", TCON),
                                ("grouping", TIT1),
                                ("sort_title", TSOT),
                                ("sort_artist", TSOP),
                                ("sort_album_artist", TSO2),
                                ("sort_album", TSOA),
                                ("sort_composer", TSOC)):
                if tagcls == stagger.Tag22:
                    # v2.2 uses the three-letter counterpart of each frame.
                    frame = frame._v2_frame
                # No frame -> empty string
                self.assertEqual(getattr(tag, attr), "")
                # Set by frameid, check via friendly name
                tag[frame] = "Foobar"
                self.assertEqual(getattr(tag, attr), "Foobar")
                tag[frame] = ("Foo", "Bar")
                self.assertEqual(getattr(tag, attr), "Foo / Bar")
                # Set by friendly name, check via frame id
                setattr(tag, attr, "Baz")
                self.assertEqual(getattr(tag, attr), "Baz")
                self.assertEqual(tag[frame], frame(text=["Baz"]))
                setattr(tag, attr, "Quux / Xyzzy")
                self.assertEqual(getattr(tag, attr), "Quux / Xyzzy")
                self.assertEqual(tag[frame], frame(text=["Quux", "Xyzzy"]))
                # Set to empty string, check frame is gone
                setattr(tag, attr, "")
                self.assertTrue(frame not in tag)
                # Repeat, should not throw KeyError
                setattr(tag, attr, "")
                self.assertTrue(frame not in tag)
    def testTrackFrames(self):
        """track/track_total and disc/disc_total map to 'n/m' TRCK/TPOS text."""
        for tagcls in stagger.Tag22, stagger.Tag23, stagger.Tag24:
            tag = tagcls()
            for track, total, frame in (("track", "track_total", TRCK),
                                        ("disc", "disc_total", TPOS)):
                if tagcls == stagger.Tag22:
                    frame = frame._v2_frame
                # No frame -> zero values
                self.assertEqual(getattr(tag, track), 0)
                self.assertEqual(getattr(tag, total), 0)
                # Set by frameid, check via friendly name
                tag[frame] = "12"
                self.assertEqual(getattr(tag, track), 12)
                self.assertEqual(getattr(tag, total), 0)
                tag[frame] = "12/24"
                self.assertEqual(getattr(tag, track), 12)
                self.assertEqual(getattr(tag, total), 24)
                # Unparseable text reads back as zeros.
                tag[frame] = "Foobar"
                self.assertEqual(getattr(tag, track), 0)
                self.assertEqual(getattr(tag, total), 0)
                # Set by friendly name, check via frame id
                setattr(tag, track, 7)
                self.assertEqual(getattr(tag, track), 7)
                self.assertEqual(getattr(tag, total), 0)
                self.assertEqual(tag[frame], frame(text=["7"]))
                setattr(tag, total, 21)
                self.assertEqual(getattr(tag, track), 7)
                self.assertEqual(getattr(tag, total), 21)
                self.assertEqual(tag[frame], frame(text=["7/21"]))
                # Set to 0/0, check frame is gone
                setattr(tag, total, 0)
                self.assertEqual(getattr(tag, track), 7)
                self.assertEqual(getattr(tag, total), 0)
                self.assertEqual(tag[frame], frame(text=["7"]))
                setattr(tag, track, 0)
                self.assertEqual(getattr(tag, track), 0)
                self.assertEqual(getattr(tag, total), 0)
                self.assertTrue(frame not in tag)
                # Repeat, should not throw
                setattr(tag, track, 0)
                setattr(tag, total, 0)
                self.assertTrue(frame not in tag)
                # Set just the total
                setattr(tag, total, 13)
                self.assertEqual(tag[frame], frame(text=["0/13"]))
    def testDate22_23(self):
        """On v2.2/v2.3 the friendly ``date`` splits across year/date/time frames."""
        for tagcls, yearframe, dateframe, timeframe in ((stagger.Tag22, TYE, TDA, TIM),
                                                        (stagger.Tag23, TYER, TDAT, TIME)):
            tag = tagcls()
            # Check empty
            self.assertEqual(tag.date, "")
            # Set to empty
            tag.date = ""
            self.assertEqual(tag.date, "")
            # Set a year (surrounding whitespace is stripped)
            tag.date = "2009"
            self.assertEqual(tag.date, "2009")
            tag.date = " 2009 "
            self.assertEqual(tag.date, "2009")
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertTrue(dateframe not in tag)
            self.assertTrue(timeframe not in tag)
            # Partial date: the month alone cannot be stored, only the year survives
            tag.date = "2009-07"
            self.assertEqual(tag.date, "2009")
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertTrue(dateframe not in tag)
            self.assertTrue(timeframe not in tag)
            # Full date
            tag.date = "2009-07-12"
            self.assertEqual(tag.date, "2009-07-12")
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertEqual(tag[dateframe], dateframe("0712"))
            self.assertTrue(timeframe not in tag)
            # Date + time (seconds are dropped; 'T' separator accepted)
            tag.date = "2009-07-12 18:01"
            self.assertEqual(tag.date, "2009-07-12 18:01")
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertEqual(tag[dateframe], dateframe("0712"))
            self.assertEqual(tag[timeframe], timeframe("1801"))
            tag.date = "2009-07-12 18:01:23"
            self.assertEqual(tag.date, "2009-07-12 18:01")
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertEqual(tag[dateframe], dateframe("0712"))
            self.assertEqual(tag[timeframe], timeframe("1801"))
            tag.date = "2009-07-12T18:01:23"
            self.assertEqual(tag.date, "2009-07-12 18:01")
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertEqual(tag[dateframe], dateframe("0712"))
            self.assertEqual(tag[timeframe], timeframe("1801"))
            # Truncate to year only
            tag.date = "2009"
            self.assertEqual(tag[yearframe], yearframe("2009"))
            self.assertTrue(dateframe not in tag)
            self.assertTrue(timeframe not in tag)
    def testDate24(self):
        """On v2.4 the friendly ``date`` is stored whole in a single TDRC frame."""
        tag = stagger.Tag24()
        # Check empty
        self.assertEqual(tag.date, "")
        # Set to empty
        tag.date = ""
        self.assertEqual(tag.date, "")
        # Set a year
        tag.date = "2009"
        self.assertEqual(tag.date, "2009")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
        tag.date = " 2009 "
        self.assertEqual(tag.date, "2009")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
        # Progressively more precise timestamps all round-trip losslessly.
        tag.date = "2009-07"
        self.assertEqual(tag.date, "2009-07")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
        tag.date = "2009-07-12"
        self.assertEqual(tag.date, "2009-07-12")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
        tag.date = "2009-07-12 18:01"
        self.assertEqual(tag.date, "2009-07-12 18:01")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
        tag.date = "2009-07-12 18:01:23"
        self.assertEqual(tag.date, "2009-07-12 18:01:23")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
        # 'T' separator is normalized to a space.
        tag.date = "2009-07-12T18:01:23"
        self.assertEqual(tag.date, "2009-07-12 18:01:23")
        self.assertEqual(tag[TDRC], TDRC(tag.date))
    def testPicture22(self):
        """Friendly ``picture`` reads/writes the v2.2 PIC frame from a file path."""
        tag = stagger.Tag22()
        # Check empty
        self.assertEqual(tag.picture, "")
        # Set to empty
        tag.picture = ""
        self.assertEqual(tag.picture, "")
        self.assertTrue(PIC not in tag)
        # Assigning a filename loads the image into a PIC frame.
        tag.picture = os.path.join(os.path.dirname(__file__), "samples", "cover.jpg")
        self.assertEqual(tag[PIC][0].type, 0)
        self.assertEqual(tag[PIC][0].desc, "")
        self.assertEqual(tag[PIC][0].format, "JPG")
        self.assertEqual(len(tag[PIC][0].data), 60511)
        self.assertEqual(tag.picture, "Other(0)::<60511 bytes of jpeg data>")
        # Set to empty
        tag.picture = ""
        self.assertEqual(tag.picture, "")
        self.assertTrue(PIC not in tag)
    def testPicture23_24(self):
        """Friendly ``picture`` reads/writes the APIC frame on v2.3/v2.4 tags."""
        for tagcls in stagger.Tag23, stagger.Tag24:
            tag = tagcls()
            # Check empty
            self.assertEqual(tag.picture, "")
            # Set to empty
            tag.picture = ""
            self.assertEqual(tag.picture, "")
            self.assertTrue(APIC not in tag)
            # Set picture.
            tag.picture = os.path.join(os.path.dirname(__file__), "samples", "cover.jpg")
            self.assertEqual(tag[APIC][0].type, 0)
            self.assertEqual(tag[APIC][0].desc, "")
            # v2.3+ stores a MIME type rather than the v2.2 3-letter format.
            self.assertEqual(tag[APIC][0].mime, "image/jpeg")
            self.assertEqual(len(tag[APIC][0].data), 60511)
            self.assertEqual(tag.picture, "Other(0)::<60511 bytes of jpeg data>")
            # Set to empty
            tag.picture = ""
            self.assertEqual(tag.picture, "")
            self.assertTrue(APIC not in tag)
    def testComment(self):
        """The friendly 'comment' attribute round-trips through an
        empty-description, English-language COM/COMM frame in all versions;
        assigning "" removes the frame entirely."""
        for tagcls, frameid in ((stagger.Tag22, COM),
                                (stagger.Tag23, COMM),
                                (stagger.Tag24, COMM)):
            tag = tagcls()
            # Comment should be the empty string in an empty tag.
            self.assertEqual(tag.comment, "")
            # Try to delete non-existent comment.
            tag.comment = ""
            self.assertEqual(tag.comment, "")
            self.assertTrue(frameid not in tag)
            # Set comment.
            tag.comment = "Foobar"
            self.assertEqual(tag.comment, "Foobar")
            self.assertTrue(frameid in tag)
            self.assertEqual(len(tag[frameid]), 1)
            self.assertEqual(tag[frameid][0].lang, "eng")
            self.assertEqual(tag[frameid][0].desc, "")
            self.assertEqual(tag[frameid][0].text, "Foobar")
            # Override comment.
            tag.comment = "Baz"
            self.assertEqual(tag.comment, "Baz")
            self.assertTrue(frameid in tag)
            self.assertEqual(len(tag[frameid]), 1)
            self.assertEqual(tag[frameid][0].lang, "eng")
            self.assertEqual(tag[frameid][0].desc, "")
            self.assertEqual(tag[frameid][0].text, "Baz")
            # Delete comment.
            tag.comment = ""
            self.assertEqual(tag.comment, "")
            self.assertTrue(frameid not in tag)
    def testCommentWithExtraFrame(self):
        "Test getting/setting the comment when other comments are present."
        for tagcls, frameid in ((stagger.Tag22, COM),
                                (stagger.Tag23, COMM),
                                (stagger.Tag24, COMM)):
            tag = tagcls()
            # A pre-existing comment frame with a non-empty description must
            # be left untouched by the friendly attribute.
            # NOTE(review): 'lan=' looks like a typo for 'lang=' (the frame's
            # language field, compared below as .lang) -- confirm against the
            # stagger frame constructor.
            frame = frameid(lan="eng", desc="foo", text="This is a text")
            tag[frameid] = [frame]
            # Comment should be the empty string.
            self.assertEqual(tag.comment, "")
            # Try to delete non-existent comment.
            tag.comment = ""
            self.assertEqual(tag.comment, "")
            self.assertEqual(len(tag[frameid]), 1)
            # Set comment.
            tag.comment = "Foobar"
            self.assertEqual(tag.comment, "Foobar")
            self.assertEqual(len(tag[frameid]), 2)
            self.assertEqual(tag[frameid][0], frame)
            self.assertEqual(tag[frameid][1].lang, "eng")
            self.assertEqual(tag[frameid][1].desc, "")
            self.assertEqual(tag[frameid][1].text, "Foobar")
            # Override comment.
            tag.comment = "Baz"
            self.assertEqual(tag.comment, "Baz")
            self.assertEqual(len(tag[frameid]), 2)
            self.assertEqual(tag[frameid][0], frame)
            self.assertEqual(tag[frameid][1].lang, "eng")
            self.assertEqual(tag[frameid][1].desc, "")
            self.assertEqual(tag[frameid][1].text, "Baz")
            # Delete comment.
            tag.comment = ""
            self.assertEqual(tag.comment, "")
            self.assertEqual(len(tag[frameid]), 1)
            self.assertEqual(tag[frameid][0], frame)
# Module-level suite collecting every friendly-API test above.
suite = unittest.TestLoader().loadTestsFromTestCase(FriendlyTestCase)

if __name__ == "__main__":
    # Surface every stagger warning while the tests run.
    warnings.simplefilter("always", stagger.Warning)
    unittest.main(defaultTest="suite")
| bsd-2-clause |
Nelca/buildMLSystem | ch04/blei_lda.py | 3 | 1602 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from gensim import corpora, models, similarities
from mpltools import style
import matplotlib.pyplot as plt
import numpy as np
import sys
from os import path

style.use('ggplot')

# The AP corpus must be downloaded separately; fail fast with a clear
# message instead of crashing on the corpus load below.
if not path.exists('./data/ap/ap.dat'):
    print('Error: Expected data to be present at data/ap/')
    sys.exit(1)

corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')

# Train LDA with the library-default alpha prior (alpha=None).
model = models.ldamodel.LdaModel(
    corpus, num_topics=100, id2word=corpus.id2word, alpha=None)

# Print the top 64 words of the first 84 topics, with weights scaled to
# per-mille of the topic's mass. (range, not xrange: the print_function
# import signals this file is meant to run under Python 3 as well.)
for ti in range(84):
    words = model.show_topic(ti, 64)
    tf = sum(f for f, w in words)
    print('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
    print()
    print()
    print()

# Histogram: how many topics each document participates in (default alpha).
thetas = [model[c] for c in corpus]
plt.hist([len(t) for t in thetas], np.arange(42))
plt.ylabel('Nr of documents')
plt.xlabel('Nr of topics')
plt.savefig('../1400OS_04_01+.png')

# Retrain with a uniform alpha=1.0 prior: documents pick up many more topics.
model1 = models.ldamodel.LdaModel(
    corpus, num_topics=100, id2word=corpus.id2word, alpha=1.)
thetas1 = [model1[c] for c in corpus]

#model8 = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word, alpha=1.e-8)
#thetas8 = [model8[c] for c in corpus]

# Side-by-side comparison of the two alpha settings.
plt.clf()
plt.hist([[len(t) for t in thetas], [len(t) for t in thetas1]], np.arange(42))
plt.ylabel('Nr of documents')
plt.xlabel('Nr of topics')
plt.text(9, 223, r'default alpha')
plt.text(26, 156, 'alpha=1.0')
plt.savefig('../1400OS_04_02+.png')
| mit |
hafizrahmadi/tingroom | assets/ionicons-2.0.1/builder/generate.py | 357 | 9438 | from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
LESS_FOLDER_PATH = os.path.join(ROOT_PATH, 'less')
def main():
    """Build every Ionicons artifact: fonts, SCSS/LESS, cheatsheet, and the
    component/composer/bower package manifests."""
    generate_font_files()

    data = get_build_data()

    # The SVG font must exist (generated above) before its glyph names
    # can be rewritten.
    rename_svg_glyph_names(data)

    generate_scss(data)
    generate_less(data)
    generate_cheatsheet(data)
    generate_component_json(data)
    generate_composer_json(data)
    generate_bower_json(data)
def generate_font_files():
    """Run the FontForge build script that emits the font files.

    Requires the ``fontforge`` executable on PATH.
    """
    print "Generate Fonts"
    cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
    call(cmd, shell=True)
def rename_svg_glyph_names(data):
    """Rewrite glyph names inside the generated SVG font in place.

    FontForge emits names like ``uniF2CA``; replace each with the friendly
    ``ion-<name>`` taken from the build data.
    """
    # hacky and slow (but safe) way to rename glyph-name attributes
    svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
    svg_file = open(svg_path, 'r+')
    svg_text = svg_file.read()
    svg_file.seek(0)

    for ionicon in data['icons']:
        # uniF2CA
        org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
        ion_name = 'ion-%s' % (ionicon['name'])

        svg_text = svg_text.replace(org_name, ion_name)

    svg_file.write(svg_text)
    # Discard stale trailing bytes: without truncate(), a rewrite that is
    # shorter than the original file leaves old content after the new text,
    # corrupting the SVG.
    svg_file.truncate()
    svg_file.close()
def generate_less(data):
    """Emit the LESS variables and icon-class partials from the build data."""
    print "Generate LESS"
    font_name = data['name']
    font_version = data['version']
    css_prefix = data['prefix']
    variables_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-variables.less')
    icons_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-icons.less')

    # Variables partial: banner, font metadata, one @ionicon-var-* per icon.
    d = []
    d.append('/*!');
    d.append('Ionicons, v%s' % (font_version) );
    d.append('Created by Ben Sperry for the Ionic Framework, http://ionicons.com/');
    d.append('https://twitter.com/benjsperry https://twitter.com/ionicframework');
    d.append('MIT License: https://github.com/driftyco/ionicons');
    d.append('*/');
    d.append('// Ionicons Variables')
    d.append('// --------------------------\n')
    d.append('@ionicons-font-path: "../fonts";')
    d.append('@ionicons-font-family: "%s";' % (font_name) )
    d.append('@ionicons-version: "%s";' % (font_version) )
    d.append('@ionicons-prefix: %s;' % (css_prefix) )
    d.append('')
    for ionicon in data['icons']:
        # Codepoints arrive as '0xF2CA'; CSS wants the escaped form '\F2CA'.
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('@ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
    f = open(variables_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()

    # Icons partial: one combined selector extending .ion, then a per-icon
    # :before content rule.
    d = []
    d.append('// Ionicons Icons')
    d.append('// --------------------------\n')

    group = [ '.%s' % (data['name'].lower()) ]
    for ionicon in data['icons']:
        group.append('.@{ionicons-prefix}%s:before' % (ionicon['name']) )
    d.append( ',\n'.join(group) )

    d.append('{')
    d.append(' &:extend(.ion);')
    d.append('}')

    for ionicon in data['icons']:
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('.@{ionicons-prefix}%s:before { content: @ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )

    f = open(icons_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()
def generate_scss(data):
    """Emit the SCSS variables/icon partials, then compile them to CSS."""
    print "Generate SCSS"
    font_name = data['name']
    font_version = data['version']
    css_prefix = data['prefix']
    variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
    icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')

    # Variables partial: font metadata plus one $ionicon-var-* per icon.
    d = []
    d.append('// Ionicons Variables')
    d.append('// --------------------------\n')
    d.append('$ionicons-font-path: "../fonts" !default;')
    d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
    d.append('$ionicons-version: "%s" !default;' % (font_version) )
    d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
    d.append('')
    for ionicon in data['icons']:
        # '0xF2CA' -> the CSS-escaped form '\F2CA'.
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
    f = open(variables_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()

    # Icons partial: combined selector extending .ion, then per-icon
    # :before content rules.
    d = []
    d.append('// Ionicons Icons')
    d.append('// --------------------------\n')

    group = [ '.%s' % (data['name'].lower()) ]
    for ionicon in data['icons']:
        group.append('.#{$ionicons-prefix}%s:before' % (ionicon['name']) )
    d.append( ',\n'.join(group) )

    d.append('{')
    d.append(' @extend .ion;')
    d.append('}')

    for ionicon in data['icons']:
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )

    f = open(icons_file_path, 'w')
    f.write( '\n'.join(d) )
    f.close()

    generate_css_from_scss(data)
def generate_css_from_scss(data):
    """Compile ionicons.scss into readable and minified CSS.

    Requires the ``sass`` executable on PATH.
    """
    print "Generate CSS From SCSS"
    scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
    css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
    css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')

    cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
    call(cmd, shell=True)

    print "Generate Minified CSS From SCSS"
    cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
    call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
escaped_html_code = ionicon['code'].replace('0x', '&#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
    """Write the component(1) package manifest listing the CSS and fonts."""
    print "Generate component.json"
    d = {
        "name": data['name'],
        "repo": "driftyco/ionicons",
        "description": "The premium icon font for Ionic Framework.",
        "version": data['version'],
        "keywords": [],
        "dependencies": {},
        "development": {},
        "license": "MIT",
        "styles": [
            "css/%s.css" % (data['name'].lower())
        ],
        "fonts": [
            "fonts/%s.eot" % (data['name'].lower()),
            "fonts/%s.svg" % (data['name'].lower()),
            "fonts/%s.ttf" % (data['name'].lower()),
            "fonts/%s.woff" % (data['name'].lower())
        ]
    }
    txt = json.dumps(d, indent=4, separators=(',', ': '))

    component_file_path = os.path.join(ROOT_PATH, 'component.json')
    f = open(component_file_path, 'w')
    f.write(txt)
    f.close()
def generate_composer_json(data):
    """Write the Composer (PHP) package manifest.

    Note: unlike the other manifests, this one embeds no version/name from
    the build data; it is entirely static.
    """
    print "Generate composer.json"
    d = {
        "name": "driftyco/ionicons",
        "description": "The premium icon font for Ionic Framework.",
        "keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
        "homepage": "http://ionicons.com/",
        "authors": [
            {
                "name": "Ben Sperry",
                "email": "ben@drifty.com",
                "role": "Designer",
                "homepage": "https://twitter.com/benjsperry"
            },
            {
                "name": "Adam Bradley",
                "email": "adam@drifty.com",
                "role": "Developer",
                "homepage": "https://twitter.com/adamdbradley"
            },
            {
                "name": "Max Lynch",
                "email": "max@drifty.com",
                "role": "Developer",
                "homepage": "https://twitter.com/maxlynch"
            }
        ],
        "extra": {},
        "license": [ "MIT" ]
    }
    txt = json.dumps(d, indent=4, separators=(',', ': '))

    composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
    f = open(composer_file_path, 'w')
    f.write(txt)
    f.close()
def generate_bower_json(data):
    """Write the Bower package manifest (name/version from the build data)."""
    print "Generate bower.json"
    d = {
        "name": data['name'],
        "version": data['version'],
        "homepage": "https://github.com/driftyco/ionicons",
        "authors": [
            "Ben Sperry <ben@drifty.com>",
            "Adam Bradley <adam@drifty.com>",
            "Max Lynch <max@drifty.com>"
        ],
        "description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
        "main": [
            "css/%s.css" % (data['name'].lower()),
            "fonts/*"
        ],
        "keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
        "license": "MIT",
        "ignore": [
            "**/.*",
            "builder",
            "node_modules",
            "bower_components",
            "test",
            "tests"
        ]
    }
    txt = json.dumps(d, indent=4, separators=(',', ': '))

    bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
    f = open(bower_file_path, 'w')
    f.write(txt)
    f.close()
def get_build_data():
    """Load and return the icon/font metadata dict from build_data.json."""
    build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
    f = open(build_data_path, 'r')
    data = json.loads(f.read())
    f.close()
    return data


if __name__ == "__main__":
    main()
| mit |
wbyne/QGIS | python/ext-libs/pygments/lexers/textedit.py | 47 | 6057 | # -*- coding: utf-8 -*-
"""
pygments.lexers.textedit
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for languages related to text processing.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import RegexLexer, include, default, bygroups, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.python import PythonLexer
__all__ = ['AwkLexer', 'VimLexer']
class AwkLexer(RegexLexer):
    """
    For Awk scripts.

    .. versionadded:: 1.5
    """

    name = 'Awk'
    aliases = ['awk', 'gawk', 'mawk', 'nawk']
    filenames = ['*.awk']
    mimetypes = ['application/x-awk']

    tokens = {
        # Shared helper state: whitespace and '#' line comments.
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'#.*$', Comment.Single)
        ],
        # Entered wherever a '/' must begin a regex literal (e.g. after an
        # operator), disambiguating it from the division operator.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'\B', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        # Recovery state for a malformed regex: resynchronize at end of line.
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Operators push 'slashstartsregex' because a regex may follow.
            (r'\+\+|--|\|\||&&|in\b|\$|!?~|'
             r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(break|continue|do|while|exit|for|if|else|'
             r'return)\b', Keyword, 'slashstartsregex'),
            (r'function\b', Keyword.Declaration, 'slashstartsregex'),
            # Built-in functions and statements.
            (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
             r'length|match|split|sprintf|sub|substr|tolower|toupper|close|'
             r'fflush|getline|next|nextfile|print|printf|strftime|systime|'
             r'delete|system)\b', Keyword.Reserved),
            # Predefined variables (NR, NF, FS, ...).
            (r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|'
             r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|'
             r'RSTART|RT|SUBSEP)\b', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class VimLexer(RegexLexer):
    """
    Lexer for VimL script files.

    .. versionadded:: 0.8
    """
    name = 'VimL'
    aliases = ['vim']
    filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
                 '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
    mimetypes = ['text/x-vim']
    flags = re.MULTILINE

    # Matches 'py', 'pyt', ..., 'python': Vim allows command abbreviation.
    _python = r'py(?:t(?:h(?:o(?:n)?)?)?)?'

    tokens = {
        'root': [
            # Heredoc-style ':python << EOF ... EOF' block, delegated to the
            # Python lexer (\6 backreferences the heredoc terminator).
            (r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)',
             bygroups(using(this), Keyword, Text, Operator, Text, Text,
                      using(PythonLexer), Text)),
            # Single-line ':python <code>'.
            (r'^([ \t:]*)(' + _python + r')([ \t])(.*)',
             bygroups(using(this), Keyword, Text, using(PythonLexer))),

            (r'^\s*".*', Comment),

            (r'[ \t]+', Text),
            # TODO: regexes can have other delims
            (r'/(\\\\|\\/|[^\n/])*/', String.Regex),
            (r'"(\\\\|\\"|[^\n"])*"', String.Double),
            (r"'(''|[^\n'])*'", String.Single),

            # Who decided that doublequote was a good comment character??
            (r'(?<=\s)"[^\-:.%#=*].*', Comment),
            (r'-?\d+', Number),
            (r'#[0-9a-f]{6}', Number.Hex),
            (r'^:', Punctuation),
            (r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
            (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
             Keyword),
            (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
            (r'\b\w+\b', Name.Other), # These are postprocessed below
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        # The generated keyword/option/autocommand tables are imported at
        # instantiation time to keep module import cheap.
        from pygments.lexers._vim_builtins import command, option, auto
        self._cmd = command
        self._opt = option
        self._aut = auto

        RegexLexer.__init__(self, **options)

    def is_in(self, w, mapping):
        r"""
        It's kind of difficult to decide if something might be a keyword
        in VimL because it allows you to abbreviate them. In fact,
        'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
        valid ways to call it so rather than making really awful regexps
        like::

            \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b

        we match `\b\w+\b` and then call is_in() on those tokens. See
        `scripts/get_vimkw.py` for how the lists are extracted.
        """
        # 'mapping' is a sorted list of (abbreviation, full_name) pairs, so
        # bisect locates w's insertion point and only the two neighbouring
        # entries can match: w must start with the abbreviation and be a
        # prefix of the full name.
        p = bisect(mapping, (w,))
        if p > 0:
            if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
               mapping[p-1][1][:len(w)] == w:
                return True
        if p < len(mapping):
            return mapping[p][0] == w[:len(mapping[p][0])] and \
                   mapping[p][1][:len(w)] == w
        return False

    def get_tokens_unprocessed(self, text):
        # Post-process generic Name.Other tokens into Keyword/Name.Builtin
        # using the abbreviation-aware lookup above.
        # TODO: builtins are only subsequent tokens on lines
        #       and 'keywords' only happen at the beginning except
        #       for :au ones
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name.Other:
                if self.is_in(value, self._cmd):
                    yield index, Keyword, value
                elif self.is_in(value, self._opt) or \
                     self.is_in(value, self._aut):
                    yield index, Name.Builtin, value
                else:
                    yield index, Text, value
            else:
                yield index, token, value
| gpl-2.0 |
CYBAI/servo | tests/wpt/web-platform-tests/tools/third_party/pluggy/src/pluggy/_tracing.py | 14 | 1561 | """
Tracing utils
"""
class TagTracer(object):
    """Root tracer: formats tagged messages and routes them to an optional
    writer callable and to per-tag processor callbacks."""

    def __init__(self):
        self._tags2proc = {}
        self._writer = None
        self.indent = 0

    def get(self, name):
        """Return a sub-tracer whose messages carry the tag *name*."""
        return TagTracerSub(self, (name,))

    def _format_message(self, tags, args):
        # A trailing dict is treated as extra key/value detail lines.
        extra = {}
        if isinstance(args[-1], dict):
            args, extra = args[:-1], args[-1]

        pad = " " * self.indent
        body = " ".join(map(str, args))
        out = ["%s%s [%s]\n" % (pad, body, ":".join(tags))]
        out.extend("%s %s: %s\n" % (pad, key, value)
                   for key, value in extra.items())
        return "".join(out)

    def _processmessage(self, tags, args):
        # Emit to the writer (if any), then dispatch to a registered
        # processor for this exact tag tuple (if any).
        if args and self._writer is not None:
            self._writer(self._format_message(tags, args))
        processor = self._tags2proc.get(tags)
        if processor is not None:
            processor(tags, args)

    def setwriter(self, writer):
        """Install *writer* as the sink for all formatted messages."""
        self._writer = writer

    def setprocessor(self, tags, processor):
        """Register *processor* for *tags* (tuple, or 'a:b'-style string)."""
        if isinstance(tags, str):
            key = tuple(tags.split(":"))
        else:
            assert isinstance(tags, tuple)
            key = tags
        self._tags2proc[key] = processor
class TagTracerSub(object):
    """Callable handle bound to a root ``TagTracer`` and a fixed tag tuple;
    calling it forwards the message to the root for formatting/dispatch."""

    def __init__(self, root, tags):
        self.root, self.tags = root, tags

    def __call__(self, *args):
        self.root._processmessage(self.tags, args)

    def get(self, name):
        """Return a child tracer with *name* appended to this tag tuple."""
        child_tags = self.tags + (name,)
        return type(self)(self.root, child_tags)
| mpl-2.0 |
dracos/django | django/db/backends/base/validation.py | 114 | 1040 | class BaseDatabaseValidation:
"""Encapsulate backend-specific validation."""
def __init__(self, connection):
self.connection = connection
def check(self, **kwargs):
return []
def check_field(self, field, **kwargs):
errors = []
# Backends may implement a check_field_type() method.
if (hasattr(self, 'check_field_type') and
# Ignore any related fields.
not getattr(field, 'remote_field', None)):
# Ignore fields with unsupported features.
db_supports_all_required_features = all(
getattr(self.connection.features, feature, False)
for feature in field.model._meta.required_db_features
)
if db_supports_all_required_features:
field_type = field.db_type(self.connection)
# Ignore non-concrete fields.
if field_type is not None:
errors.extend(self.check_field_type(field, field_type))
return errors
| bsd-3-clause |
glebb/Qt-Kata-Skeleton | 3rdparty/gtest-1.6.0/test/gtest_test_utils.py | 397 | 10437 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
# Alias kept so the rest of the module is agnostic about the test framework.
_test_module = unittest

# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except:
  # Pre-Python-2.4 interpreters lack subprocess; fall back to popen2
  # (see the Subprocess class below).
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204

GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv. This is idempotent."""

  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return

  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]

    # The command line flag overrides the environment variable.
    # Only the first occurrence of each flag is honored (break after match);
    # matched flags are removed from argv in place.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag."""

  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed.
  _ParseAndStripGTestFlags(sys.argv)

  return _flag_map[flag]


def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  return os.path.abspath(GetFlag('source_dir'))


def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  return os.path.abspath(GetFlag('build_dir'))
# Lazily-created per-process temp directory; removed at interpreter exit.
_temp_dir = None


def _RemoveTempDir():
  # Registered with atexit below; errors are ignored so shutdown is never
  # blocked by a busy or already-deleted directory.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
  """Returns a directory for temporary files."""

  global _temp_dir
  if not _temp_dir:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """

  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # Windows and Cygwin binaries carry an .exe suffix the caller may omit.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    print >> sys.stderr, message
    sys.exit(1)

  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.system() already returns the exit() argument directly,
    # so no decoding is needed.
    return exit_code

  # On Unix, os.system() returns a wait status; decode it. A process that
  # was terminated by a signal has no exit status, hence -1.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  # Runs a child process and captures its combined output, using the
  # subprocess module when available and falling back to popen2 on very
  # old Pythons. All results are exposed as attributes (see __init__).
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Sygnal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest:
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        # Always restore the working directory (and environment, below),
        # even if the child failed to start.
        os.chdir(old_dir)

        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    # A negative return code means termination by signal (POSIX convention
    # mirrored by subprocess.Popen.returncode).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""

  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main(). Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]

  _test_module.main()
| bsd-2-clause |
kater169/libcloud | libcloud/test/compute/test_ikoula.py | 64 | 1139 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.compute.drivers.ikoula import IkoulaNodeDriver
from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase
from libcloud.test import unittest
class IkoulaNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase):
    # Reuse the generic CloudStack test suite, pointed at the Ikoula driver.
    driver_klass = IkoulaNodeDriver


if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
jayceyxc/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_util/test_datetimeutil.py | 47 | 6026 | # (c) 2005 Clark C. Evans and contributors
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# Some of this code was funded by: http://prometheusresearch.com
from time import localtime
from datetime import date
from paste.util.datetimeutil import *
def test_timedelta():
    """normalize_timedelta() renders assorted duration spellings
    (minutes, H:MM, 'Xh Ym', plain decimals) as decimal hours 'H.FF'."""
    assert('' == normalize_timedelta(""))
    assert('0.10' == normalize_timedelta("6m"))
    assert('0.50' == normalize_timedelta("30m"))
    assert('0.75' == normalize_timedelta("45m"))
    assert('1.00' == normalize_timedelta("60 min"))
    assert('1.50' == normalize_timedelta("90min"))
    assert('1.50' == normalize_timedelta("1.50"))
    assert('4.50' == normalize_timedelta("4 : 30"))
    assert('1.50' == normalize_timedelta("1h 30m"))
    assert('1.00' == normalize_timedelta("1"))
    assert('1.00' == normalize_timedelta("1 hour"))
    assert('8.00' == normalize_timedelta("480 mins"))
    assert('8.00' == normalize_timedelta("8h"))
    assert('0.50' == normalize_timedelta("0.5"))
    assert('0.10' == normalize_timedelta(".1"))
    assert('0.50' == normalize_timedelta(".50"))
    assert('0.75' == normalize_timedelta("0.75"))
def test_time():
    """normalize_time() with ampm=True renders 12/24-hour inputs, bare
    hours, and 'noon' as zero-padded 'HH:MM AM/PM' strings."""
    assert('03:00 PM' == normalize_time("3p", ampm=True))
    assert('03:00 AM' == normalize_time("300", ampm=True))
    assert('03:22 AM' == normalize_time("322", ampm=True))
    assert('01:22 PM' == normalize_time("1322", ampm=True))
    assert('01:00 PM' == normalize_time("13", ampm=True))
    assert('12:00 PM' == normalize_time("noon", ampm=True))
    # Bare hours are resolved heuristically: small hours lean PM,
    # early-morning hours lean AM.
    assert("06:00 PM" == normalize_time("6", ampm=True))
    assert("01:00 PM" == normalize_time("1", ampm=True))
    assert("07:00 AM" == normalize_time("7", ampm=True))
    assert("01:00 PM" == normalize_time("1 pm", ampm=True))
    assert("03:30 PM" == normalize_time("3:30 pm", ampm=True))
    assert("03:30 PM" == normalize_time("3 30 pm", ampm=True))
    assert("03:30 PM" == normalize_time("3 30 P.M.", ampm=True))
    assert("12:00 PM" == normalize_time("0", ampm=True))
    assert("12:00 AM" == normalize_time("1200 AM", ampm=True))
def test_date():
    """Exercise parse_date() and normalize_date(), including rejected inputs."""
    tm = localtime()
    yr, mo = tm[0], tm[1]

    # parse_date fills in missing parts from the current year/month.
    parse_cases = [
        ("411", date(yr, 4, 11)),
        ("APR11", date(yr, 4, 11)),
        ("11APR", date(yr, 4, 11)),
        ("4 11", date(yr, 4, 11)),
        ("11 APR", date(yr, 4, 11)),
        ("APR 11", date(yr, 4, 11)),
        ("11", date(yr, mo, 11)),
        ("APR", date(yr, 4, 1)),
        ("4/11", date(yr, 4, 11)),
        ("today", date.today()),
        ("now", date.today()),
        ("", None),
    ]
    for raw, expected in parse_cases:
        assert expected == parse_date(raw)

    assert '' == normalize_date(None)

    normalize_cases = [
        ("20010203", '2001-02-03'),
        ("1999 4 11", '1999-04-11'),
        ("1999 APR 11", '1999-04-11'),
        ("APR 11 1999", '1999-04-11'),
        ("11 APR 1999", '1999-04-11'),
        ("4 11 1999", '1999-04-11'),
        ("1999 APR", '1999-04-01'),
        ("1999 4", '1999-04-01'),
        ("4 1999", '1999-04-01'),
        ("APR 1999", '1999-04-01'),
        ("1999", '1999-01-01'),
        ("1APR1999", '1999-04-01'),
        ("1APR2001", '2001-04-01'),
        # "+N" / " N" suffixes add N days.
        ("1999-04-11+7", '1999-04-18'),
        ("1999-04-11 7", '1999-04-18'),
        ("1 apr 1999", '1999-04-01'),
        ("11 apr 1999", '1999-04-11'),
        ("11 Apr 1999", '1999-04-11'),
        ("11-apr-1999", '1999-04-11'),
        ("11 April 1999", '1999-04-11'),
        ("11 APRIL 1999", '1999-04-11'),
        ("11 april 1999", '1999-04-11'),
        # Month matching is prefix-based, so even "aprick" resolves to April.
        ("11 aprick 1999", '1999-04-11'),
        ("APR 11, 1999", '1999-04-11'),
        ("4/11/1999", '1999-04-11'),
        ("4-11-1999", '1999-04-11'),
        ("1999-4-11", '1999-04-11'),
        ("19990411", '1999-04-11'),
        ("1 Jan 1999", '1999-01-01'),
        ("1 Feb 1999", '1999-02-01'),
        ("1 Mar 1999", '1999-03-01'),
        ("1 Apr 1999", '1999-04-01'),
        ("1 May 1999", '1999-05-01'),
        ("1 Jun 1999", '1999-06-01'),
        ("1 Jul 1999", '1999-07-01'),
        ("1 Aug 1999", '1999-08-01'),
        ("1 Sep 1999", '1999-09-01'),
        ("1 Oct 1999", '1999-10-01'),
        ("1 Nov 1999", '1999-11-01'),
        ("1 Dec 1999", '1999-12-01'),
        ("1999-4-30", '1999-04-30'),
        # Leap-year handling, including the 1900/2100 century exceptions.
        ("29 FEB 2000", '2000-02-29'),
        ("28 FEB 2001", '2001-02-28'),
        ("29 FEB 2004", '2004-02-29'),
        ("28 FEB 2100", '2100-02-28'),
        ("28 FEB 1900", '1900-02-28'),
    ]
    for raw, expected in normalize_cases:
        assert expected == normalize_date(raw)

    def expect_rejected(raw):
        # Invalid input must raise TypeError or ValueError.
        try:
            normalize_date(raw)
        except (TypeError, ValueError):
            return
        raise ValueError("type error expected", raw)

    for bad in ("2000-13-11", "APR 99", "29 FEB 1900", "29 FEB 2100",
                "29 FEB 2001", "1999-4-31", "APR 99", "20301", "020301",
                "1APR99", "1APR01", "1 APR 99", "1 APR 01", "11/5/01"):
        expect_rejected(bad)
| apache-2.0 |
amboutin/GCP | appengine/standard/multitenancy/taskqueue.py | 9 | 2859 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample App Engine application demonstrating how to use the Namespace Manager
API with Memcache.
For more information, see README.md.
"""
# [START all]
from google.appengine.api import namespace_manager
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import webapp2
class Counter(ndb.Model):
    # Datastore entity holding one named counter: the entity key id is the
    # counter name, ``count`` is its current value.
    count = ndb.IntegerProperty()
@ndb.transactional
def update_counter(name):
    """Increment the named counter by 1.

    Runs inside a datastore transaction so concurrent increments are not
    lost; creates the counter entity on first use and returns the new count.
    """
    counter = Counter.get_by_id(name)
    if counter is None:
        counter = Counter(id=name, count=0)
    counter.count += 1
    counter.put()
    return counter.count
def get_count(name):
    """Return the current value of the named counter, or 0 when absent."""
    entity = Counter.get_by_id(name)
    return entity.count if entity else 0
class DeferredCounterHandler(webapp2.RequestHandler):
    """Task-queue worker endpoint: increments the counter named in the POST."""

    def post(self):
        # 'counter_name' is set by the task's params when it is enqueued.
        name = self.request.get('counter_name')
        update_counter(name)
class TaskQueueCounterHandler(webapp2.RequestHandler):
    """Queues two tasks to increment a counter in global namespace as well as
    the namespace is specified by the request, which is arbitrarily named
    'default' if not specified."""

    def get(self, namespace='default'):
        # Queue task to update the global (no-namespace) counter.
        current_global_count = get_count('counter')
        taskqueue.add(
            url='/tasks/counter',
            params={'counter_name': 'counter'})

        # Queue task to update the counter in the requested namespace; the
        # previous namespace is restored afterwards so the rest of this
        # request is unaffected.
        previous_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(namespace)
            current_namespace_count = get_count('counter')
            taskqueue.add(
                url='/tasks/counter',
                params={'counter_name': 'counter'})
        finally:
            namespace_manager.set_namespace(previous_namespace)

        # Bug fix: "asyncronously" was misspelled and the two concatenated
        # string literals were missing a separating space.
        self.response.write(
            'Counters will be updated asynchronously. '
            'Current values: Global: {}, Namespace {}: {}'.format(
                current_global_count, namespace, current_namespace_count))
# WSGI routing: /tasks/counter is the task-queue worker endpoint;
# /taskqueue (optionally followed by a namespace) enqueues the updates.
app = webapp2.WSGIApplication([
    (r'/tasks/counter', DeferredCounterHandler),
    (r'/taskqueue', TaskQueueCounterHandler),
    (r'/taskqueue/(.*)', TaskQueueCounterHandler)
], debug=True)
| apache-2.0 |
stemchan/smartclip | main/auth.py | 1 | 2328 | import os
from cStringIO import StringIO
from django.conf import settings
from django.template.loader import get_template
from django.template import Context
from wkhtmltopdf.utils import wkhtmltopdf
from django.contrib.auth import login
from django.contrib.auth.models import User
import smartfile
from smartclip import secrets
from smartclip import backends
from main.models import *
def generate_api(request):
    """Build a SmartFile API client for the current session.

    "ACCESS_TOKEN" is inserted into the session in Smartfile.authenticate;
    when it is absent, the client is created with (None, None) user tokens.
    """
    token, secret = request.session.get('ACCESS_TOKEN', (None, None))
    return backends.Smartfile(client_token=secrets.OAUTH_TOKEN,
                              client_secret=secrets.OAUTH_SECRET,
                              access_token=token,
                              access_secret=secret)
def create_smartfile_docs(request, clip_id):
    """Render a clipping as HTML and PDF and upload both to SmartFile.

    The HTML is uploaded straight from memory; the PDF is produced by
    writing the HTML to a temporary file, converting it with wkhtmltopdf
    and uploading the result.

    Bug fix: the HTML temp file was opened without a context manager and
    the temp files were only removed on the success path, so both leaked
    whenever conversion or upload raised.  Cleanup now runs in ``finally``.

    :param request: current request (used to build the SmartFile client).
    :param clip_id: primary key of the ``Clipping`` to export.
    """
    clip = Clipping.objects.get(id=clip_id)
    # NOTE(review): MEDIA_URL is used as a filesystem prefix here — this
    # presumably only works when MEDIA_URL is a local path; confirm.
    base_path = settings.MEDIA_URL + clip.filename
    api = generate_api(request)
    create_smartfile_dirs(api)
    api.client.post('/path/data/smartclip/html',
                    file=(clip.filename + '.html',
                          StringIO(clip.html.encode('utf-8'))))
    try:
        with open(base_path + '.html', 'w') as html_file:
            html_file.write(clip.html.encode('ascii', 'xmlcharrefreplace'))
        wkhtmltopdf(pages=[base_path + '.html'], output=base_path + '.pdf')
        with open(base_path + '.pdf') as pdf_file:
            api.client.post('/path/data/smartclip/pdf',
                            file=(clip.filename + '.pdf', pdf_file))
    finally:
        # Always remove the temporary render artifacts.
        for suffix in ('.pdf', '.html'):
            if os.path.isfile(base_path + suffix):
                os.remove(base_path + suffix)
def create_smartfile_dirs(api):
    """Ensure the smartclip, smartclip/html and smartclip/pdf folders exist."""
    for folder in ('smartclip', 'smartclip/html', 'smartclip/pdf'):
        info = api.client.get('/path/info/%s' % folder)
        if info.status_code >= 399:
            # Folder missing (or unreadable): create it.
            api.client.post('/path/oper/mkdir', path='/%s' % folder)
def create_link(api, filename, **kwargs):
    """Create a SmartFile share link for the uploaded PDF.

    Optional kwargs: ``title``, ``recipients``, ``message``.
    """
    return api.client.post(
        '/link',
        path='/smartclip/pdf/' + filename + '.pdf',
        name=kwargs.get('title'),
        recipients=kwargs.get('recipients'),
        message=kwargs.get('message'),
        read=True,
        list=True)
| gpl-3.0 |
rafaeldelucena/waterbutler | waterbutler/providers/figshare/provider.py | 5 | 13530 | import http
import json
import asyncio
import aiohttp
import oauthlib.oauth1
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.figshare import metadata
from waterbutler.providers.figshare import settings
from waterbutler.providers.figshare import utils as figshare_utils
class FigshareProvider:
    """Factory dispatching to the project- or article-level provider.

    ``settings['container_type']`` selects the concrete class; the
    container id is copied into the settings key that class expects.
    """

    def __new__(cls, auth, credentials, settings):
        container_type = settings['container_type']
        if container_type == 'project':
            return FigshareProjectProvider(
                auth, credentials,
                dict(settings, project_id=settings['container_id']))
        if container_type in ('article', 'fileset'):
            return FigshareArticleProvider(
                auth, credentials,
                dict(settings, article_id=settings['container_id']))
        raise exceptions.ProviderError(
            'Invalid "container_type" {0}'.format(container_type))
class BaseFigshareProvider(provider.BaseProvider):
    """Common OAuth1 request plumbing shared by both figshare providers."""

    NAME = 'figshare'
    BASE_URL = settings.BASE_URL

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # OAuth1 client used to sign every request sent to the figshare API.
        self.client = oauthlib.oauth1.Client(
            self.credentials['client_token'],
            client_secret=self.credentials['client_secret'],
            resource_owner_key=self.credentials['owner_token'],
            resource_owner_secret=self.credentials['owner_secret'],
        )

    @asyncio.coroutine
    def make_request(self, method, uri, *args, **kwargs):
        """Sign the request with OAuth1 before delegating to the base class.

        Caller-supplied headers are merged in after signing, so they take
        precedence over same-named signed headers.
        """
        signed_uri, signed_headers, _ = self.client.sign(uri, method)
        signed_headers.update(kwargs.pop('headers', {}))
        kwargs['headers'] = signed_headers
        return (yield from super().make_request(method, signed_uri, *args, **kwargs))

    @asyncio.coroutine
    def revalidate_path(self, base, path, folder=False):
        """Re-resolve the single name ``path`` against the folder ``base``.

        Returns the matching child path (marking whether it is a folder);
        when no listing entry matches, an unidentified file child is
        returned instead of raising.

        NOTE(review): the ``folder`` argument appears unused here — it seems
        to exist only for interface parity with the base class; confirm.
        """
        wbpath = base
        assert base.is_dir
        path = path.strip('/')
        for entry in (yield from self.metadata(base)):
            if entry.name == path:
                # When ``base`` refers to a file inside an article, the entry
                # carries an article id segment too; walk the parallel
                # name/id segments so the resulting path is actually correct.
                names, ids = map(lambda x: getattr(entry, x).strip('/').split('/'), ('materialized_path', 'path'))
                while names and ids:
                    wbpath = wbpath.child(names.pop(0), _id=ids.pop(0))
                wbpath._is_folder = entry.kind == 'folder'
                return wbpath
        return base.child(path, folder=False)
class FigshareProjectProvider(BaseFigshareProvider):
    """Provider rooted at a figshare *project* (a collection of articles).

    File-level operations are delegated to a per-article
    :class:`FigshareArticleProvider` built on demand.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.project_id = self.settings['project_id']

    @asyncio.coroutine
    def validate_path(self, path, **kwargs):
        """Resolve '/article[/file]' to a WaterButlerPath.

        Unknown articles or files resolve to an unidentified *file* path
        (so uploads to new names can proceed) rather than raising, except
        for provider errors other than 404/401.
        """
        split = path.rstrip('/').split('/')[1:]
        wbpath = WaterButlerPath('/', _ids=(self.settings['project_id'], ), folder=True)

        if split:
            name_or_id = split.pop(0)
            try:
                article = yield from self._assert_contains_article(name_or_id)
            except ValueError:
                # Not an integer article id: treat it as a (new) file name.
                return wbpath.child(name_or_id, folder=False)
            except exceptions.ProviderError as e:
                if e.code not in (404, 401):
                    raise
                return wbpath.child(name_or_id, folder=False)

            wbpath = wbpath.child(article['title'], article['id'], folder=True)

        if split:
            provider = yield from self._make_article_provider(article['id'], check_parent=False)
            try:
                return (yield from provider.validate_path('/'.join([''] + split), parent=wbpath))
            except exceptions.ProviderError as e:
                if e.code not in (404, 401):
                    raise
                return wbpath.child(split.pop(0), folder=False)

        return wbpath

    @asyncio.coroutine
    def _assert_contains_article(self, article_id):
        """Return the listing entry for ``article_id``.

        Raises ValueError when ``article_id`` is not numeric, and a 404
        ProviderError when the article is not in this project.
        """
        articles_json = yield from self._list_articles()
        try:
            return next(
                each for each in articles_json
                if each['id'] == int(article_id)
            )
        except StopIteration:
            raise exceptions.ProviderError(
                'Article {0} not found'.format(article_id),
                code=http.client.NOT_FOUND,
            )

    @asyncio.coroutine
    def _make_article_provider(self, article_id, check_parent=True):
        """Build an article-level provider for ``article_id``.

        When ``check_parent`` is true, first verify the article belongs
        to this project.
        """
        article_id = str(article_id)
        if check_parent:
            yield from self._assert_contains_article(article_id)
        settings = {'article_id': article_id}
        return FigshareArticleProvider(self.auth, self.credentials, settings, child=True)

    @asyncio.coroutine
    def _get_project_metadata(self):
        """Return the raw project JSON from the figshare API.

        Bug fix: removed an unreachable second ``return`` (it wrapped the
        data in FigshareProjectMetadata but followed an earlier return).
        """
        response = yield from self.make_request(
            'GET',
            self.build_url('projects', self.project_id),
            expects=(200, ),
        )
        return (yield from response.json())

    @asyncio.coroutine
    def _list_articles(self):
        """Return the JSON list of articles attached to this project."""
        response = yield from self.make_request(
            'GET',
            self.build_url('projects', self.project_id, 'articles'),
            expects=(200, ),
        )
        return (yield from response.json())

    @asyncio.coroutine
    def _get_article_metadata(self, article_id):
        """Return serialized metadata for one article (may be None)."""
        provider = yield from self._make_article_provider(article_id, check_parent=False)
        return (yield from provider.about())

    @asyncio.coroutine
    def _project_metadata_contents(self):
        """Return metadata for all visible articles, fetched concurrently."""
        articles_json = yield from self._list_articles()
        contents = yield from asyncio.gather(*[
            self._get_article_metadata(each['id'])
            for each in articles_json
        ])
        # about() yields None for hidden (empty single-file) articles.
        return [each for each in contents if each]

    @asyncio.coroutine
    def _create_article(self, name):
        """Create a new, dataset-typed article titled ``name``."""
        response = yield from self.make_request(
            'POST',
            self.build_url('articles'),
            data=json.dumps({
                'title': name,
                'defined_type': 'dataset',
            }),
            headers={'Content-Type': 'application/json'},
            expects=(200, ),
        )
        return (yield from response.json())

    @asyncio.coroutine
    def download(self, path, **kwargs):
        """Delegate download of ``path`` to its article provider."""
        if path.identifier is None:
            raise exceptions.NotFoundError(str(path))
        provider = yield from self._make_article_provider(path.parts[1].identifier)
        return (yield from provider.download(path, **kwargs))

    @asyncio.coroutine
    def upload(self, stream, path, **kwargs):
        """Upload into an existing article, creating a new one at the root."""
        if not path.parent.is_root:
            provider = yield from self._make_article_provider(path.parent.identifier)
        else:
            article_json = yield from self._create_article(path.name)
            provider = yield from self._make_article_provider(article_json['article_id'], check_parent=False)
            yield from provider._add_to_project(self.project_id)
        return (yield from provider.upload(stream, path, **kwargs))

    @asyncio.coroutine
    def delete(self, path, **kwargs):
        """Delete a file inside an article, or detach a whole article."""
        provider = yield from self._make_article_provider(path.parts[1].identifier)
        if len(path.parts) == 3:
            yield from provider.delete(path, **kwargs)
        else:
            yield from provider._remove_from_project(self.project_id)

    @asyncio.coroutine
    def metadata(self, path, **kwargs):
        """List the project root, or delegate to the article provider."""
        if path.is_root:
            return (yield from self._project_metadata_contents())
        if path.identifier is None:
            raise exceptions.NotFoundError(str(path))
        provider = yield from self._make_article_provider(path.parts[1].identifier)
        return (yield from provider.metadata(path, **kwargs))

    @asyncio.coroutine
    def revisions(self, path, **kwargs):
        """figshare exposes no revision API; always fail with 405."""
        raise exceptions.ProviderError({'message': 'figshare does not support file revisions.'}, code=405)
class FigshareArticleProvider(BaseFigshareProvider):
    """Provider rooted at a single figshare article (or fileset)."""

    def __init__(self, auth, credentials, settings, child=False):
        # ``child`` is True when this provider serves one article on behalf
        # of a FigshareProjectProvider.
        super().__init__(auth, credentials, settings)
        self.article_id = self.settings['article_id']
        self.child = child

    @asyncio.coroutine
    def validate_path(self, path, parent=None, **kwargs):
        """Resolve '/file-id-or-name' against this article's file listing."""
        split = path.rstrip('/').split('/')[1:]
        wbpath = parent or WaterButlerPath('/', _ids=(self.article_id, ), folder=True)
        if split:
            name = split.pop(0)
            try:
                fid = int(name)
            except ValueError:
                fid = name
            article_json = yield from self._get_article_json()
            try:
                wbpath = wbpath.child(**next(
                    {
                        '_id': x['id'],
                        'name': x['name'],
                    } for x in article_json['files']
                    if x['id'] == fid
                ))
            except StopIteration:
                # No matching file: keep the name so uploads can target it.
                wbpath = wbpath.child(name)
        return wbpath

    @asyncio.coroutine
    def _get_article_json(self):
        """Return the raw JSON for this article (first item of the listing)."""
        response = yield from self.make_request(
            'GET',
            self.build_url('articles', self.article_id),
            expects=(200, ),
        )
        data = yield from response.json()
        return data['items'][0]

    @asyncio.coroutine
    def _add_to_project(self, project_id):
        """Attach this article to the given project."""
        resp = yield from self.make_request(
            'PUT',
            self.build_url('projects', project_id, 'articles'),
            data=json.dumps({'article_id': int(self.article_id)}),
            headers={'Content-Type': 'application/json'},
            expects=(200, ),
        )
        return (yield from resp.json())

    @asyncio.coroutine
    def _remove_from_project(self, project_id):
        """Detach this article from the given project."""
        resp = yield from self.make_request(
            'DELETE',
            self.build_url('projects', project_id, 'articles'),
            data=json.dumps({'article_id': int(self.article_id)}),
            headers={'Content-Type': 'application/json'},
            expects=(200, ),
        )
        return (yield from resp.json())

    def _serialize_item(self, item, parent):
        """Map a raw article/file dict onto the right metadata class.

        Returns None for single-file articles with no contents (hidden).
        """
        defined_type = item.get('defined_type')
        files = item.get('files')
        if defined_type == 'fileset':
            metadata_class = metadata.FigshareArticleMetadata
            metadata_kwargs = {}
        elif defined_type and not files:
            # Hide single-file articles with no contents
            return None
        else:
            metadata_class = metadata.FigshareFileMetadata
            metadata_kwargs = {'parent': parent, 'child': self.child}
            if defined_type:
                # Single-file article: surface its one file, not the article.
                item = item['files'][0]
        return metadata_class(item, **metadata_kwargs)

    @asyncio.coroutine
    def about(self):
        """Return serialized metadata for the article itself (may be None)."""
        article_json = yield from self._get_article_json()
        return self._serialize_item(article_json, article_json)

    @asyncio.coroutine
    def download(self, path, **kwargs):
        """Download a file. Note: Although Figshare may return a download URL,
        the `accept_url` parameter is ignored here, since Figshare does not
        support HTTPS for downloads.

        :param str path: Path to the key you want to download
        :rtype ResponseWrapper:
        """
        if path.identifier is None:
            raise exceptions.NotFoundError(str(path))
        file_metadata = yield from self.metadata(path)
        download_url = file_metadata.extra['downloadUrl']
        if download_url is None:
            # Private files carry no download URL.
            raise exceptions.DownloadError(
                'Cannot download private files',
                code=http.client.FORBIDDEN,
            )
        resp = yield from aiohttp.request('GET', download_url)
        return streams.ResponseStreamReader(resp)

    @asyncio.coroutine
    def delete(self, path, **kwargs):
        """Delete the file identified by ``path`` from this article."""
        yield from self.make_request(
            'DELETE',
            self.build_url('articles', str(self.article_id), 'files', str(path.identifier)),
            expects=(200, ),
            throws=exceptions.DeleteError,
        )

    @asyncio.coroutine
    def upload(self, stream, path, **kwargs):
        """Upload ``stream`` as a new file; returns (metadata, created)."""
        article_json = yield from self._get_article_json()
        stream = streams.FormDataStream(
            filedata=(stream, path.name)
        )
        response = yield from self.make_request(
            'PUT',
            self.build_url('articles', self.article_id, 'files'),
            data=stream,
            expects=(200, ),
            headers=stream.headers,
        )
        data = yield from response.json()
        return metadata.FigshareFileMetadata(data, parent=article_json, child=self.child), True

    @asyncio.coroutine
    def metadata(self, path, **kwargs):
        """List the article's files, or return one file's metadata."""
        if path.identifier is None:
            raise exceptions.NotFoundError(str(path))
        article_json = yield from self._get_article_json()
        if path.is_root or str(path.identifier) == self.article_id:
            return [x for x in [
                self._serialize_item(item, parent=article_json)
                for item in article_json['files']
            ] if x]
        file_json = figshare_utils.file_or_error(article_json, path.identifier)
        return self._serialize_item(file_json, parent=article_json)

    @asyncio.coroutine
    def revisions(self, path, **kwargs):
        """figshare exposes no revision API; always fail with 405."""
        raise exceptions.ProviderError({'message': 'figshare does not support file revisions.'}, code=405)
| apache-2.0 |
slisson/intellij-community | python/helpers/pycharm/django_manage_shell.py | 49 | 1127 | #!/usr/bin/env python
from fix_getpass import fixGetpass
import os
from django.core import management
import sys
try:
from runpy import run_module
except ImportError:
from runpy_compat import run_module
def run(working_dir):
    """Prepare a Django environment for an interactive manage.py shell.

    Neutralizes Django's command dispatch so importing the project's manage
    module only configures settings, then runs that module as ``__main__``.
    """
    sys.path.insert(0, working_dir)
    # PyCharm may point at a custom manage module; default to 'manage'.
    manage_file = os.getenv('PYCHARM_DJANGO_MANAGE_MODULE')
    if not manage_file:
        manage_file = 'manage'

    def execute_manager(settings_mod, argv = None):
        # Only set up the environment; never actually dispatch a command.
        management.setup_environ(settings_mod)
    management.execute_manager = execute_manager

    def execute_from_command_line(argv=None):
        # No-op so newer manage.py files do nothing on import.
        pass
    management.execute_from_command_line = execute_from_command_line

    fixGetpass()
    try:
        # import settings to prevent circular dependencies later on import django.db
        from django.conf import settings
        apps = settings.INSTALLED_APPS
        # From django.core.management.shell
        # XXX: (Temporary) workaround for ticket #1796: force early loading of all
        # models from installed apps.
        from django.db.models.loading import get_models
        get_models()
    except:
        # Deliberate best effort: settings/model preloading fails on some
        # Django versions, but the shell should still start.
        pass
    run_module(manage_file, None, '__main__', True)
| apache-2.0 |
boundarydevices/android_external_chromium_org | build/android/pylib/utils/command_option_parser.py | 160 | 2419 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An option parser which handles the first arg as a command.
Add other nice functionality such as printing a list of commands
and an example in usage.
"""
import optparse
import sys
class CommandOptionParser(optparse.OptionParser):
    """Wrapper class for OptionParser to help with listing commands."""

    def __init__(self, *args, **kwargs):
        """Creates a CommandOptionParser.

        Args:
          commands_dict: A dictionary mapping command strings to an object defining
              - add_options_func: Adds options to the option parser
              - run_command_func: Runs the command itself.
          example: An example command.
          everything else: Passed to optparse.OptionParser constructor.
        """
        self.commands_dict = kwargs.pop('commands_dict', {})
        self.example = kwargs.pop('example', '')
        # Idiom fix: 'x not in y' instead of 'not x in y'.
        if 'usage' not in kwargs:
            kwargs['usage'] = 'Usage: %prog <command> [options]'
        optparse.OptionParser.__init__(self, *args, **kwargs)

    # override
    def get_usage(self):
        """Append the example and command list to the standard usage text."""
        normal_usage = optparse.OptionParser.get_usage(self)
        command_list = self.get_command_list()
        example = self.get_example()
        return self.expand_prog_name(normal_usage + example + command_list)

    # override
    def get_command_list(self):
        """Return the formatted, sorted command list, or '' when empty."""
        # Idiom fix: test the dict's truthiness directly instead of .keys().
        if self.commands_dict:
            return '\nCommands:\n  %s\n' % '\n  '.join(
                sorted(self.commands_dict.keys()))
        return ''

    def get_example(self):
        """Return the formatted example section, or '' if none was given."""
        if self.example:
            return '\nExample:\n  %s\n' % self.example
        return ''
def ParseAndExecute(option_parser, argv=None):
    """Parses options/args from argv and runs the selected command.

    Args:
      option_parser: A CommandOptionParser object.
      argv: Command line arguments. If None, automatically draw from sys.argv.

    Returns:
      An exit code.
    """
    argv = argv or sys.argv
    known_command = len(argv) >= 2 and argv[1] in option_parser.commands_dict
    if not known_command:
        # Parse args first, so that '--help' makes optparse print help and
        # exit before we report the invalid command.
        option_parser.parse_args(argv)
        option_parser.error('Invalid command.')
    command_name = argv[1]
    command = option_parser.commands_dict[command_name]
    command.add_options_func(option_parser)
    options, args = option_parser.parse_args(argv)
    return command.run_command_func(command_name, options, args, option_parser)
| bsd-3-clause |
Think-Silicon/qemu | scripts/analyse-9p-simpletrace.py | 333 | 9058 | #!/usr/bin/env python
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import os
import simpletrace
# Maps 9P2000.L message type numbers to their protocol names.  T* entries
# are client requests, R* the matching server replies (R = T + 1).
symbol_9p = {
    6 : 'TLERROR',
    7 : 'RLERROR',
    8 : 'TSTATFS',
    9 : 'RSTATFS',
    12 : 'TLOPEN',
    13 : 'RLOPEN',
    14 : 'TLCREATE',
    15 : 'RLCREATE',
    16 : 'TSYMLINK',
    17 : 'RSYMLINK',
    18 : 'TMKNOD',
    19 : 'RMKNOD',
    20 : 'TRENAME',
    21 : 'RRENAME',
    22 : 'TREADLINK',
    23 : 'RREADLINK',
    24 : 'TGETATTR',
    25 : 'RGETATTR',
    26 : 'TSETATTR',
    27 : 'RSETATTR',
    30 : 'TXATTRWALK',
    31 : 'RXATTRWALK',
    32 : 'TXATTRCREATE',
    33 : 'RXATTRCREATE',
    40 : 'TREADDIR',
    41 : 'RREADDIR',
    50 : 'TFSYNC',
    51 : 'RFSYNC',
    52 : 'TLOCK',
    53 : 'RLOCK',
    54 : 'TGETLOCK',
    55 : 'RGETLOCK',
    70 : 'TLINK',
    71 : 'RLINK',
    72 : 'TMKDIR',
    73 : 'RMKDIR',
    74 : 'TRENAMEAT',
    75 : 'RRENAMEAT',
    76 : 'TUNLINKAT',
    77 : 'RUNLINKAT',
    100 : 'TVERSION',
    101 : 'RVERSION',
    102 : 'TAUTH',
    103 : 'RAUTH',
    104 : 'TATTACH',
    105 : 'RATTACH',
    106 : 'TERROR',
    107 : 'RERROR',
    108 : 'TFLUSH',
    109 : 'RFLUSH',
    110 : 'TWALK',
    111 : 'RWALK',
    112 : 'TOPEN',
    113 : 'ROPEN',
    114 : 'TCREATE',
    115 : 'RCREATE',
    116 : 'TREAD',
    117 : 'RREAD',
    118 : 'TWRITE',
    119 : 'RWRITE',
    120 : 'TCLUNK',
    121 : 'RCLUNK',
    122 : 'TREMOVE',
    123 : 'RREMOVE',
    124 : 'TSTAT',
    125 : 'RSTAT',
    126 : 'TWSTAT',
    127 : 'RWSTAT'
}
class VirtFSRequestTracker(simpletrace.Analyzer):
    # Pretty-prints each traced 9p request/reply as a human-readable line.
    # simpletrace dispatches every trace record to the method whose name
    # matches the trace event name, passing the event's fields as arguments.
    # This script is Python 2 (print statements).
    def begin(self):
        print "Pretty printing 9p simpletrace log ..."

    def v9fs_rerror(self, tag, id, err):
        print "RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")"

    def v9fs_version(self, tag, id, msize, version):
        print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"

    def v9fs_version_return(self, tag, id, msize, version):
        print "RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"

    def v9fs_attach(self, tag, id, fid, afid, uname, aname):
        print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")"

    def v9fs_attach_return(self, tag, id, type, version, path):
        print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"

    def v9fs_stat(self, tag, id, fid):
        print "TSTAT (tag =", tag, ", fid =", fid, ")"

    def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
        print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")"

    def v9fs_getattr(self, tag, id, fid, request_mask):
        print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")"

    def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
        print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")"

    def v9fs_walk(self, tag, id, fid, newfid, nwnames):
        print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")"

    def v9fs_walk_return(self, tag, id, nwnames, qids):
        print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")"

    def v9fs_open(self, tag, id, fid, mode):
        print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")"

    def v9fs_open_return(self, tag, id, type, version, path, iounit):
        print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"

    def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
        print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")"

    def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
        print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"

    def v9fs_fsync(self, tag, id, fid, datasync):
        print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")"

    def v9fs_clunk(self, tag, id, fid):
        print "TCLUNK (tag =", tag, ", fid =", fid, ")"

    def v9fs_read(self, tag, id, fid, off, max_count):
        print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")"

    def v9fs_read_return(self, tag, id, count, err):
        print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")"

    def v9fs_readdir(self, tag, id, fid, offset, max_count):
        print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")"

    def v9fs_readdir_return(self, tag, id, count, retval):
        print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")"

    def v9fs_write(self, tag, id, fid, off, count, cnt):
        print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")"

    def v9fs_write_return(self, tag, id, total, err):
        print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")"

    def v9fs_create(self, tag, id, fid, name, perm, mode):
        print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")"

    def v9fs_create_return(self, tag, id, type, version, path, iounit):
        print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"

    def v9fs_symlink(self, tag, id, fid, name, symname, gid):
        print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")"

    def v9fs_symlink_return(self, tag, id, type, version, path):
        print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"

    def v9fs_flush(self, tag, id, flush_tag):
        print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")"

    def v9fs_link(self, tag, id, dfid, oldfid, name):
        print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")"

    def v9fs_remove(self, tag, id, fid):
        print "TREMOVE (tag =", tag, ", fid =", fid, ")"

    def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
        print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")"

    def v9fs_mknod(self, tag, id, fid, mode, major, minor):
        print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")"

    def v9fs_lock(self, tag, id, fid, type, start, length):
        print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"

    def v9fs_lock_return(self, tag, id, status):
        print "RLOCK (tag =", tag, ", status =", status, ")"

    def v9fs_getlock(self, tag, id, fid, type, start, length):
        print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"

    def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
        print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")"

    def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
        print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")"

    def v9fs_mkdir_return(self, tag, id, type, version, path, err):
        print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")"

    def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
        print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")"

    def v9fs_xattrwalk_return(self, tag, id, size):
        print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")"

    def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
        print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")"

    def v9fs_readlink(self, tag, id, fid):
        print "TREADLINK (tag =", tag, ", fid =", fid, ")"

    def v9fs_readlink_return(self, tag, id, target):
        print "RREADLINK (tag =", tag, ", target =", target, ")"
# Feed the trace log named on the command line through the tracker.
# NOTE(review): simpletrace.run presumably reads the trace-events and log
# file paths from sys.argv — confirm against scripts/simpletrace.py.
simpletrace.run(VirtFSRequestTracker())
| gpl-2.0 |
ewjoachim/django-extensions | django_extensions/validators.py | 65 | 2090 | import unicodedata
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
@deconstructible
class NoControlCharactersValidator(object):
    """Reject values containing Unicode control characters (category 'C*'),
    e.g. newlines or tabs, unless they appear in the whitelist."""

    message = _("Control Characters like new lines or tabs are not allowed.")
    code = "no_control_characters"
    whitelist = None

    def __init__(self, message=None, code=None, whitelist=None):
        # Falsy arguments leave the class-level defaults untouched.
        if message:
            self.message = message
        if code:
            self.code = code
        if whitelist:
            self.whitelist = whitelist

    def __call__(self, value):
        text = force_text(value)
        allowed = self.whitelist
        for ch in text:
            if allowed and ch in allowed:
                continue
            # Unicode general categories starting with 'C' are control/
            # format/unassigned characters.
            if unicodedata.category(ch).startswith("C"):
                raise ValidationError(
                    self.message,
                    code=self.code,
                    params={'value': text, 'whitelist': allowed},
                )

    def __eq__(self, other):
        if not isinstance(other, NoControlCharactersValidator):
            return False
        return (self.whitelist == other.whitelist
                and self.message == other.message
                and self.code == other.code)
@deconstructible
class NoWhitespaceValidator(object):
    """Reject values that carry leading or trailing whitespace."""

    message = _("Leading and Trailing whitespace is not allowed.")
    code = "no_whitespace"

    def __init__(self, message=None, code=None, whitelist=None):
        # NOTE: ``whitelist`` is accepted only for signature parity with
        # NoControlCharactersValidator; this validator does not use it.
        if message:
            self.message = message
        if code:
            self.code = code

    def __call__(self, value):
        text = force_text(value)
        if text.strip() != text:
            raise ValidationError(self.message, code=self.code,
                                  params={'value': text})

    def __eq__(self, other):
        if not isinstance(other, NoWhitespaceValidator):
            return False
        return self.message == other.message and self.code == other.code
| mit |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/src/twisted/scripts/test/test_scripts.py | 12 | 4880 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the command-line scripts in the top-level I{bin/} directory.
Tests for actual functionality belong elsewhere, written in a way that doesn't
involve launching child processes.
"""
from os import devnull, getcwd, chdir
from sys import executable
from subprocess import PIPE, Popen
from twisted.trial.unittest import SkipTest, TestCase
from twisted.python.modules import getModule
from twisted.python.filepath import FilePath
from twisted.python.test.test_shellcomp import ZshScriptTestMixin
def outputFromPythonScript(script, *args):
    """
    Synchronously run a Python script with the same interpreter that is
    executing this process, passing the given command-line arguments, with
    standard input and standard error both redirected to L{os.devnull}.

    @param script: The path to the script.
    @type script: L{FilePath}

    @param args: The command-line arguments to follow the script in its
        invocation (the desired C{sys.argv[1:]}).
    @type args: L{tuple} of L{str}

    @return: the output written to the process's C{stdout}, without any
        messages from C{stderr}.
    @rtype: L{bytes}
    """
    command = [executable, script.path] + list(args)
    with open(devnull, "rb") as stdinSink, open(devnull, "wb") as stderrSink:
        child = Popen(command, stdout=PIPE, stderr=stderrSink,
                      stdin=stdinSink)
        # communicate() waits for the child and returns (stdout, stderr);
        # stderr is None here because it was not captured via PIPE.
        return child.communicate()[0]
class ScriptTestsMixin(object):
    """
    Mixin for L{TestCase} subclasses which defines a helper function for testing
    a Twisted-using script.
    """
    # The I{bin/} directory of the Twisted distribution this test run is
    # importing from; only present in a VCS checkout, not an installed copy.
    bin = getModule("twisted").pathEntry.filePath.child("bin")
    def scriptTest(self, name):
        """
        Verify that the given script runs and uses the version of Twisted
        currently being tested.
        This only works when running tests against a vcs checkout of Twisted,
        since it relies on the scripts being in the place they are kept in
        version control, and exercises their logic for finding the right version
        of Twisted to use in that situation.
        @param name: A path fragment, relative to the I{bin} directory of a
            Twisted source checkout, identifying a script to test.
        @type name: C{str}
        @raise SkipTest: if the script is not where it is expected to be.
        """
        script = self.bin.preauthChild(name)
        if not script.exists():
            raise SkipTest(
                "Script tests do not apply to installed configuration.")
        # Imported here, not at module level, so the version reflects the
        # checkout actually being exercised at test time.
        from twisted.copyright import version
        scriptVersion = outputFromPythonScript(script, '--version')
        self.assertIn(str(version), scriptVersion)
class ScriptTests(TestCase, ScriptTestsMixin):
    """
    Tests for the core scripts.
    """
    def test_twistd(self):
        # twistd should run and report the checkout's Twisted version.
        self.scriptTest("twistd")
    def test_twistdPathInsert(self):
        """
        The twistd script adds the current working directory to sys.path so
        that it's able to import modules from it.
        """
        script = self.bin.child("twistd")
        if not script.exists():
            raise SkipTest(
                "Script tests do not apply to installed configuration.")
        # Restore the original working directory after chdir'ing into the
        # temporary test directory below.
        cwd = getcwd()
        self.addCleanup(chdir, cwd)
        testDir = FilePath(self.mktemp())
        testDir.makedirs()
        chdir(testDir.path)
        # NOTE(review): the generated .tac file uses a Python 2 ``print``
        # statement and setContent() is given a native str -- presumably this
        # suite targets Python 2; confirm before porting.
        testDir.child("bar.tac").setContent(
            "import sys\n"
            "print sys.path\n")
        output = outputFromPythonScript(script, '-ny', 'bar.tac')
        # The tac file prints sys.path; the test directory must be in it.
        self.assertIn(repr(testDir.path), output)
    def test_trial(self):
        # trial should run and report the checkout's Twisted version.
        self.scriptTest("trial")
    def test_trialPathInsert(self):
        """
        The trial script adds the current working directory to sys.path so that
        it's able to import modules from it.
        """
        script = self.bin.child("trial")
        if not script.exists():
            raise SkipTest(
                "Script tests do not apply to installed configuration.")
        cwd = getcwd()
        self.addCleanup(chdir, cwd)
        testDir = FilePath(self.mktemp())
        testDir.makedirs()
        chdir(testDir.path)
        # An importable (empty) module in the cwd; trial must find it.
        testDir.child("foo.py").setContent("")
        output = outputFromPythonScript(script, 'foo')
        self.assertIn("PASSED", output)
    def test_pyhtmlizer(self):
        # pyhtmlizer should run and report the checkout's Twisted version.
        self.scriptTest("pyhtmlizer")
class ZshIntegrationTests(TestCase, ZshScriptTestMixin):
    """
    Test that zsh completion functions are generated without error
    """
    # (script name, fully-qualified usage.Options class) pairs consumed by
    # ZshScriptTestMixin to generate completion tests for each entry.
    generateFor = [('twistd', 'twisted.scripts.twistd.ServerOptions'),
                   ('trial', 'twisted.scripts.trial.Options'),
                   ('pyhtmlizer', 'twisted.scripts.htmlizer.Options'),
                   ]
| mit |
vmthunder/brick | brick/initiator/connector.py | 3 | 35458 | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import time
from brick import exception
from brick import executor
from brick.initiator import host_driver
from brick.initiator import linuxfc
from brick.initiator import linuxscsi
from brick.remotefs import remotefs
from brick.openstack.common.gettextutils import _
from brick.openstack.common import log as logging
from brick.openstack.common import loopingcall
from brick.openstack.common import processutils as putils
# Module-level logger shared by every connector class in this module.
LOG = logging.getLogger(__name__)
# Default number of rescan attempts connectors make while waiting for a
# newly attached device node to appear.
DEVICE_SCAN_ATTEMPTS_DEFAULT = 3
def get_connector_properties(root_helper, my_ip):
    """Get the connection properties for all protocols.

    Collects this host's IP, hostname, iSCSI initiator name (if one is
    configured) and Fibre Channel WWPNs/WWNNs (if any HBAs are present).
    """
    iscsi = ISCSIConnector(root_helper=root_helper)
    fc = linuxfc.LinuxFibreChannel(root_helper=root_helper)
    props = {'ip': my_ip, 'host': socket.gethostname()}
    initiator = iscsi.get_initiator()
    if initiator:
        props['initiator'] = initiator
    # Only advertise FC world-wide names when the host actually has them.
    for key, names in (('wwpns', fc.get_fc_wwpns()),
                       ('wwnns', fc.get_fc_wwnns())):
        if names:
            props[key] = names
    return props
class InitiatorConnector(executor.Executor):
    """Base class for volume connectors; see :meth:`factory` for dispatch."""
    def __init__(self, root_helper, driver=None,
                 execute=putils.execute,
                 device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        super(InitiatorConnector, self).__init__(root_helper, execute=execute,
                                                 *args, **kwargs)
        # Default to the generic host driver when none is supplied.
        if not driver:
            driver = host_driver.HostDriver()
        self.set_driver(driver)
        self.device_scan_attempts = device_scan_attempts
    def set_driver(self, driver):
        """The driver is used to find used LUNs."""
        self.driver = driver
    @staticmethod
    def factory(protocol, root_helper, driver=None,
                execute=putils.execute, use_multipath=False,
                device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                *args, **kwargs):
        """Build a Connector object based upon protocol.

        :param protocol: one of ISCSI, ISER, FIBRE_CHANNEL, AOE, NFS,
            GLUSTERFS or LOCAL (case-insensitive).
        :raises ValueError: for an unrecognized protocol.
        """
        LOG.debug("Factory for %s" % protocol)
        protocol = protocol.upper()
        if protocol == "ISCSI":
            return ISCSIConnector(root_helper=root_helper,
                                  driver=driver,
                                  execute=execute,
                                  use_multipath=use_multipath,
                                  device_scan_attempts=device_scan_attempts,
                                  *args, **kwargs)
        elif protocol == "ISER":
            return ISERConnector(root_helper=root_helper,
                                 driver=driver,
                                 execute=execute,
                                 use_multipath=use_multipath,
                                 device_scan_attempts=device_scan_attempts,
                                 *args, **kwargs)
        elif protocol == "FIBRE_CHANNEL":
            return FibreChannelConnector(root_helper=root_helper,
                                         driver=driver,
                                         execute=execute,
                                         use_multipath=use_multipath,
                                         device_scan_attempts=
                                         device_scan_attempts,
                                         *args, **kwargs)
        elif protocol == "AOE":
            return AoEConnector(root_helper=root_helper,
                                driver=driver,
                                execute=execute,
                                device_scan_attempts=device_scan_attempts,
                                *args, **kwargs)
        elif protocol == "NFS" or protocol == "GLUSTERFS":
            return RemoteFsConnector(mount_type=protocol.lower(),
                                     root_helper=root_helper,
                                     driver=driver,
                                     execute=execute,
                                     device_scan_attempts=device_scan_attempts,
                                     *args, **kwargs)
        elif protocol == "LOCAL":
            return LocalConnector(root_helper=root_helper,
                                  driver=driver,
                                  execute=execute,
                                  device_scan_attempts=device_scan_attempts,
                                  *args, **kwargs)
        else:
            msg = (_("Invalid InitiatorConnector protocol "
                     "specified %(protocol)s") %
                   dict(protocol=protocol))
            raise ValueError(msg)
    def check_valid_device(self, path):
        """Probe ``path`` by reading a single block from it as root.

        :returns: True when the device could be read, False otherwise.
        """
        cmd = ('dd', 'if=%(path)s' % {"path": path},
               'of=/dev/null', 'count=1')
        out, info = None, None
        try:
            out, info = self._execute(*cmd, run_as_root=True,
                                      root_helper=self._root_helper)
        except putils.ProcessExecutionError as e:
            LOG.error(_("Failed to access the device on the path "
                        "%(path)s: %(error)s %(info)s.") %
                      {"path": path, "error": e.stderr,
                       "info": info})
            return False
        # If the info is none, the path does not exist.
        if info is None:
            return False
        return True
    def connect_volume(self, connection_properties):
        """Connect to a volume.
        The connection_properties describes the information needed by
        the specific protocol to use to make the connection.
        """
        raise NotImplementedError()
    def disconnect_volume(self, connection_properties, device_info):
        """Disconnect a volume from the local host.
        The connection_properties are the same as from connect_volume.
        The device_info is returned from connect_volume.
        """
        raise NotImplementedError()
class ISCSIConnector(InitiatorConnector):
    """Connector class to attach/detach iSCSI volumes.

    Uses ``iscsiadm`` for session management and, when ``use_multipath``
    is set, ``multipath`` to resolve the device-mapper device.
    """
    def __init__(self, root_helper, driver=None,
                 execute=putils.execute, use_multipath=False,
                 device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute)
        super(ISCSIConnector, self).__init__(root_helper, driver=driver,
                                             execute=execute,
                                             device_scan_attempts=
                                             device_scan_attempts,
                                             *args, **kwargs)
        self.use_multipath = use_multipath
    def set_execute(self, execute):
        # Keep the helper's executor in sync with ours.
        super(ISCSIConnector, self).set_execute(execute)
        self._linuxscsi.set_execute(execute)
    def connect_volume(self, connection_properties):
        """Attach the volume to instance_name.
        connection_properties for iSCSI must include:
        target_portal - ip and optional port
        target_iqn - iSCSI Qualified Name
        target_lun - LUN id of the volume
        """
        device_info = {'type': 'block'}
        if self.use_multipath:
            #multipath installed, discovering other targets if available
            target_portal = connection_properties['target_portal']
            out = self._run_iscsiadm_bare(['-m',
                                           'discovery',
                                           '-t',
                                           'sendtargets',
                                           '-p',
                                           target_portal],
                                          check_exit_code=[0, 255])[0] \
                or ""
            # Log in to every portal/iqn pair the discovery reported.
            for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
                props = connection_properties.copy()
                props['target_portal'] = ip
                props['target_iqn'] = iqn
                self._connect_to_iscsi_portal(props)
            self._rescan_iscsi()
        else:
            self._connect_to_iscsi_portal(connection_properties)
        host_device = self._get_device_path(connection_properties)
        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        while not os.path.exists(host_device):
            if tries >= self.device_scan_attempts:
                raise exception.VolumeDeviceNotFound(device=host_device)
            LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
                       "Will rescan & retry.  Try number: %(tries)s"),
                     {'host_device': host_device,
                      'tries': tries})
            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(connection_properties, ("--rescan",))
            tries = tries + 1
            # Back off quadratically between attempts.
            if not os.path.exists(host_device):
                time.sleep(tries ** 2)
        if tries != 0:
            LOG.debug(_("Found iSCSI node %(host_device)s "
                        "(after %(tries)s rescans)"),
                      {'host_device': host_device, 'tries': tries})
        if self.use_multipath:
            #we use the multipath device instead of the single path device
            self._rescan_multipath()
            multipath_device = self._get_multipath_device_name(host_device)
            if multipath_device is not None:
                host_device = multipath_device
        device_info['path'] = host_device
        return device_info
    def disconnect_volume(self, connection_properties, device_info):
        """Detach the volume from instance_name.
        connection_properties for iSCSI must include:
        target_portal - IP and optional port
        target_iqn - iSCSI Qualified Name
        target_lun - LUN id of the volume
        """
        # Moved _rescan_iscsi and _rescan_multipath
        # from _disconnect_volume_multipath_iscsi to here.
        # Otherwise, if we do rescan after _linuxscsi.remove_multipath_device
        # but before logging out, the removed devices under /dev/disk/by-path
        # will reappear after rescan.
        self._rescan_iscsi()
        host_device = self._get_device_path(connection_properties)
        multipath_device = None
        if self.use_multipath:
            self._rescan_multipath()
            multipath_device = self._get_multipath_device_name(host_device)
            if multipath_device:
                device_realpath = os.path.realpath(host_device)
                self._linuxscsi.remove_multipath_device(device_realpath)
                return self._disconnect_volume_multipath_iscsi(
                    connection_properties, multipath_device)
        # remove the device from the scsi subsystem
        # this eliminates any stale entries until logout
        dev_name = self._linuxscsi.get_name_from_path(host_device)
        if dev_name:
            self._linuxscsi.remove_scsi_device(dev_name)
        # NOTE(vish): Only disconnect from the target if no luns from the
        #             target are in use.
        device_prefix = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-" %
                         {'portal': connection_properties['target_portal'],
                          'iqn': connection_properties['target_iqn']})
        devices = self.driver.get_all_block_devices()
        devices = [dev for dev in devices if dev.startswith(device_prefix)]
        if not devices:
            self._disconnect_from_iscsi_portal(connection_properties)
    def _get_device_path(self, connection_properties):
        """Return the expected /dev/disk/by-path node for this target."""
        path = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-%(lun)s" %
                {'portal': connection_properties['target_portal'],
                 'iqn': connection_properties['target_iqn'],
                 'lun': connection_properties.get('target_lun', 0)})
        return path
    def get_initiator(self):
        """Secure helper to read file as root."""
        file_path = '/etc/iscsi/initiatorname.iscsi'
        try:
            lines, _err = self._execute('cat', file_path, run_as_root=True,
                                        root_helper=self._root_helper)
            for l in lines.split('\n'):
                if l.startswith('InitiatorName='):
                    return l[l.index('=') + 1:].strip()
        except putils.ProcessExecutionError:
            msg = (_("Could not find the iSCSI Initiator File %s")
                   % file_path)
            LOG.warn(msg)
            return None
    def _run_iscsiadm(self, connection_properties, iscsi_command, **kwargs):
        """Run iscsiadm in node mode against this target; returns (out, err)."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   connection_properties['target_iqn'],
                                   '-p',
                                   connection_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   root_helper=self._root_helper,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                  (iscsi_command, out, err))
        return (out, err)
    def _iscsiadm_update(self, connection_properties, property_key,
                         property_value, **kwargs):
        """Set a node database property via 'iscsiadm --op update'."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(connection_properties, iscsi_command,
                                  **kwargs)
    def _get_target_portals_from_iscsiadm_output(self, output):
        # return both portals and iqns
        return [line.split() for line in output.splitlines()]
    def _disconnect_volume_multipath_iscsi(self, connection_properties,
                                           multipath_name):
        """This removes a multipath device and its LUNs."""
        LOG.debug("Disconnect multipath device %s" % multipath_name)
        block_devices = self.driver.get_all_block_devices()
        devices = []
        for dev in block_devices:
            if "/mapper/" in dev:
                devices.append(dev)
            else:
                mpdev = self._get_multipath_device_name(dev)
                if mpdev:
                    devices.append(mpdev)
        # Do a discovery to find all targets.
        # Targets for multiple paths for the same multipath device
        # may not be the same.
        out = self._run_iscsiadm_bare(['-m',
                                       'discovery',
                                       '-t',
                                       'sendtargets',
                                       '-p',
                                       connection_properties['target_portal']],
                                      check_exit_code=[0, 255])[0] \
            or ""
        ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
        if not devices:
            # disconnect if no other multipath devices
            self._disconnect_mpath(connection_properties, ips_iqns)
            return
        # Get a target for all other multipath devices
        other_iqns = [self._get_multipath_iqn(device)
                      for device in devices]
        # Get all the targets for the current multipath device
        current_iqns = [iqn for ip, iqn in ips_iqns]
        in_use = False
        for current in current_iqns:
            if current in other_iqns:
                in_use = True
                break
        # If no other multipath device attached has the same iqn
        # as the current device
        if not in_use:
            # disconnect if no other multipath devices with same iqn
            self._disconnect_mpath(connection_properties, ips_iqns)
            return
        # else do not disconnect iscsi portals,
        # as they are used for other luns
        return
    def _connect_to_iscsi_portal(self, connection_properties):
        # NOTE(vish): If we are on the same host as nova volume, the
        #             discovery makes the target so we don't need to
        #             run --op new. Therefore, we check to see if the
        #             target exists, and if we get 255 (Not Found), then
        #             we run --op new. This will also happen if another
        #             volume is using the same target.
        try:
            self._run_iscsiadm(connection_properties, ())
        except putils.ProcessExecutionError as exc:
            # iscsiadm returns 21 for "No records found" after version 2.0-871
            if exc.exit_code in [21, 255]:
                self._run_iscsiadm(connection_properties, ('--op', 'new'))
            else:
                raise
        if connection_properties.get('auth_method'):
            self._iscsiadm_update(connection_properties,
                                  "node.session.auth.authmethod",
                                  connection_properties['auth_method'])
            self._iscsiadm_update(connection_properties,
                                  "node.session.auth.username",
                                  connection_properties['auth_username'])
            self._iscsiadm_update(connection_properties,
                                  "node.session.auth.password",
                                  connection_properties['auth_password'])
        #duplicate logins crash iscsiadm after load,
        #so we scan active sessions to see if the node is logged in.
        out = self._run_iscsiadm_bare(["-m", "session"],
                                      run_as_root=True,
                                      check_exit_code=[0, 1, 21])[0] or ""
        portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
                   for p in out.splitlines() if p.startswith("tcp:")]
        stripped_portal = connection_properties['target_portal'].split(",")[0]
        if len(portals) == 0 or len([s for s in portals
                                     if stripped_portal ==
                                     s['portal'].split(",")[0]
                                     and
                                     s['iqn'] ==
                                     connection_properties['target_iqn']]
                                    ) == 0:
            try:
                self._run_iscsiadm(connection_properties,
                                   ("--login",),
                                   check_exit_code=[0, 255])
            except putils.ProcessExecutionError as err:
                #as this might be one of many paths,
                #only set successful logins to startup automatically
                if err.exit_code in [15]:
                    self._iscsiadm_update(connection_properties,
                                          "node.startup",
                                          "automatic")
                    return
            self._iscsiadm_update(connection_properties,
                                  "node.startup",
                                  "automatic")
    def _disconnect_from_iscsi_portal(self, connection_properties):
        # Exit codes 21/255 ("not found") are tolerated: the node may
        # already be gone.
        self._iscsiadm_update(connection_properties, "node.startup", "manual",
                              check_exit_code=[0, 21, 255])
        self._run_iscsiadm(connection_properties, ("--logout",),
                           check_exit_code=[0, 21, 255])
        self._run_iscsiadm(connection_properties, ('--op', 'delete'),
                           check_exit_code=[0, 21, 255])
    def _get_multipath_device_name(self, single_path_device):
        """Return /dev/mapper/<name> for the given path device, or None."""
        device = os.path.realpath(single_path_device)
        out = self._run_multipath(['-ll',
                                  device],
                                  check_exit_code=[0, 1])[0]
        mpath_line = [line for line in out.splitlines()
                      if "scsi_id" not in line]  # ignore udev errors
        if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
            return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
        return None
    def _get_iscsi_devices(self):
        """List the ip-* entries under /dev/disk/by-path (iSCSI devices)."""
        try:
            devices = list(os.walk('/dev/disk/by-path'))[0][-1]
        except IndexError:
            return []
        return [entry for entry in devices if entry.startswith("ip-")]
    def _disconnect_mpath(self, connection_properties, ips_iqns):
        for ip, iqn in ips_iqns:
            props = connection_properties.copy()
            props['target_portal'] = ip
            props['target_iqn'] = iqn
            self._disconnect_from_iscsi_portal(props)
        self._rescan_multipath()
    def _get_multipath_iqn(self, multipath_device):
        entries = self._get_iscsi_devices()
        for entry in entries:
            entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            entry_multipath = self._get_multipath_device_name(entry_real_path)
            if entry_multipath == multipath_device:
                return entry.split("iscsi-")[1].split("-lun")[0]
        return None
    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        """Run iscsiadm without the node-mode boilerplate; returns (out, err)."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   root_helper=self._root_helper,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                  (iscsi_command, out, err))
        return (out, err)
    def _run_multipath(self, multipath_command, **kwargs):
        """Run the multipath tool; ``multipath_command`` must be a sequence
        of argv items (it is splatted into the command line)."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('multipath',
                                   *multipath_command,
                                   run_as_root=True,
                                   root_helper=self._root_helper,
                                   check_exit_code=check_exit_code)
        LOG.debug("multipath %s: stdout=%s stderr=%s" %
                  (multipath_command, out, err))
        return (out, err)
    def _rescan_iscsi(self):
        self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
        self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
    def _rescan_multipath(self):
        # BUG FIX: _run_multipath splats its first argument into argv, so a
        # bare string '-r' was unpacked into the two arguments '-' and 'r';
        # pass it as a one-element list so 'multipath -r' is actually run.
        self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
class ISERConnector(ISCSIConnector):
    """iSER (iSCSI Extensions for RDMA) connector.

    Inherits all behaviour from ISCSIConnector; only the /dev/disk/by-path
    naming scheme differs ("-iser-" instead of "-iscsi-").
    """
    def _get_device_path(self, iser_properties):
        lun = iser_properties.get('target_lun', 0)
        return ("/dev/disk/by-path/ip-%s-iser-%s-lun-%s"
                % (iser_properties['target_portal'],
                   iser_properties['target_iqn'],
                   lun))
class FibreChannelConnector(InitiatorConnector):
    """Connector class to attach/detach Fibre Channel volumes."""
    def __init__(self, root_helper, driver=None,
                 execute=putils.execute, use_multipath=False,
                 device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute)
        self._linuxfc = linuxfc.LinuxFibreChannel(root_helper, execute)
        super(FibreChannelConnector, self).__init__(root_helper, driver=driver,
                                                    execute=execute,
                                                    device_scan_attempts=
                                                    device_scan_attempts,
                                                    *args, **kwargs)
        self.use_multipath = use_multipath
    def set_execute(self, execute):
        # Keep both helpers' executors in sync with ours.
        super(FibreChannelConnector, self).set_execute(execute)
        self._linuxscsi.set_execute(execute)
        self._linuxfc.set_execute(execute)
    def connect_volume(self, connection_properties):
        """Attach the volume to instance_name.
        connection_properties for Fibre Channel must include:
        target_wwn - world-wide name(s) of the target port(s)
        target_lun - LUN id of the volume
        """
        LOG.debug("execute = %s" % self._execute)
        device_info = {'type': 'block'}
        ports = connection_properties['target_wwn']
        wwns = []
        # we support a list of wwns or a single wwn
        if isinstance(ports, list):
            for wwn in ports:
                wwns.append(str(wwn))
        elif isinstance(ports, basestring):
            # NOTE(review): basestring is Python 2 only -- presumably this
            # module targets Python 2; confirm before porting.
            wwns.append(str(ports))
        # We need to look for wwns on every hba
        # because we don't know ahead of time
        # where they will show up.
        hbas = self._linuxfc.get_fc_hbas_info()
        host_devices = []
        for hba in hbas:
            pci_num = self._get_pci_num(hba)
            if pci_num is not None:
                for wwn in wwns:
                    target_wwn = "0x%s" % wwn.lower()
                    host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                                   (pci_num,
                                    target_wwn,
                                    connection_properties.get('target_lun', 0)))
                    host_devices.append(host_device)
        if len(host_devices) == 0:
            # this is empty because we don't have any FC HBAs
            msg = _("We are unable to locate any Fibre Channel devices")
            LOG.warn(msg)
            raise exception.NoFibreChannelHostsFound()
        # The /dev/disk/by-path/... node is not always present immediately
        # We only need to find the first device.  Once we see the first device
        # multipath will have any others.
        def _wait_for_device_discovery(host_devices):
            # Runs repeatedly under FixedIntervalLoopingCall; communicates
            # results back through self.host_device / self.device_name and
            # counts attempts in self.tries.
            tries = self.tries
            for device in host_devices:
                LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
                          {'device': device})
                if os.path.exists(device):
                    self.host_device = device
                    # get the /dev/sdX device.  This is used
                    # to find the multipath device.
                    self.device_name = os.path.realpath(device)
                    raise loopingcall.LoopingCallDone()
            if self.tries >= self.device_scan_attempts:
                msg = _("Fibre Channel volume device not found.")
                LOG.error(msg)
                raise exception.NoFibreChannelVolumeDeviceFound()
            LOG.warn(_("Fibre volume not yet found. "
                       "Will rescan & retry.  Try number: %(tries)s"),
                     {'tries': tries})
            self._linuxfc.rescan_hosts(hbas)
            self.tries = self.tries + 1
        self.host_device = None
        self.device_name = None
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, host_devices)
        timer.start(interval=2).wait()
        tries = self.tries
        if self.host_device is not None and self.device_name is not None:
            LOG.debug(_("Found Fibre Channel volume %(name)s "
                        "(after %(tries)s rescans)"),
                      {'name': self.device_name, 'tries': tries})
        # see if the new drive is part of a multipath
        # device.  If so, we'll use the multipath device.
        if self.use_multipath:
            mdev_info = self._linuxscsi.find_multipath_device(self.device_name)
            if mdev_info is not None:
                LOG.debug(_("Multipath device discovered %(device)s")
                          % {'device': mdev_info['device']})
                device_path = mdev_info['device']
                devices = mdev_info['devices']
                device_info['multipath_id'] = mdev_info['id']
            else:
                # we didn't find a multipath device.
                # so we assume the kernel only sees 1 device
                device_path = self.host_device
                dev_info = self._linuxscsi.get_device_info(self.device_name)
                devices = [dev_info]
        else:
            device_path = self.host_device
            dev_info = self._linuxscsi.get_device_info(self.device_name)
            devices = [dev_info]
        device_info['path'] = device_path
        device_info['devices'] = devices
        return device_info
    def disconnect_volume(self, connection_properties, device_info):
        """Detach the volume from instance_name.
        connection_properties for Fibre Channel must include:
        target_wwn - world-wide name(s) of the target port(s)
        target_lun - LUN id of the volume
        """
        devices = device_info['devices']
        # If this is a multipath device, we need to search again
        # and make sure we remove all the devices. Some of them
        # might not have shown up at attach time.
        if self.use_multipath and 'multipath_id' in device_info:
            multipath_id = device_info['multipath_id']
            mdev_info = self._linuxscsi.find_multipath_device(multipath_id)
            devices = mdev_info['devices']
            LOG.debug("devices to remove = %s" % devices)
        # There may have been more than 1 device mounted
        # by the kernel for this volume.  We have to remove
        # all of them
        for device in devices:
            self._linuxscsi.remove_scsi_device(device["device"])
    def _get_pci_num(self, hba):
        # NOTE(walter-boring)
        # device path is in format of
        # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
        # sometimes an extra entry exists before the host2 value
        # we always want the value prior to the host2 value
        pci_num = None
        if hba is not None:
            if "device_path" in hba:
                index = 0
                device_path = hba['device_path'].split('/')
                for value in device_path:
                    if value.startswith('host'):
                        break
                    index = index + 1
                if index > 0:
                    pci_num = device_path[index - 1]
        return pci_num
class RemoteFsConnector(InitiatorConnector):
    """Connector class to attach/detach NFS and GlusterFS volumes."""
    def __init__(self, mount_type, root_helper, driver=None,
                 execute=putils.execute,
                 device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        kwargs = kwargs or {}
        conn = kwargs.get('conn')
        if conn:
            # Promote the generic mount_point_base from the connection info
            # into the protocol-specific kwarg RemoteFsClient expects,
            # without overriding an explicitly supplied value.
            mount_point_base = conn.get('mount_point_base')
            if mount_type.lower() == 'nfs':
                kwargs['nfs_mount_point_base'] =\
                    kwargs.get('nfs_mount_point_base') or\
                    mount_point_base
            elif mount_type.lower() == 'glusterfs':
                kwargs['glusterfs_mount_point_base'] =\
                    kwargs.get('glusterfs_mount_point_base') or\
                    mount_point_base
        else:
            LOG.warn(_("Connection details not present."
                       " RemoteFsClient may not initialize properly."))
        self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
                                                       execute=execute,
                                                       *args, **kwargs)
        super(RemoteFsConnector, self).__init__(root_helper, driver=driver,
                                                execute=execute,
                                                device_scan_attempts=
                                                device_scan_attempts,
                                                *args, **kwargs)
    def set_execute(self, execute):
        # Keep the remote FS client's executor in sync with ours.
        super(RemoteFsConnector, self).set_execute(execute)
        self._remotefsclient.set_execute(execute)
    def connect_volume(self, connection_properties):
        """Ensure that the filesystem containing the volume is mounted.
        connection_properties must include:
        export - remote filesystem device (e.g. '172.18.194.100:/var/nfs')
        name - file name within the filesystem
        connection_properties may optionally include:
        options - options to pass to mount
        """
        mnt_flags = []
        if connection_properties.get('options'):
            mnt_flags = connection_properties['options'].split()
        nfs_share = connection_properties['export']
        self._remotefsclient.mount(nfs_share, mnt_flags)
        mount_point = self._remotefsclient.get_mount_point(nfs_share)
        path = mount_point + '/' + connection_properties['name']
        return {'path': path}
    def disconnect_volume(self, connection_properties, device_info):
        """No need to do anything to disconnect a volume in a filesystem."""
class LocalConnector(InitiatorConnector):
    """Connector class to attach/detach File System backed volumes."""
    def __init__(self, root_helper, driver=None, execute=putils.execute,
                 *args, **kwargs):
        super(LocalConnector, self).__init__(root_helper, driver=driver,
                                             execute=execute, *args, **kwargs)

    def connect_volume(self, connection_properties):
        """Expose the locally accessible volume described by the properties.

        connection_properties must include:
        device_path - path to the volume to be connected
        """
        if 'device_path' not in connection_properties:
            raise ValueError(_("Invalid connection_properties specified "
                               "no device_path attribute"))
        return {'type': 'local',
                'path': connection_properties['device_path']}

    def disconnect_volume(self, connection_properties, device_info):
        """Disconnect a volume from the local host."""
        pass
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/models/_models_py3.py | 1 | 92077 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._storage_management_client_enums import *
class AccountSasParameters(msrest.serialization.Model):
"""The parameters to list SAS credentials of a storage account.
All required parameters must be populated in order to send to Azure.
:param services: Required. The signed services accessible with the account SAS. Possible values
include: Blob (b), Queue (q), Table (t), File (f). Possible values include: "b", "q", "t", "f".
:type services: str or ~azure.mgmt.storage.v2018_02_01.models.Services
:param resource_types: Required. The signed resource types that are accessible with the account
SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs;
Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files.
Possible values include: "s", "c", "o".
:type resource_types: str or ~azure.mgmt.storage.v2018_02_01.models.SignedResourceTypes
:param permissions: Required. The signed permissions for the account SAS. Possible values
include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process
(p). Possible values include: "r", "d", "w", "l", "a", "c", "u", "p".
:type permissions: str or ~azure.mgmt.storage.v2018_02_01.models.Permissions
:param ip_address_or_range: An IP address or a range of IP addresses from which to accept
requests.
:type ip_address_or_range: str
:param protocols: The protocol permitted for a request made with the account SAS. Possible
values include: "https,http", "https".
:type protocols: str or ~azure.mgmt.storage.v2018_02_01.models.HttpProtocol
:param shared_access_start_time: The time at which the SAS becomes valid.
:type shared_access_start_time: ~datetime.datetime
:param shared_access_expiry_time: Required. The time at which the shared access signature
becomes invalid.
:type shared_access_expiry_time: ~datetime.datetime
:param key_to_sign: The key to sign the account SAS token with.
:type key_to_sign: str
"""
_validation = {
'services': {'required': True},
'resource_types': {'required': True},
'permissions': {'required': True},
'shared_access_expiry_time': {'required': True},
}
_attribute_map = {
'services': {'key': 'signedServices', 'type': 'str'},
'resource_types': {'key': 'signedResourceTypes', 'type': 'str'},
'permissions': {'key': 'signedPermission', 'type': 'str'},
'ip_address_or_range': {'key': 'signedIp', 'type': 'str'},
'protocols': {'key': 'signedProtocol', 'type': 'str'},
'shared_access_start_time': {'key': 'signedStart', 'type': 'iso-8601'},
'shared_access_expiry_time': {'key': 'signedExpiry', 'type': 'iso-8601'},
'key_to_sign': {'key': 'keyToSign', 'type': 'str'},
}
def __init__(
    self,
    *,
    services: Union[str, "Services"],
    resource_types: Union[str, "SignedResourceTypes"],
    permissions: Union[str, "Permissions"],
    shared_access_expiry_time: datetime.datetime,
    ip_address_or_range: Optional[str] = None,
    protocols: Optional[Union[str, "HttpProtocol"]] = None,
    shared_access_start_time: Optional[datetime.datetime] = None,
    key_to_sign: Optional[str] = None,
    **kwargs
):
    """Initialize the account SAS parameter set.

    The four keyword-only parameters without defaults (``services``,
    ``resource_types``, ``permissions``, ``shared_access_expiry_time``) are
    the ones flagged ``'required': True`` in the class-level ``_validation``
    table; enforcement happens during msrest serialization, not here.
    """
    super(AccountSasParameters, self).__init__(**kwargs)
    # Straight pass-through of caller-supplied values onto the instance.
    self.services = services
    self.resource_types = resource_types
    self.permissions = permissions
    self.ip_address_or_range = ip_address_or_range
    self.protocols = protocols
    self.shared_access_start_time = shared_access_start_time
    self.shared_access_expiry_time = shared_access_expiry_time
    self.key_to_sign = key_to_sign
class Resource(msrest.serialization.Model):
    """Common fields that are returned in the response for all Azure Resource Manager resources.

    All three fields are populated by the server and ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is read-only and server-assigned; start them all unset.
        for server_field in ('id', 'name', 'type'):
            setattr(self, server_field, None)
class AzureEntityResource(Resource):
    """The resource model definition for an Azure Resource Manager resource with an etag.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar etag: Resource Etag.
    :vartype etag: str
    """

    # Re-declares the inherited read-only fields plus the new 'etag';
    # msrest reads these tables per-class, so the full set is listed again.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureEntityResource, self).__init__(**kwargs)
        # Server-populated; Resource.__init__ handles id/name/type.
        self.etag = None
class BlobContainer(AzureEntityResource):
    """Properties of the blob container, including Id, resource name, resource type, Etag.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar etag: Resource Etag.
    :vartype etag: str
    :param public_access: Specifies whether data in the container may be accessed publicly and the
     level of access. Possible values include: "Container", "Blob", "None".
    :type public_access: str or ~azure.mgmt.storage.v2018_02_01.models.PublicAccess
    :ivar last_modified_time: Returns the date and time the container was last modified.
    :vartype last_modified_time: ~datetime.datetime
    :ivar lease_status: The lease status of the container. Possible values include: "Locked",
     "Unlocked".
    :vartype lease_status: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseStatus
    :ivar lease_state: Lease state of the container. Possible values include: "Available",
     "Leased", "Expired", "Breaking", "Broken".
    :vartype lease_state: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseState
    :ivar lease_duration: Specifies whether the lease on a container is of infinite or fixed
     duration, only when the container is leased. Possible values include: "Infinite", "Fixed".
    :vartype lease_duration: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseDuration
    :param metadata: A name-value pair to associate with the container as metadata.
    :type metadata: dict[str, str]
    :ivar immutability_policy: The ImmutabilityPolicy property of the container.
    :vartype immutability_policy:
     ~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyProperties
    :ivar legal_hold: The LegalHold property of the container.
    :vartype legal_hold: ~azure.mgmt.storage.v2018_02_01.models.LegalHoldProperties
    :ivar has_legal_hold: The hasLegalHold public property is set to true by SRP if there are at
     least one existing tag. The hasLegalHold public property is set to false by SRP if all existing
     legal hold tags are cleared out. There can be a maximum of 1000 blob containers with
     hasLegalHold=true for a given account.
    :vartype has_legal_hold: bool
    :ivar has_immutability_policy: The hasImmutabilityPolicy public property is set to true by SRP
     if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public
     property is set to false by SRP if ImmutabilityPolicy has not been created for this container.
    :vartype has_immutability_policy: bool
    """

    # Only 'public_access' and 'metadata' are caller-settable; every other
    # field is marked readonly and is filled in during deserialization.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'last_modified_time': {'readonly': True},
        'lease_status': {'readonly': True},
        'lease_state': {'readonly': True},
        'lease_duration': {'readonly': True},
        'immutability_policy': {'readonly': True},
        'legal_hold': {'readonly': True},
        'has_legal_hold': {'readonly': True},
        'has_immutability_policy': {'readonly': True},
    }

    # 'properties.*' keys flatten the nested ARM properties envelope onto
    # top-level attributes of this model.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'public_access': {'key': 'properties.publicAccess', 'type': 'str'},
        'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
        'lease_status': {'key': 'properties.leaseStatus', 'type': 'str'},
        'lease_state': {'key': 'properties.leaseState', 'type': 'str'},
        'lease_duration': {'key': 'properties.leaseDuration', 'type': 'str'},
        'metadata': {'key': 'properties.metadata', 'type': '{str}'},
        'immutability_policy': {'key': 'properties.immutabilityPolicy', 'type': 'ImmutabilityPolicyProperties'},
        'legal_hold': {'key': 'properties.legalHold', 'type': 'LegalHoldProperties'},
        'has_legal_hold': {'key': 'properties.hasLegalHold', 'type': 'bool'},
        'has_immutability_policy': {'key': 'properties.hasImmutabilityPolicy', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        public_access: Optional[Union[str, "PublicAccess"]] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(BlobContainer, self).__init__(**kwargs)
        self.public_access = public_access
        # Remaining attributes are server-populated; left as None here.
        self.last_modified_time = None
        self.lease_status = None
        self.lease_state = None
        self.lease_duration = None
        self.metadata = metadata
        self.immutability_policy = None
        self.legal_hold = None
        self.has_legal_hold = None
        self.has_immutability_policy = None
class CheckNameAvailabilityResult(msrest.serialization.Model):
    """The CheckNameAvailability operation response.

    Every field is populated by the server and ignored when sending a request.

    :ivar name_available: Gets a boolean value that indicates whether the name is available for you
     to use. If true, the name is available. If false, the name has already been taken or is invalid
     and cannot be used.
    :vartype name_available: bool
    :ivar reason: Gets the reason that a storage account name could not be used. The Reason element
     is only returned if NameAvailable is false. Possible values include: "AccountNameInvalid",
     "AlreadyExists".
    :vartype reason: str or ~azure.mgmt.storage.v2018_02_01.models.Reason
    :ivar message: Gets an error message explaining the Reason value in more detail.
    :vartype message: str
    """

    _validation = {
        'name_available': {'readonly': True},
        'reason': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All three values arrive via deserialization of the service response.
        for readonly_field in ('name_available', 'reason', 'message'):
            setattr(self, readonly_field, None)
class CustomDomain(msrest.serialization.Model):
    """The custom domain assigned to this storage account. This can be set via Update.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Gets or sets the custom domain name assigned to the storage account.
     Name is the CNAME source.
    :type name: str
    :param use_sub_domain_name: Indicates whether indirect CName validation is enabled. Default
     value is false. This should only be set on updates.
    :type use_sub_domain_name: bool
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'use_sub_domain_name': {'key': 'useSubDomainName', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        name: str,
        use_sub_domain_name: Optional[bool] = None,
        **kwargs
    ):
        super(CustomDomain, self).__init__(**kwargs)
        self.name = name
        # None (not False) when the caller omits it, so the field is simply
        # not serialized rather than sent as an explicit false.
        self.use_sub_domain_name = use_sub_domain_name
class Dimension(msrest.serialization.Model):
    """Dimension of blobs, possibly be blob type or access tier.

    :param name: Name of dimension.
    :type name: str
    :param display_name: Display name of dimension.
    :type display_name: str
    """
    # NOTE(review): the original docstring described *both* params as
    # "Display name of dimension"; the description for ``name`` was a
    # copy-paste error and has been corrected above.

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display_name: Optional[str] = None,
        **kwargs
    ):
        super(Dimension, self).__init__(**kwargs)
        self.name = name
        self.display_name = display_name
class Encryption(msrest.serialization.Model):
    """The encryption settings on the storage account.

    All required parameters must be populated in order to send to Azure.

    :param services: List of services which support encryption.
    :type services: ~azure.mgmt.storage.v2018_02_01.models.EncryptionServices
    :param key_source: Required. The encryption keySource (provider). Possible values
     (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include:
     "Microsoft.Storage", "Microsoft.Keyvault". Default value: "Microsoft.Storage".
    :type key_source: str or ~azure.mgmt.storage.v2018_02_01.models.KeySource
    :param key_vault_properties: Properties provided by key vault.
    :type key_vault_properties: ~azure.mgmt.storage.v2018_02_01.models.KeyVaultProperties
    """

    _validation = {
        'key_source': {'required': True},
    }

    # Note the wire keys 'keySource' vs. the all-lowercase 'keyvaultproperties';
    # both match the service's JSON schema for this API version.
    _attribute_map = {
        'services': {'key': 'services', 'type': 'EncryptionServices'},
        'key_source': {'key': 'keySource', 'type': 'str'},
        'key_vault_properties': {'key': 'keyvaultproperties', 'type': 'KeyVaultProperties'},
    }

    def __init__(
        self,
        *,
        key_source: Union[str, "KeySource"] = "Microsoft.Storage",
        services: Optional["EncryptionServices"] = None,
        key_vault_properties: Optional["KeyVaultProperties"] = None,
        **kwargs
    ):
        super(Encryption, self).__init__(**kwargs)
        self.services = services
        # Defaults to the Microsoft.Storage-managed key provider.
        self.key_source = key_source
        self.key_vault_properties = key_vault_properties
class EncryptionService(msrest.serialization.Model):
    """A service that allows server-side encryption to be used.

    ``last_enabled_time`` is populated by the server and ignored when sending a request.

    :param enabled: A boolean indicating whether or not the service encrypts the data as it is
     stored.
    :type enabled: bool
    :ivar last_enabled_time: Gets a rough estimate of the date/time when the encryption was last
     enabled by the user. Only returned when encryption is enabled. There might be some unencrypted
     blobs which were written after this time, as it is just a rough estimate.
    :vartype last_enabled_time: ~datetime.datetime
    """

    _validation = {
        'last_enabled_time': {'readonly': True},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'last_enabled_time': {'key': 'lastEnabledTime', 'type': 'iso-8601'},
    }

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs):
        super().__init__(**kwargs)
        # Only 'enabled' is caller-settable; the timestamp is server-reported.
        self.last_enabled_time = None
        self.enabled = enabled
class EncryptionServices(msrest.serialization.Model):
    """A list of services that support encryption.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param blob: The encryption function of the blob storage service.
    :type blob: ~azure.mgmt.storage.v2018_02_01.models.EncryptionService
    :param file: The encryption function of the file storage service.
    :type file: ~azure.mgmt.storage.v2018_02_01.models.EncryptionService
    :ivar table: The encryption function of the table storage service.
    :vartype table: ~azure.mgmt.storage.v2018_02_01.models.EncryptionService
    :ivar queue: The encryption function of the queue storage service.
    :vartype queue: ~azure.mgmt.storage.v2018_02_01.models.EncryptionService
    """

    # In this API version only blob and file encryption are configurable;
    # table and queue entries are read-only.
    _validation = {
        'table': {'readonly': True},
        'queue': {'readonly': True},
    }

    _attribute_map = {
        'blob': {'key': 'blob', 'type': 'EncryptionService'},
        'file': {'key': 'file', 'type': 'EncryptionService'},
        'table': {'key': 'table', 'type': 'EncryptionService'},
        'queue': {'key': 'queue', 'type': 'EncryptionService'},
    }

    def __init__(
        self,
        *,
        blob: Optional["EncryptionService"] = None,
        file: Optional["EncryptionService"] = None,
        **kwargs
    ):
        super(EncryptionServices, self).__init__(**kwargs)
        self.blob = blob
        self.file = file
        # Server-populated read-only members.
        self.table = None
        self.queue = None
class Endpoints(msrest.serialization.Model):
    """The URIs that are used to perform a retrieval of a public blob, queue, table, web or dfs object.

    Every endpoint URI is populated by the server and ignored when sending a request.

    :ivar blob: Gets the blob endpoint.
    :vartype blob: str
    :ivar queue: Gets the queue endpoint.
    :vartype queue: str
    :ivar table: Gets the table endpoint.
    :vartype table: str
    :ivar file: Gets the file endpoint.
    :vartype file: str
    :ivar web: Gets the web endpoint.
    :vartype web: str
    :ivar dfs: Gets the dfs endpoint.
    :vartype dfs: str
    """

    _SERVICES = ('blob', 'queue', 'table', 'file', 'web', 'dfs')

    _validation = {
        'blob': {'readonly': True},
        'queue': {'readonly': True},
        'table': {'readonly': True},
        'file': {'readonly': True},
        'web': {'readonly': True},
        'dfs': {'readonly': True},
    }

    _attribute_map = {
        'blob': {'key': 'blob', 'type': 'str'},
        'queue': {'key': 'queue', 'type': 'str'},
        'table': {'key': 'table', 'type': 'str'},
        'file': {'key': 'file', 'type': 'str'},
        'web': {'key': 'web', 'type': 'str'},
        'dfs': {'key': 'dfs', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All endpoints are server-assigned; initialize each to None.
        for service in self._SERVICES:
            setattr(self, service, None)
class Identity(msrest.serialization.Model):
    """Identity for the resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar principal_id: The principal ID of resource identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant ID of resource.
    :vartype tenant_id: str
    :ivar type: Required. The identity type. Default value: "SystemAssigned".
    :vartype type: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
        # 'constant' ties the field to the class attribute below.
        'type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # Constant value serialized for every instance; intentionally not
    # re-assigned in __init__ so the class attribute is the single source.
    type = "SystemAssigned"

    def __init__(
        self,
        **kwargs
    ):
        super(Identity, self).__init__(**kwargs)
        self.principal_id = None
        self.tenant_id = None
class ImmutabilityPolicy(AzureEntityResource):
    """The ImmutabilityPolicy property of a blob container, including Id, resource name, resource type, Etag.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar etag: Resource Etag.
    :vartype etag: str
    :param immutability_period_since_creation_in_days: Required. The immutability period for the
     blobs in the container since the policy creation, in days.
    :type immutability_period_since_creation_in_days: int
    :ivar state: The ImmutabilityPolicy state of a blob container, possible values include: Locked
     and Unlocked. Possible values include: "Locked", "Unlocked".
    :vartype state: str or ~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyState
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'immutability_period_since_creation_in_days': {'required': True},
        'state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'immutability_period_since_creation_in_days': {'key': 'properties.immutabilityPeriodSinceCreationInDays', 'type': 'int'},
        'state': {'key': 'properties.state', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        immutability_period_since_creation_in_days: int,
        **kwargs
    ):
        super(ImmutabilityPolicy, self).__init__(**kwargs)
        # The retention period is the only caller-settable property.
        self.immutability_period_since_creation_in_days = immutability_period_since_creation_in_days
        # Locked/Unlocked state is reported by the service.
        self.state = None
class ImmutabilityPolicyProperties(msrest.serialization.Model):
    """The properties of an ImmutabilityPolicy of a blob container.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar etag: ImmutabilityPolicy Etag.
    :vartype etag: str
    :ivar update_history: The ImmutabilityPolicy update history of the blob container.
    :vartype update_history: list[~azure.mgmt.storage.v2018_02_01.models.UpdateHistoryProperty]
    :param immutability_period_since_creation_in_days: The immutability period for the blobs in the
     container since the policy creation, in days.
    :type immutability_period_since_creation_in_days: int
    :ivar state: The ImmutabilityPolicy state of a blob container, possible values include: Locked
     and Unlocked. Possible values include: "Locked", "Unlocked".
    :vartype state: str or ~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyState
    """

    _validation = {
        'etag': {'readonly': True},
        'update_history': {'readonly': True},
        'state': {'readonly': True},
    }

    # NOTE(review): the last two keys keep a 'properties.' prefix even though
    # this is itself a *Properties model, while 'etag'/'updateHistory' are
    # top-level. This matches the generated code as shipped — presumably the
    # service nests these two one level deeper; confirm against the REST
    # schema before changing.
    _attribute_map = {
        'etag': {'key': 'etag', 'type': 'str'},
        'update_history': {'key': 'updateHistory', 'type': '[UpdateHistoryProperty]'},
        'immutability_period_since_creation_in_days': {'key': 'properties.immutabilityPeriodSinceCreationInDays', 'type': 'int'},
        'state': {'key': 'properties.state', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        immutability_period_since_creation_in_days: Optional[int] = None,
        **kwargs
    ):
        super(ImmutabilityPolicyProperties, self).__init__(**kwargs)
        self.etag = None
        self.update_history = None
        self.immutability_period_since_creation_in_days = immutability_period_since_creation_in_days
        self.state = None
class IPRule(msrest.serialization.Model):
    """IP rule with specific IP or IP range in CIDR format.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param ip_address_or_range: Required. Specifies the IP or IP range in CIDR format. Only IPV4
     address is allowed.
    :type ip_address_or_range: str
    :ivar action: The action of IP ACL rule. Default value: "Allow".
    :vartype action: str
    """

    _validation = {
        'ip_address_or_range': {'required': True},
        # 'constant' ties 'action' to the class attribute below.
        'action': {'constant': True},
    }

    # Note: the IP/range is serialized under the wire key 'value'.
    _attribute_map = {
        'ip_address_or_range': {'key': 'value', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
    }

    # Constant for every rule; intentionally not assigned in __init__.
    action = "Allow"

    def __init__(
        self,
        *,
        ip_address_or_range: str,
        **kwargs
    ):
        super(IPRule, self).__init__(**kwargs)
        self.ip_address_or_range = ip_address_or_range
class KeyVaultProperties(msrest.serialization.Model):
    """Properties of key vault.

    :param key_name: The name of KeyVault key.
    :type key_name: str
    :param key_version: The version of KeyVault key.
    :type key_version: str
    :param key_vault_uri: The Uri of KeyVault.
    :type key_vault_uri: str
    """

    # Wire keys for this API version are all-lowercase.
    _attribute_map = {
        'key_name': {'key': 'keyname', 'type': 'str'},
        'key_version': {'key': 'keyversion', 'type': 'str'},
        'key_vault_uri': {'key': 'keyvaulturi', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        key_name: Optional[str] = None,
        key_version: Optional[str] = None,
        key_vault_uri: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Plain pass-through of the caller-supplied key vault coordinates;
        # every field is optional at construction time.
        self.key_vault_uri = key_vault_uri
        self.key_version = key_version
        self.key_name = key_name
class LeaseContainerRequest(msrest.serialization.Model):
    """Lease Container request schema.

    All required parameters must be populated in order to send to Azure.

    :param action: Required. Specifies the lease action. Can be one of the available actions.
     Possible values include: "Acquire", "Renew", "Change", "Release", "Break".
    :type action: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseContainerRequestAction
    :param lease_id: Identifies the lease. Can be specified in any valid GUID string format.
    :type lease_id: str
    :param break_period: Optional. For a break action, proposed duration the lease should continue
     before it is broken, in seconds, between 0 and 60.
    :type break_period: int
    :param lease_duration: Required for acquire. Specifies the duration of the lease, in seconds,
     or negative one (-1) for a lease that never expires.
    :type lease_duration: int
    :param proposed_lease_id: Optional for acquire, required for change. Proposed lease ID, in a
     GUID string format.
    :type proposed_lease_id: str
    """

    # Only 'action' is unconditionally required; the other fields' requirements
    # depend on which action is chosen (documented above) and are validated by
    # the service, not client-side.
    _validation = {
        'action': {'required': True},
    }

    _attribute_map = {
        'action': {'key': 'action', 'type': 'str'},
        'lease_id': {'key': 'leaseId', 'type': 'str'},
        'break_period': {'key': 'breakPeriod', 'type': 'int'},
        'lease_duration': {'key': 'leaseDuration', 'type': 'int'},
        'proposed_lease_id': {'key': 'proposedLeaseId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        action: Union[str, "LeaseContainerRequestAction"],
        lease_id: Optional[str] = None,
        break_period: Optional[int] = None,
        lease_duration: Optional[int] = None,
        proposed_lease_id: Optional[str] = None,
        **kwargs
    ):
        super(LeaseContainerRequest, self).__init__(**kwargs)
        self.action = action
        self.lease_id = lease_id
        self.break_period = break_period
        self.lease_duration = lease_duration
        self.proposed_lease_id = proposed_lease_id
class LeaseContainerResponse(msrest.serialization.Model):
    """Lease Container response schema.

    :param lease_id: Returned unique lease ID that must be included with any request to delete the
     container, or to renew, change, or release the lease.
    :type lease_id: str
    :param lease_time_seconds: Approximate time remaining in the lease period, in seconds.
    :type lease_time_seconds: str
    """

    _attribute_map = {
        'lease_id': {'key': 'leaseId', 'type': 'str'},
        'lease_time_seconds': {'key': 'leaseTimeSeconds', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        lease_id: Optional[str] = None,
        lease_time_seconds: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Both fields are optional at construction; note the remaining lease
        # time is carried as a string on the wire, not an int.
        self.lease_time_seconds = lease_time_seconds
        self.lease_id = lease_id
class LegalHold(msrest.serialization.Model):
    """The LegalHold property of a blob container.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar has_legal_hold: The hasLegalHold public property is set to true by SRP if there are at
     least one existing tag. The hasLegalHold public property is set to false by SRP if all existing
     legal hold tags are cleared out. There can be a maximum of 1000 blob containers with
     hasLegalHold=true for a given account.
    :vartype has_legal_hold: bool
    :param tags: Required. A set of tags. Each tag should be 3 to 23 alphanumeric characters and is
     normalized to lower case at SRP.
    :type tags: list[str]
    """

    _validation = {
        'has_legal_hold': {'readonly': True},
        'tags': {'required': True},
    }

    # Unlike LegalHoldProperties, tags here are plain strings sent by the
    # caller, not TagProperty objects.
    _attribute_map = {
        'has_legal_hold': {'key': 'hasLegalHold', 'type': 'bool'},
        'tags': {'key': 'tags', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        tags: List[str],
        **kwargs
    ):
        super(LegalHold, self).__init__(**kwargs)
        # Server-computed; reflects whether any tags currently exist.
        self.has_legal_hold = None
        self.tags = tags
class LegalHoldProperties(msrest.serialization.Model):
    """The LegalHold property of a blob container.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar has_legal_hold: The hasLegalHold public property is set to true by SRP if there are at
     least one existing tag. The hasLegalHold public property is set to false by SRP if all existing
     legal hold tags are cleared out. There can be a maximum of 1000 blob containers with
     hasLegalHold=true for a given account.
    :vartype has_legal_hold: bool
    :param tags: A set of tags. The list of LegalHold tags of a blob container.
    :type tags: list[~azure.mgmt.storage.v2018_02_01.models.TagProperty]
    """

    _validation = {
        'has_legal_hold': {'readonly': True},
    }

    # Unlike LegalHold, tags here are structured TagProperty objects
    # (as returned by the service), not plain strings.
    _attribute_map = {
        'has_legal_hold': {'key': 'hasLegalHold', 'type': 'bool'},
        'tags': {'key': 'tags', 'type': '[TagProperty]'},
    }

    def __init__(
        self,
        *,
        tags: Optional[List["TagProperty"]] = None,
        **kwargs
    ):
        super(LegalHoldProperties, self).__init__(**kwargs)
        # Server-computed flag.
        self.has_legal_hold = None
        self.tags = tags
class ListAccountSasResponse(msrest.serialization.Model):
    """The List SAS credentials operation response.

    The token is populated by the server and ignored when sending a request.

    :ivar account_sas_token: List SAS credentials of storage account.
    :vartype account_sas_token: str
    """

    _validation = {
        'account_sas_token': {'readonly': True},
    }

    _attribute_map = {
        'account_sas_token': {'key': 'accountSasToken', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in by deserialization of the service response.
        self.account_sas_token = None
class ListContainerItem(AzureEntityResource):
    """The blob container properties be listed out.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar etag: Resource Etag.
    :vartype etag: str
    :param public_access: Specifies whether data in the container may be accessed publicly and the
     level of access. Possible values include: "Container", "Blob", "None".
    :type public_access: str or ~azure.mgmt.storage.v2018_02_01.models.PublicAccess
    :ivar last_modified_time: Returns the date and time the container was last modified.
    :vartype last_modified_time: ~datetime.datetime
    :ivar lease_status: The lease status of the container. Possible values include: "Locked",
     "Unlocked".
    :vartype lease_status: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseStatus
    :ivar lease_state: Lease state of the container. Possible values include: "Available",
     "Leased", "Expired", "Breaking", "Broken".
    :vartype lease_state: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseState
    :ivar lease_duration: Specifies whether the lease on a container is of infinite or fixed
     duration, only when the container is leased. Possible values include: "Infinite", "Fixed".
    :vartype lease_duration: str or ~azure.mgmt.storage.v2018_02_01.models.LeaseDuration
    :param metadata: A name-value pair to associate with the container as metadata.
    :type metadata: dict[str, str]
    :ivar immutability_policy: The ImmutabilityPolicy property of the container.
    :vartype immutability_policy:
     ~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyProperties
    :ivar legal_hold: The LegalHold property of the container.
    :vartype legal_hold: ~azure.mgmt.storage.v2018_02_01.models.LegalHoldProperties
    :ivar has_legal_hold: The hasLegalHold public property is set to true by SRP if there are at
     least one existing tag. The hasLegalHold public property is set to false by SRP if all existing
     legal hold tags are cleared out. There can be a maximum of 1000 blob containers with
     hasLegalHold=true for a given account.
    :vartype has_legal_hold: bool
    :ivar has_immutability_policy: The hasImmutabilityPolicy public property is set to true by SRP
     if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public
     property is set to false by SRP if ImmutabilityPolicy has not been created for this container.
    :vartype has_immutability_policy: bool
    """

    # This model mirrors BlobContainer field-for-field; it exists as the
    # element type of list results (see ListContainerItems).
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'last_modified_time': {'readonly': True},
        'lease_status': {'readonly': True},
        'lease_state': {'readonly': True},
        'lease_duration': {'readonly': True},
        'immutability_policy': {'readonly': True},
        'legal_hold': {'readonly': True},
        'has_legal_hold': {'readonly': True},
        'has_immutability_policy': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'public_access': {'key': 'properties.publicAccess', 'type': 'str'},
        'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
        'lease_status': {'key': 'properties.leaseStatus', 'type': 'str'},
        'lease_state': {'key': 'properties.leaseState', 'type': 'str'},
        'lease_duration': {'key': 'properties.leaseDuration', 'type': 'str'},
        'metadata': {'key': 'properties.metadata', 'type': '{str}'},
        'immutability_policy': {'key': 'properties.immutabilityPolicy', 'type': 'ImmutabilityPolicyProperties'},
        'legal_hold': {'key': 'properties.legalHold', 'type': 'LegalHoldProperties'},
        'has_legal_hold': {'key': 'properties.hasLegalHold', 'type': 'bool'},
        'has_immutability_policy': {'key': 'properties.hasImmutabilityPolicy', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        public_access: Optional[Union[str, "PublicAccess"]] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(ListContainerItem, self).__init__(**kwargs)
        self.public_access = public_access
        # Remaining attributes are server-populated; left as None here.
        self.last_modified_time = None
        self.lease_status = None
        self.lease_state = None
        self.lease_duration = None
        self.metadata = metadata
        self.immutability_policy = None
        self.legal_hold = None
        self.has_legal_hold = None
        self.has_immutability_policy = None
class ListContainerItems(msrest.serialization.Model):
    """The list of blob containers.

    :param value: The list of blob containers.
    :type value: list[~azure.mgmt.storage.v2018_02_01.models.ListContainerItem]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ListContainerItem]'},
    }

    def __init__(self, *, value: Optional[List["ListContainerItem"]] = None, **kwargs):
        super().__init__(**kwargs)
        # Simple envelope model: a single optional list of container items.
        self.value = value
class ListServiceSasResponse(msrest.serialization.Model):
    """The List service SAS credentials operation response.

    The token is populated by the server and ignored when sending a request.

    :ivar service_sas_token: List service SAS credentials of specific resource.
    :vartype service_sas_token: str
    """

    _validation = {
        'service_sas_token': {'readonly': True},
    }

    _attribute_map = {
        'service_sas_token': {'key': 'serviceSasToken', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in by deserialization of the service response.
        self.service_sas_token = None
class MetricSpecification(msrest.serialization.Model):
    """Metric specification of an operation.

    :param name: Name of the metric specification.
    :type name: str
    :param display_name: Display name of the metric specification.
    :type display_name: str
    :param display_description: Display description of the metric specification.
    :type display_description: str
    :param unit: Unit could be Bytes or Count.
    :type unit: str
    :param dimensions: Dimensions of blobs, including blob type and access tier.
    :type dimensions: list[~azure.mgmt.storage.v2018_02_01.models.Dimension]
    :param aggregation_type: Aggregation type could be Average.
    :type aggregation_type: str
    :param fill_gap_with_zero: Whether to fill gaps in the metric with zero.
    :type fill_gap_with_zero: bool
    :param category: The category this metric specification belongs to, e.g. Capacity.
    :type category: str
    :param resource_id_dimension_name_override: Account Resource Id.
    :type resource_id_dimension_name_override: str
    """

    # Python attribute -> JSON wire key / serialized type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
        'category': {'key': 'category', 'type': 'str'},
        'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display_name: Optional[str] = None,
        display_description: Optional[str] = None,
        unit: Optional[str] = None,
        dimensions: Optional[List["Dimension"]] = None,
        aggregation_type: Optional[str] = None,
        fill_gap_with_zero: Optional[bool] = None,
        category: Optional[str] = None,
        resource_id_dimension_name_override: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.display_name = display_name
        self.display_description = display_description
        self.unit = unit
        self.dimensions = dimensions
        self.aggregation_type = aggregation_type
        self.fill_gap_with_zero = fill_gap_with_zero
        self.category = category
        self.resource_id_dimension_name_override = resource_id_dimension_name_override
class NetworkRuleSet(msrest.serialization.Model):
    """Network rule set.

    All required parameters must be populated in order to send to Azure.

    :param bypass: Specifies whether traffic is bypassed for Logging/Metrics/AzureServices.
     Possible values are any combination of Logging|Metrics|AzureServices (for example,
     "Logging, Metrics"), or None to bypass none of those traffics. Possible values
     include: "None", "Logging", "Metrics", "AzureServices". Default value: "AzureServices".
    :type bypass: str or ~azure.mgmt.storage.v2018_02_01.models.Bypass
    :param virtual_network_rules: Sets the virtual network rules.
    :type virtual_network_rules: list[~azure.mgmt.storage.v2018_02_01.models.VirtualNetworkRule]
    :param ip_rules: Sets the IP ACL rules.
    :type ip_rules: list[~azure.mgmt.storage.v2018_02_01.models.IPRule]
    :param default_action: Required. The default action (allow or deny) when no other rule
     matches. Possible values include: "Allow", "Deny". Default value: "Allow".
    :type default_action: str or ~azure.mgmt.storage.v2018_02_01.models.DefaultAction
    """

    _validation = {
        'default_action': {'required': True},
    }

    _attribute_map = {
        'bypass': {'key': 'bypass', 'type': 'str'},
        'virtual_network_rules': {'key': 'virtualNetworkRules', 'type': '[VirtualNetworkRule]'},
        'ip_rules': {'key': 'ipRules', 'type': '[IPRule]'},
        'default_action': {'key': 'defaultAction', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        default_action: Union[str, "DefaultAction"] = "Allow",
        bypass: Optional[Union[str, "Bypass"]] = "AzureServices",
        virtual_network_rules: Optional[List["VirtualNetworkRule"]] = None,
        ip_rules: Optional[List["IPRule"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.bypass = bypass
        self.virtual_network_rules = virtual_network_rules
        self.ip_rules = ip_rules
        self.default_action = default_action
class Operation(msrest.serialization.Model):
    """Definition of a Storage REST API operation.

    :param name: Operation name, formatted as {provider}/{resource}/{operation}.
    :type name: str
    :param display: Display metadata associated with the operation.
    :type display: ~azure.mgmt.storage.v2018_02_01.models.OperationDisplay
    :param origin: The origin of operations.
    :type origin: str
    :param service_specification: One property of operation, including metric specifications.
    :type service_specification: ~azure.mgmt.storage.v2018_02_01.models.ServiceSpecification
    """

    # Note: service_specification lives under the "properties" envelope on the wire.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ServiceSpecification'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display: Optional["OperationDisplay"] = None,
        origin: Optional[str] = None,
        service_specification: Optional["ServiceSpecification"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.display = display
        self.origin = origin
        self.service_specification = service_specification
class OperationDisplay(msrest.serialization.Model):
    """Display metadata associated with an operation.

    :param provider: Service provider: Microsoft Storage.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: Type of operation: get, read, delete, etc.
    :type operation: str
    :param description: Description of the operation.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationListResult(msrest.serialization.Model):
    """Result of the request to list Storage operations.

    Contains a list of operations and a URL link to get the next set of results.

    :param value: List of Storage operations supported by the Storage resource provider.
    :type value: list[~azure.mgmt.storage.v2018_02_01.models.Operation]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
    }

    def __init__(self, *, value: Optional[List["Operation"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
class Restriction(msrest.serialization.Model):
    """The restriction because of which a SKU cannot be used.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar type: The type of restrictions. As of now the only possible value is location.
    :vartype type: str
    :ivar values: The value of restrictions. If the restriction type is set to location,
     this would be the different locations where the SKU is restricted.
    :vartype values: list[str]
    :param reason_code: The reason for the restriction: "QuotaId" when the subscription
     lacks the required quota, or "NotAvailableForSubscription" for capacity at DC.
     Possible values include: "QuotaId", "NotAvailableForSubscription".
    :type reason_code: str or ~azure.mgmt.storage.v2018_02_01.models.ReasonCode
    """

    _validation = {
        'type': {'readonly': True},
        'values': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'values': {'key': 'values', 'type': '[str]'},
        'reason_code': {'key': 'reasonCode', 'type': 'str'},
    }

    def __init__(self, *, reason_code: Optional[Union[str, "ReasonCode"]] = None, **kwargs):
        super().__init__(**kwargs)
        # type/values are read-only and populated by the server.
        self.type = None
        self.values = None
        self.reason_code = reason_code
class ServiceSasParameters(msrest.serialization.Model):
    """Parameters to list service SAS credentials of a specific resource.

    All required parameters must be populated in order to send to Azure.

    :param canonicalized_resource: Required. The canonical path to the signed resource.
    :type canonicalized_resource: str
    :param resource: The signed services accessible with the service SAS:
     Blob (b), Container (c), File (f), Share (s). Possible values include:
     "b", "c", "f", "s".
    :type resource: str or ~azure.mgmt.storage.v2018_02_01.models.SignedResource
    :param permissions: The signed permissions for the service SAS: Read (r), Write (w),
     Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible
     values include: "r", "d", "w", "l", "a", "c", "u", "p".
    :type permissions: str or ~azure.mgmt.storage.v2018_02_01.models.Permissions
    :param ip_address_or_range: An IP address or range of IP addresses from which to
     accept requests.
    :type ip_address_or_range: str
    :param protocols: The protocol permitted for a request made with the account SAS.
     Possible values include: "https,http", "https".
    :type protocols: str or ~azure.mgmt.storage.v2018_02_01.models.HttpProtocol
    :param shared_access_start_time: The time at which the SAS becomes valid.
    :type shared_access_start_time: ~datetime.datetime
    :param shared_access_expiry_time: The time at which the shared access signature
     becomes invalid.
    :type shared_access_expiry_time: ~datetime.datetime
    :param identifier: A unique value, up to 64 characters, that correlates to an
     access policy specified for the container, queue, or table.
    :type identifier: str
    :param partition_key_start: The start of partition key.
    :type partition_key_start: str
    :param partition_key_end: The end of partition key.
    :type partition_key_end: str
    :param row_key_start: The start of row key.
    :type row_key_start: str
    :param row_key_end: The end of row key.
    :type row_key_end: str
    :param key_to_sign: The key to sign the account SAS token with.
    :type key_to_sign: str
    :param cache_control: The response header override for cache control.
    :type cache_control: str
    :param content_disposition: The response header override for content disposition.
    :type content_disposition: str
    :param content_encoding: The response header override for content encoding.
    :type content_encoding: str
    :param content_language: The response header override for content language.
    :type content_language: str
    :param content_type: The response header override for content type.
    :type content_type: str
    """

    _validation = {
        'canonicalized_resource': {'required': True},
        'identifier': {'max_length': 64, 'min_length': 0},
    }

    # Wire keys follow the SAS query-parameter naming (signedResource, signedIp, ...).
    _attribute_map = {
        'canonicalized_resource': {'key': 'canonicalizedResource', 'type': 'str'},
        'resource': {'key': 'signedResource', 'type': 'str'},
        'permissions': {'key': 'signedPermission', 'type': 'str'},
        'ip_address_or_range': {'key': 'signedIp', 'type': 'str'},
        'protocols': {'key': 'signedProtocol', 'type': 'str'},
        'shared_access_start_time': {'key': 'signedStart', 'type': 'iso-8601'},
        'shared_access_expiry_time': {'key': 'signedExpiry', 'type': 'iso-8601'},
        'identifier': {'key': 'signedIdentifier', 'type': 'str'},
        'partition_key_start': {'key': 'startPk', 'type': 'str'},
        'partition_key_end': {'key': 'endPk', 'type': 'str'},
        'row_key_start': {'key': 'startRk', 'type': 'str'},
        'row_key_end': {'key': 'endRk', 'type': 'str'},
        'key_to_sign': {'key': 'keyToSign', 'type': 'str'},
        'cache_control': {'key': 'rscc', 'type': 'str'},
        'content_disposition': {'key': 'rscd', 'type': 'str'},
        'content_encoding': {'key': 'rsce', 'type': 'str'},
        'content_language': {'key': 'rscl', 'type': 'str'},
        'content_type': {'key': 'rsct', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        canonicalized_resource: str,
        resource: Optional[Union[str, "SignedResource"]] = None,
        permissions: Optional[Union[str, "Permissions"]] = None,
        ip_address_or_range: Optional[str] = None,
        protocols: Optional[Union[str, "HttpProtocol"]] = None,
        shared_access_start_time: Optional[datetime.datetime] = None,
        shared_access_expiry_time: Optional[datetime.datetime] = None,
        identifier: Optional[str] = None,
        partition_key_start: Optional[str] = None,
        partition_key_end: Optional[str] = None,
        row_key_start: Optional[str] = None,
        row_key_end: Optional[str] = None,
        key_to_sign: Optional[str] = None,
        cache_control: Optional[str] = None,
        content_disposition: Optional[str] = None,
        content_encoding: Optional[str] = None,
        content_language: Optional[str] = None,
        content_type: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.canonicalized_resource = canonicalized_resource
        self.resource = resource
        self.permissions = permissions
        self.ip_address_or_range = ip_address_or_range
        self.protocols = protocols
        self.shared_access_start_time = shared_access_start_time
        self.shared_access_expiry_time = shared_access_expiry_time
        self.identifier = identifier
        self.partition_key_start = partition_key_start
        self.partition_key_end = partition_key_end
        self.row_key_start = row_key_start
        self.row_key_end = row_key_end
        self.key_to_sign = key_to_sign
        self.cache_control = cache_control
        self.content_disposition = content_disposition
        self.content_encoding = content_encoding
        self.content_language = content_language
        self.content_type = content_type
class ServiceSpecification(msrest.serialization.Model):
    """One property of operation: the metric specifications.

    :param metric_specifications: Metric specifications of operation.
    :type metric_specifications: list[~azure.mgmt.storage.v2018_02_01.models.MetricSpecification]
    """

    _attribute_map = {
        'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
    }

    def __init__(self, *, metric_specifications: Optional[List["MetricSpecification"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.metric_specifications = metric_specifications
class Sku(msrest.serialization.Model):
    """The SKU of the storage account.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :param name: Required. The sku name. Required for account creation; optional for
     update. In older versions, sku name was called accountType. Possible values include:
     "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS".
    :type name: str or ~azure.mgmt.storage.v2018_02_01.models.SkuName
    :ivar tier: The sku tier, derived from the SKU name. Possible values include:
     "Standard", "Premium".
    :vartype tier: str or ~azure.mgmt.storage.v2018_02_01.models.SkuTier
    :ivar resource_type: The type of the resource, usually 'storageAccounts'.
    :vartype resource_type: str
    :ivar kind: Indicates the type of storage account. Possible values include:
     "Storage", "StorageV2", "BlobStorage".
    :vartype kind: str or ~azure.mgmt.storage.v2018_02_01.models.Kind
    :ivar locations: The set of locations the SKU is available in: supported and
     registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.).
    :vartype locations: list[str]
    :ivar capabilities: The capability information in the specified sku, including
     file encryption, network acls, change notification, etc.
    :vartype capabilities: list[~azure.mgmt.storage.v2018_02_01.models.SKUCapability]
    :param restrictions: The restrictions because of which the SKU cannot be used.
     Empty if there are no restrictions.
    :type restrictions: list[~azure.mgmt.storage.v2018_02_01.models.Restriction]
    """

    _validation = {
        'name': {'required': True},
        'tier': {'readonly': True},
        'resource_type': {'readonly': True},
        'kind': {'readonly': True},
        'locations': {'readonly': True},
        'capabilities': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'locations': {'key': 'locations', 'type': '[str]'},
        'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
        'restrictions': {'key': 'restrictions', 'type': '[Restriction]'},
    }

    def __init__(
        self,
        *,
        name: Union[str, "SkuName"],
        restrictions: Optional[List["Restriction"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        # Server-populated read-only attributes.
        self.tier = None
        self.resource_type = None
        self.kind = None
        self.locations = None
        self.capabilities = None
        self.restrictions = restrictions
class SKUCapability(msrest.serialization.Model):
    """Capability information in the specified sku, including file encryption,
    network acls, change notification, etc.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The name of the capability.
    :vartype name: str
    :ivar value: A string indicating the state of the capability, e.g. 'true' or 'false'.
    :vartype value: str
    """

    _validation = {
        'name': {'readonly': True},
        'value': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both attributes are read-only and populated by the server.
        self.name = None
        self.value = None
class TrackedResource(Resource):
    """Azure Resource Manager tracked top-level resource: a Resource that also
    carries 'tags' and a 'location'.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
        self.location = location
class StorageAccount(TrackedResource):
    """The storage account.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :ivar sku: Gets the SKU.
    :vartype sku: ~azure.mgmt.storage.v2018_02_01.models.Sku
    :ivar kind: Gets the Kind. Possible values include: "Storage", "StorageV2",
     "BlobStorage".
    :vartype kind: str or ~azure.mgmt.storage.v2018_02_01.models.Kind
    :param identity: The identity of the resource.
    :type identity: ~azure.mgmt.storage.v2018_02_01.models.Identity
    :ivar provisioning_state: Status of the storage account at the time the operation
     was called. Possible values include: "Creating", "ResolvingDNS", "Succeeded".
    :vartype provisioning_state: str or ~azure.mgmt.storage.v2018_02_01.models.ProvisioningState
    :ivar primary_endpoints: URLs used to retrieve a public blob, queue, or table
     object. Note that Standard_ZRS and Premium_LRS accounts only return the blob
     endpoint.
    :vartype primary_endpoints: ~azure.mgmt.storage.v2018_02_01.models.Endpoints
    :ivar primary_location: Location of the primary data center for the storage account.
    :vartype primary_location: str
    :ivar status_of_primary: Whether the primary location of the storage account is
     available or unavailable. Possible values include: "available", "unavailable".
    :vartype status_of_primary: str or ~azure.mgmt.storage.v2018_02_01.models.AccountStatus
    :ivar last_geo_failover_time: Timestamp of the most recent failover to the
     secondary location; only the most recent timestamp is retained. Not returned if
     there has never been a failover. Only available if accountType is Standard_GRS
     or Standard_RAGRS.
    :vartype last_geo_failover_time: ~datetime.datetime
    :ivar secondary_location: Location of the geo-replicated secondary for the storage
     account. Only available if accountType is Standard_GRS or Standard_RAGRS.
    :vartype secondary_location: str
    :ivar status_of_secondary: Whether the secondary location of the storage account is
     available or unavailable. Only available if the SKU name is Standard_GRS or
     Standard_RAGRS. Possible values include: "available", "unavailable".
    :vartype status_of_secondary: str or ~azure.mgmt.storage.v2018_02_01.models.AccountStatus
    :ivar creation_time: Creation date and time of the storage account in UTC.
    :vartype creation_time: ~datetime.datetime
    :ivar custom_domain: Custom domain the user assigned to this storage account.
    :vartype custom_domain: ~azure.mgmt.storage.v2018_02_01.models.CustomDomain
    :ivar secondary_endpoints: URLs used to retrieve a public blob, queue, or table
     object from the secondary location. Only available if the SKU name is
     Standard_RAGRS.
    :vartype secondary_endpoints: ~azure.mgmt.storage.v2018_02_01.models.Endpoints
    :ivar encryption: Encryption settings on the account. If unspecified, the account
     is unencrypted.
    :vartype encryption: ~azure.mgmt.storage.v2018_02_01.models.Encryption
    :ivar access_tier: Required for storage accounts where kind = BlobStorage. The
     access tier used for billing. Possible values include: "Hot", "Cool".
    :vartype access_tier: str or ~azure.mgmt.storage.v2018_02_01.models.AccessTier
    :param enable_https_traffic_only: Allows https traffic only to storage service if
     set to true.
    :type enable_https_traffic_only: bool
    :ivar network_rule_set: Network rule set.
    :vartype network_rule_set: ~azure.mgmt.storage.v2018_02_01.models.NetworkRuleSet
    :param is_hns_enabled: Account HierarchicalNamespace enabled if set to true.
    :type is_hns_enabled: bool
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'readonly': True},
        'kind': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'primary_endpoints': {'readonly': True},
        'primary_location': {'readonly': True},
        'status_of_primary': {'readonly': True},
        'last_geo_failover_time': {'readonly': True},
        'secondary_location': {'readonly': True},
        'status_of_secondary': {'readonly': True},
        'creation_time': {'readonly': True},
        'custom_domain': {'readonly': True},
        'secondary_endpoints': {'readonly': True},
        'encryption': {'readonly': True},
        'access_tier': {'readonly': True},
        'network_rule_set': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'kind': {'key': 'kind', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'Identity'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'primary_endpoints': {'key': 'properties.primaryEndpoints', 'type': 'Endpoints'},
        'primary_location': {'key': 'properties.primaryLocation', 'type': 'str'},
        'status_of_primary': {'key': 'properties.statusOfPrimary', 'type': 'str'},
        'last_geo_failover_time': {'key': 'properties.lastGeoFailoverTime', 'type': 'iso-8601'},
        'secondary_location': {'key': 'properties.secondaryLocation', 'type': 'str'},
        'status_of_secondary': {'key': 'properties.statusOfSecondary', 'type': 'str'},
        'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
        'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
        'secondary_endpoints': {'key': 'properties.secondaryEndpoints', 'type': 'Endpoints'},
        'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
        'access_tier': {'key': 'properties.accessTier', 'type': 'str'},
        'enable_https_traffic_only': {'key': 'properties.supportsHttpsTrafficOnly', 'type': 'bool'},
        'network_rule_set': {'key': 'properties.networkAcls', 'type': 'NetworkRuleSet'},
        'is_hns_enabled': {'key': 'properties.isHnsEnabled', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        identity: Optional["Identity"] = None,
        enable_https_traffic_only: Optional[bool] = False,
        is_hns_enabled: Optional[bool] = False,
        **kwargs
    ):
        # tags/location are handled by the TrackedResource base class.
        super().__init__(tags=tags, location=location, **kwargs)
        # All attributes set to None below are read-only, populated by the server.
        self.sku = None
        self.kind = None
        self.identity = identity
        self.provisioning_state = None
        self.primary_endpoints = None
        self.primary_location = None
        self.status_of_primary = None
        self.last_geo_failover_time = None
        self.secondary_location = None
        self.status_of_secondary = None
        self.creation_time = None
        self.custom_domain = None
        self.secondary_endpoints = None
        self.encryption = None
        self.access_tier = None
        self.enable_https_traffic_only = enable_https_traffic_only
        self.network_rule_set = None
        self.is_hns_enabled = is_hns_enabled
class StorageAccountCheckNameAvailabilityParameters(msrest.serialization.Model):
    """Parameters used to check the availability of a storage account name.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :param name: Required. The storage account name.
    :type name: str
    :ivar type: Required. The type of resource, Microsoft.Storage/storageAccounts.
     Default value: "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # Constant resource type; serialized as-is for every instance.
    type = "Microsoft.Storage/storageAccounts"

    def __init__(self, *, name: str, **kwargs):
        super().__init__(**kwargs)
        self.name = name
class StorageAccountCreateParameters(msrest.serialization.Model):
    """Parameters used when creating a storage account.

    All required parameters must be populated in order to send to Azure.

    :param sku: Required. The sku name.
    :type sku: ~azure.mgmt.storage.v2018_02_01.models.Sku
    :param kind: Required. Indicates the type of storage account. Possible values
     include: "Storage", "StorageV2", "BlobStorage".
    :type kind: str or ~azure.mgmt.storage.v2018_02_01.models.Kind
    :param location: Required. The location of the resource: one of the supported and
     registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The
     geo region of a resource cannot be changed once it is created, but if an identical
     geo region is specified on update, the request will succeed.
    :type location: str
    :param tags: A set of tags. Key-value pairs that describe the resource, usable for
     viewing and grouping across resource groups. A maximum of 15 tags can be provided;
     each key is at most 128 characters and each value at most 256 characters.
    :type tags: dict[str, str]
    :param identity: The identity of the resource.
    :type identity: ~azure.mgmt.storage.v2018_02_01.models.Identity
    :param custom_domain: User domain assigned to the storage account. Name is the
     CNAME source. Only one custom domain is supported per storage account at this
     time. To clear the existing custom domain, use an empty string for the custom
     domain name property.
    :type custom_domain: ~azure.mgmt.storage.v2018_02_01.models.CustomDomain
    :param encryption: Encryption settings on the account. If left unspecified, the
     account encryption settings remain the same. The default setting is unencrypted.
    :type encryption: ~azure.mgmt.storage.v2018_02_01.models.Encryption
    :param network_rule_set: Network rule set.
    :type network_rule_set: ~azure.mgmt.storage.v2018_02_01.models.NetworkRuleSet
    :param access_tier: Required for storage accounts where kind = BlobStorage. The
     access tier used for billing. Possible values include: "Hot", "Cool".
    :type access_tier: str or ~azure.mgmt.storage.v2018_02_01.models.AccessTier
    :param enable_https_traffic_only: Allows https traffic only to storage service if
     set to true.
    :type enable_https_traffic_only: bool
    :param is_hns_enabled: Account HierarchicalNamespace enabled if set to true.
    :type is_hns_enabled: bool
    """

    _validation = {
        'sku': {'required': True},
        'kind': {'required': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'sku': {'key': 'sku', 'type': 'Sku'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'Identity'},
        'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
        'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
        'network_rule_set': {'key': 'properties.networkAcls', 'type': 'NetworkRuleSet'},
        'access_tier': {'key': 'properties.accessTier', 'type': 'str'},
        'enable_https_traffic_only': {'key': 'properties.supportsHttpsTrafficOnly', 'type': 'bool'},
        'is_hns_enabled': {'key': 'properties.isHnsEnabled', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        sku: "Sku",
        kind: Union[str, "Kind"],
        location: str,
        tags: Optional[Dict[str, str]] = None,
        identity: Optional["Identity"] = None,
        custom_domain: Optional["CustomDomain"] = None,
        encryption: Optional["Encryption"] = None,
        network_rule_set: Optional["NetworkRuleSet"] = None,
        access_tier: Optional[Union[str, "AccessTier"]] = None,
        enable_https_traffic_only: Optional[bool] = False,
        is_hns_enabled: Optional[bool] = False,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.sku = sku
        self.kind = kind
        self.location = location
        self.tags = tags
        self.identity = identity
        self.custom_domain = custom_domain
        self.encryption = encryption
        self.network_rule_set = network_rule_set
        self.access_tier = access_tier
        self.enable_https_traffic_only = enable_https_traffic_only
        self.is_hns_enabled = is_hns_enabled
class StorageAccountKey(msrest.serialization.Model):
    """One access key of a storage account.

    All fields are populated by the server and are ignored when sending a request.

    :ivar key_name: Name of the key.
    :vartype key_name: str
    :ivar value: Base 64-encoded value of the key.
    :vartype value: str
    :ivar permissions: Permissions for the key -- read-only or full permissions.
     Possible values include: "Read", "Full".
    :vartype permissions: str or ~azure.mgmt.storage.v2018_02_01.models.KeyPermission
    """

    _validation = {
        'key_name': {'readonly': True},
        'value': {'readonly': True},
        'permissions': {'readonly': True},
    }

    _attribute_map = {
        'key_name': {'key': 'keyName', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'permissions': {'key': 'permissions', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated, read-only fields start out empty.
        self.key_name = self.value = self.permissions = None
class StorageAccountListKeysResult(msrest.serialization.Model):
    """Response body of the ListKeys operation.

    All fields are populated by the server and are ignored when sending a request.

    :ivar keys: The list of storage account keys and their properties for the
     specified storage account.
    :vartype keys: list[~azure.mgmt.storage.v2018_02_01.models.StorageAccountKey]
    """

    _validation = {
        'keys': {'readonly': True},
    }

    _attribute_map = {
        'keys': {'key': 'keys', 'type': '[StorageAccountKey]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; filled in by the service on response deserialization.
        self.keys = None
class StorageAccountListResult(msrest.serialization.Model):
    """Response body of the List Storage Accounts operation.

    All fields are populated by the server and are ignored when sending a request.

    :ivar value: The list of storage accounts and their properties.
    :vartype value: list[~azure.mgmt.storage.v2018_02_01.models.StorageAccount]
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[StorageAccount]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; filled in by the service on response deserialization.
        self.value = None
class StorageAccountRegenerateKeyParameters(msrest.serialization.Model):
    """Request body used to regenerate a storage account key.

    All required parameters must be populated in order to send to Azure.

    :param key_name: Required. The name of the storage key to regenerate;
     possible values are key1, key2.
    :type key_name: str
    """

    _validation = {
        'key_name': {'required': True},
    }

    _attribute_map = {
        'key_name': {'key': 'keyName', 'type': 'str'},
    }

    def __init__(self, *, key_name: str, **kwargs):
        super().__init__(**kwargs)
        self.key_name = key_name
class StorageAccountUpdateParameters(msrest.serialization.Model):
    """Properties that may be supplied when updating a storage account.

    :param sku: The SKU name. Note that the SKU name cannot be updated to Standard_ZRS
     or Premium_LRS, nor can accounts of those sku names be updated to any other value.
    :type sku: ~azure.mgmt.storage.v2018_02_01.models.Sku
    :param tags: A set of key/value pairs describing the resource, usable for viewing
     and grouping (across resource groups). At most 15 tags per resource; each key is
     at most 128 characters and each value at most 256 characters.
    :type tags: dict[str, str]
    :param identity: The identity of the resource.
    :type identity: ~azure.mgmt.storage.v2018_02_01.models.Identity
    :param kind: Optional. Indicates the type of storage account. Currently only
     StorageV2 value supported by server. Possible values include: "Storage",
     "StorageV2", "BlobStorage".
    :type kind: str or ~azure.mgmt.storage.v2018_02_01.models.Kind
    :param custom_domain: Custom domain assigned to the storage account by the user.
     Name is the CNAME source. Only one custom domain is supported per storage account
     at this time. To clear the existing custom domain, use an empty string for the
     custom domain name property.
    :type custom_domain: ~azure.mgmt.storage.v2018_02_01.models.CustomDomain
    :param encryption: Encryption settings for the account. The default is unencrypted.
    :type encryption: ~azure.mgmt.storage.v2018_02_01.models.Encryption
    :param access_tier: Required for storage accounts where kind = BlobStorage. The
     access tier used for billing. Possible values include: "Hot", "Cool".
    :type access_tier: str or ~azure.mgmt.storage.v2018_02_01.models.AccessTier
    :param enable_https_traffic_only: Allows https traffic only to storage service
     when set to true.
    :type enable_https_traffic_only: bool
    :param network_rule_set: Network rule set.
    :type network_rule_set: ~azure.mgmt.storage.v2018_02_01.models.NetworkRuleSet
    """

    _attribute_map = {
        'sku': {'key': 'sku', 'type': 'Sku'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'Identity'},
        'kind': {'key': 'kind', 'type': 'str'},
        'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
        'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
        'access_tier': {'key': 'properties.accessTier', 'type': 'str'},
        'enable_https_traffic_only': {'key': 'properties.supportsHttpsTrafficOnly', 'type': 'bool'},
        'network_rule_set': {'key': 'properties.networkAcls', 'type': 'NetworkRuleSet'},
    }

    def __init__(
        self,
        *,
        sku: Optional["Sku"] = None,
        tags: Optional[Dict[str, str]] = None,
        identity: Optional["Identity"] = None,
        kind: Optional[Union[str, "Kind"]] = None,
        custom_domain: Optional["CustomDomain"] = None,
        encryption: Optional["Encryption"] = None,
        access_tier: Optional[Union[str, "AccessTier"]] = None,
        enable_https_traffic_only: Optional[bool] = False,
        network_rule_set: Optional["NetworkRuleSet"] = None,
        **kwargs
    ):
        # Every property is optional on update; unspecified ones are left alone
        # by the service.
        super().__init__(**kwargs)
        self.sku = sku
        self.tags = tags
        self.identity = identity
        self.kind = kind
        self.custom_domain = custom_domain
        self.encryption = encryption
        self.access_tier = access_tier
        self.enable_https_traffic_only = enable_https_traffic_only
        self.network_rule_set = network_rule_set
class StorageSkuListResult(msrest.serialization.Model):
    """Response body of the List Storage SKUs operation.

    All fields are populated by the server and are ignored when sending a request.

    :ivar value: The list result of storage SKUs and their properties.
    :vartype value: list[~azure.mgmt.storage.v2018_02_01.models.Sku]
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Sku]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; filled in by the service on response deserialization.
        self.value = None
class TagProperty(msrest.serialization.Model):
    """A tag of the LegalHold of a blob container.

    All fields are populated by the server and are ignored when sending a request.

    :ivar tag: The tag value.
    :vartype tag: str
    :ivar timestamp: The date and time the tag was added.
    :vartype timestamp: ~datetime.datetime
    :ivar object_identifier: The Object ID of the user who added the tag.
    :vartype object_identifier: str
    :ivar tenant_id: The Tenant ID that issued the token for the user who added the tag.
    :vartype tenant_id: str
    :ivar upn: The User Principal Name of the user who added the tag.
    :vartype upn: str
    """

    _validation = {
        'tag': {'readonly': True},
        'timestamp': {'readonly': True},
        'object_identifier': {'readonly': True},
        'tenant_id': {'readonly': True},
        'upn': {'readonly': True},
    }

    _attribute_map = {
        'tag': {'key': 'tag', 'type': 'str'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'object_identifier': {'key': 'objectIdentifier', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'upn': {'key': 'upn', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are read-only and populated from the response payload.
        self.tag = self.timestamp = self.object_identifier = None
        self.tenant_id = self.upn = None
class UpdateHistoryProperty(msrest.serialization.Model):
    """One entry in the update history of a blob container's ImmutabilityPolicy.

    All fields are populated by the server and are ignored when sending a request.

    :ivar update: The ImmutabilityPolicy update type of a blob container; possible
     values include: put, lock and extend. Possible values include: "put", "lock",
     "extend".
    :vartype update: str or ~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyUpdateType
    :ivar immutability_period_since_creation_in_days: The immutability period for the
     blobs in the container since the policy creation, in days.
    :vartype immutability_period_since_creation_in_days: int
    :ivar timestamp: The date and time the ImmutabilityPolicy was updated.
    :vartype timestamp: ~datetime.datetime
    :ivar object_identifier: The Object ID of the user who updated the ImmutabilityPolicy.
    :vartype object_identifier: str
    :ivar tenant_id: The Tenant ID that issued the token for the user who updated the
     ImmutabilityPolicy.
    :vartype tenant_id: str
    :ivar upn: The User Principal Name of the user who updated the ImmutabilityPolicy.
    :vartype upn: str
    """

    _validation = {
        'update': {'readonly': True},
        'immutability_period_since_creation_in_days': {'readonly': True},
        'timestamp': {'readonly': True},
        'object_identifier': {'readonly': True},
        'tenant_id': {'readonly': True},
        'upn': {'readonly': True},
    }

    _attribute_map = {
        'update': {'key': 'update', 'type': 'str'},
        'immutability_period_since_creation_in_days': {'key': 'immutabilityPeriodSinceCreationInDays', 'type': 'int'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'object_identifier': {'key': 'objectIdentifier', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'upn': {'key': 'upn', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are read-only and populated from the response payload.
        self.update = self.immutability_period_since_creation_in_days = None
        self.timestamp = self.object_identifier = None
        self.tenant_id = self.upn = None
class Usage(msrest.serialization.Model):
    """Describes Storage Resource Usage.

    All fields are populated by the server and are ignored when sending a request.

    :ivar unit: The unit of measurement. Possible values include: "Count", "Bytes",
     "Seconds", "Percent", "CountsPerSecond", "BytesPerSecond".
    :vartype unit: str or ~azure.mgmt.storage.v2018_02_01.models.UsageUnit
    :ivar current_value: The current count of the allocated resources in the subscription.
    :vartype current_value: int
    :ivar limit: The maximum count of the resources that can be allocated in the
     subscription.
    :vartype limit: int
    :ivar name: The name of the type of usage.
    :vartype name: ~azure.mgmt.storage.v2018_02_01.models.UsageName
    """

    _validation = {
        'unit': {'readonly': True},
        'current_value': {'readonly': True},
        'limit': {'readonly': True},
        'name': {'readonly': True},
    }

    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'int'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are read-only and populated from the response payload.
        self.unit = self.current_value = self.limit = self.name = None
class UsageListResult(msrest.serialization.Model):
    """Response body of the List Usages operation.

    :param value: The list of Storage Resource Usages.
    :type value: list[~azure.mgmt.storage.v2018_02_01.models.Usage]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
    }

    def __init__(self, *, value: Optional[List["Usage"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
class UsageName(msrest.serialization.Model):
    """Usage names that can be used; currently limited to StorageAccount.

    All fields are populated by the server and are ignored when sending a request.

    :ivar value: A string describing the resource name.
    :vartype value: str
    :ivar localized_value: A localized string describing the resource name.
    :vartype localized_value: str
    """

    _validation = {
        'value': {'readonly': True},
        'localized_value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; filled in by the service on response deserialization.
        self.value = self.localized_value = None
class VirtualNetworkRule(msrest.serialization.Model):
    """Virtual Network rule.

    Variables are only populated by the server, and will be ignored when sending a
    request. All required parameters must be populated in order to send to Azure.

    :param virtual_network_resource_id: Required. Resource ID of a subnet, for example:
     /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
    :type virtual_network_resource_id: str
    :ivar action: The action of virtual network rule. Default value: "Allow".
    :vartype action: str
    :param state: The state of the virtual network rule. Possible values include:
     "provisioning", "deprovisioning", "succeeded", "failed", "networkSourceDeleted".
    :type state: str or ~azure.mgmt.storage.v2018_02_01.models.State
    """

    _validation = {
        'virtual_network_resource_id': {'required': True},
        'action': {'constant': True},
    }

    _attribute_map = {
        'virtual_network_resource_id': {'key': 'id', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
    }

    # Constant in this API version: the only action the service accepts.
    action = "Allow"

    def __init__(self, *, virtual_network_resource_id: str, state: Optional[Union[str, "State"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.virtual_network_resource_id = virtual_network_resource_id
        self.state = state
| mit |
sudheesh001/RFID-DBSync | venv/lib/python2.7/site-packages/setuptools/command/build_py.py | 110 | 8495 | import os
import sys
import fnmatch
import textwrap
import distutils.command.build_py as orig
from distutils.util import convert_path
from glob import glob
# Prefer the real 2to3 mixin; when lib2to3 support is unavailable, fall back
# to a stand-in whose run_2to3 is a no-op so build_py still works.
try:
    from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
    class Mixin2to3:
        def run_2to3(self, files, doctests=True):
            "do nothing"
class build_py(orig.build_py, Mixin2to3):
    """Enhanced 'build_py' command that includes data files with packages

    The data files are specified via a 'package_data' argument to 'setup()'.
    See 'setuptools.dist.Distribution' for more details.

    Also, this version of the 'build_py' command allows you to specify both
    'py_modules' and 'packages' in the same setup operation.
    """

    def finalize_options(self):
        orig.build_py.finalize_options(self)
        # Pull the (exclude_)package_data mappings off the distribution.
        self.package_data = self.distribution.package_data
        self.exclude_package_data = self.distribution.exclude_package_data or {}
        # Drop any cached 'data_files' so __getattr__ recomputes it lazily.
        if 'data_files' in self.__dict__: del self.__dict__['data_files']
        self.__updated_files = []
        self.__doctests_2to3 = []

    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            return

        if self.py_modules:
            self.build_modules()

        if self.packages:
            self.build_packages()
            self.build_package_data()

        # Run 2to3 over freshly-copied sources (and doctest files) in place.
        self.run_2to3(self.__updated_files, False)
        self.run_2to3(self.__updated_files, True)
        self.run_2to3(self.__doctests_2to3, True)

        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))

    def __getattr__(self, attr):
        if attr=='data_files':  # lazily compute data files
            self.data_files = files = self._get_data_files()
            return files
        return orig.build_py.__getattr__(self,attr)

    def build_module(self, module, module_file, package):
        outfile, copied = orig.build_py.build_module(self, module, module_file, package)
        # Remember copied modules so they can be fed to 2to3 afterwards.
        if copied:
            self.__updated_files.append(outfile)
        return outfile, copied

    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        self.analyze_manifest()
        data = []
        for package in self.packages or ():
            # Locate package source directory
            src_dir = self.get_package_dir(package)

            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))

            # Length of path to strip from found files
            plen = len(src_dir)+1

            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
            ]
            data.append((package, src_dir, build_dir, filenames))
        return data

    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        # Patterns from the catch-all '' key apply to every package.
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = self.manifest_files.get(package, [])[:]
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
        return self.exclude_data_files(package, src_dir, files)

    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
                # Doctest text files registered for conversion also go
                # through 2to3 after copying.
                if copied and srcfile in self.distribution.convert_2to3_doctests:
                    self.__doctests_2to3.append(outf)

    def analyze_manifest(self):
        # Build a {package: [data file paths]} map from the egg-info manifest.
        self.manifest_files = mf = {}
        if not self.distribution.include_package_data:
            return
        src_dirs = {}
        for package in self.packages or ():
            # Locate package source directory
            src_dirs[assert_relative(self.get_package_dir(package))] = package

        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        for path in ei_cmd.filelist.files:
            d,f = os.path.split(assert_relative(path))
            prev = None
            oldf = f
            # Walk up the directory tree until we land inside a known
            # package source directory (or run out of parents).
            while d and d!=prev and d not in src_dirs:
                prev = d
                d, df = os.path.split(d)
                f = os.path.join(df, f)
            if d in src_dirs:
                if path.endswith('.py') and f==oldf:
                    continue    # it's a module, not data
                mf.setdefault(src_dirs[d],[]).append(path)

    def get_data_files(self): pass  # kludge 2.4 for lazy computation

    if sys.version<"2.4":    # Python 2.4 already has this code
        def get_outputs(self, include_bytecode=1):
            """Return complete list of files copied to the build directory

            This includes both '.py' files and data files, as well as '.pyc'
            and '.pyo' files if 'include_bytecode' is true.  (This method is
            needed for the 'install_lib' command to do its job properly, and to
            generate a correct installation manifest.)
            """
            return orig.build_py.get_outputs(self, include_bytecode) + [
                os.path.join(build_dir, filename)
                for package, src_dir, build_dir,filenames in self.data_files
                for filename in filenames
            ]

    def check_package(self, package, package_dir):
        """Check namespace packages' __init__ for declare_namespace"""
        try:
            return self.packages_checked[package]
        except KeyError:
            pass

        init_py = orig.build_py.check_package(self, package, package_dir)
        self.packages_checked[package] = init_py

        if not init_py or not self.distribution.namespace_packages:
            return init_py

        # Only inspect __init__.py files of declared namespace packages
        # (or their parents).
        for pkg in self.distribution.namespace_packages:
            if pkg==package or pkg.startswith(package+'.'):
                break
        else:
            return init_py

        f = open(init_py,'rbU')
        if 'declare_namespace'.encode() not in f.read():
            from distutils.errors import DistutilsError
            raise DistutilsError(
                "Namespace package problem: %s is a namespace package, but its\n"
                "__init__.py does not call declare_namespace()! Please fix it.\n"
                '(See the setuptools manual under "Namespace Packages" for '
                "details.)\n" % (package,)
            )
        f.close()
        return init_py

    def initialize_options(self):
        # Memoization cache for check_package results.
        self.packages_checked={}
        orig.build_py.initialize_options(self)

    def get_package_dir(self, package):
        res = orig.build_py.get_package_dir(self, package)
        # Honor an alternate source root when the distribution defines one.
        if self.distribution.src_root is not None:
            return os.path.join(self.distribution.src_root, res)
        return res

    def exclude_data_files(self, package, src_dir, files):
        """Filter filenames for package's data files in 'src_dir'"""
        globs = (self.exclude_package_data.get('', [])
                 + self.exclude_package_data.get(package, []))
        bad = []
        for pattern in globs:
            bad.extend(
                fnmatch.filter(
                    files, os.path.join(src_dir, convert_path(pattern))
                )
            )
        bad = dict.fromkeys(bad)
        seen = {}
        # Keep files not excluded, dropping duplicates while preserving order.
        return [
            f for f in files if f not in bad
            and f not in seen and seen.setdefault(f,1)  # ditch dupes
        ]
def assert_relative(path):
    """Return *path* unchanged if it is relative; abort the setup otherwise.

    Absolute paths in setup() arguments are always a mistake, so raise
    DistutilsSetupError with an explanatory message for them.
    """
    if os.path.isabs(path):
        from distutils.errors import DistutilsSetupError
        msg = textwrap.dedent("""
            Error: setup script specifies an absolute path:

                %s

            setup() arguments must *always* be /-separated paths relative to the
            setup.py directory, *never* absolute paths.
            """).lstrip() % path
        raise DistutilsSetupError(msg)
    return path
| gpl-2.0 |
jhunufa/ArduWatchRaspSerial | virtualenv/lib/python3.4/site-packages/pip/_vendor/distlib/markers.py | 1261 | 6282 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
    """
    A limited evaluator for Python expressions.

    Only the small subset of the AST needed by PEP 345 environment markers
    is supported (names, attributes, strings, comparisons, and/or).
    """

    # Maps lowercased AST comparison/boolean operator class names to the
    # callables that implement them.
    operators = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'gte': lambda x, y: x >= y,
        'in': lambda x, y: x in y,
        'lt': lambda x, y: x < y,
        'lte': lambda x, y: x <= y,
        'not': lambda x: not x,
        'noteq': lambda x, y: x != y,
        'notin': lambda x, y: x not in y,
    }

    # Environment values that marker expressions are allowed to reference,
    # captured once at import time.
    allowed_values = {
        'sys_platform': sys.platform,
        'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.platform is not reliable, but there is no other
        # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
        'python_full_version': sys.version.split(' ', 1)[0],
        'os_name': os.name,
        'platform_in_venv': str(in_venv()),
        'platform_release': platform.release(),
        'platform_version': platform.version(),
        'platform_machine': platform.machine(),
        'platform_python_implementation': python_implementation(),
    }

    def __init__(self, context=None):
        """
        Initialise an instance.

        :param context: If specified, names are looked up in this mapping.
        """
        self.context = context or {}
        # Remembered source text, used only for error fragments.
        self.source = None

    def get_fragment(self, offset):
        """
        Get the part of the source which is causing a problem.
        """
        fragment_len = 10
        s = '%r' % (self.source[offset:offset + fragment_len])
        if offset + fragment_len < len(self.source):
            s += '...'
        return s

    def get_handler(self, node_type):
        """
        Get a handler for the specified AST node type.
        """
        # Handlers are methods named do_<nodetype>; None if unsupported.
        return getattr(self, 'do_%s' % node_type, None)

    def evaluate(self, node, filename=None):
        """
        Evaluate a source string or node, using ``filename`` when
        displaying errors.
        """
        if isinstance(node, string_types):
            # First call: parse the source, then recurse on AST nodes.
            self.source = node
            kwargs = {'mode': 'eval'}
            if filename:
                kwargs['filename'] = filename
            try:
                node = ast.parse(node, **kwargs)
            except SyntaxError as e:
                s = self.get_fragment(e.offset)
                raise SyntaxError('syntax error %s' % s)
        node_type = node.__class__.__name__.lower()
        handler = self.get_handler(node_type)
        if handler is None:
            if self.source is None:
                s = '(source not available)'
            else:
                s = self.get_fragment(node.col_offset)
            raise SyntaxError("don't know how to evaluate %r %s" % (
                node_type, s))
        return handler(node)

    def get_attr_key(self, node):
        # Dotted name like "os.name" rebuilt from an Attribute node.
        assert isinstance(node, ast.Attribute), 'attribute node expected'
        return '%s.%s' % (node.value.id, node.attr)

    def do_attribute(self, node):
        # Attribute access is only allowed for whitelisted dotted names.
        if not isinstance(node.value, ast.Name):
            valid = False
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
        # The caller-supplied context takes precedence over defaults.
        if key in self.context:
            result = self.context[key]
        else:
            result = self.allowed_values[key]
        return result

    def do_boolop(self, node):
        # Short-circuit evaluation of chained 'and' / 'or'.
        result = self.evaluate(node.values[0])
        is_or = node.op.__class__ is ast.Or
        is_and = node.op.__class__ is ast.And
        assert is_or or is_and
        if (is_and and result) or (is_or and not result):
            for n in node.values[1:]:
                result = self.evaluate(n)
                if (is_or and result) or (is_and and not result):
                    break
        return result

    def do_compare(self, node):
        def sanity_check(lhsnode, rhsnode):
            # Comparing two string literals is meaningless in a marker,
            # so reject it outright.
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            #elif (isinstance(lhsnode, ast.Attribute)
            #      and isinstance(rhsnode, ast.Attribute)):
            #    klhs = self.get_attr_key(lhsnode)
            #    krhs = self.get_attr_key(rhsnode)
            #    valid = klhs != krhs
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)

        # Evaluate a (possibly chained) comparison left to right; every
        # link must hold for the overall result to be true.
        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                break
            lhs = rhs
            lhsnode = rhsnode
        return result

    def do_expression(self, node):
        return self.evaluate(node.body)

    def do_name(self, node):
        # Bare names resolve from the context first, then the whitelist.
        valid = False
        if node.id in self.context:
            valid = True
            result = self.context[node.id]
        elif node.id in self.allowed_values:
            valid = True
            result = self.allowed_values[node.id]
        if not valid:
            raise SyntaxError('invalid expression: %s' % node.id)
        return result

    def do_str(self, node):
        return node.s
def interpret(marker, execution_context=None):
    """
    Interpret a marker and return a result depending on environment.

    :param marker: The marker to interpret.
    :type marker: str
    :param execution_context: The context used for name lookup.
    :type execution_context: mapping
    """
    evaluator = Evaluator(execution_context)
    return evaluator.evaluate(marker.strip())
| mit |
XiaodunServerGroup/xiaodun-platform | i18n/validate.py | 8 | 7185 | """Tests that validate .po files."""
import argparse
import codecs
import logging
import os
import sys
import textwrap
import polib
from i18n.config import LOCALE_DIR
from i18n.execute import call
from i18n.converter import Converter
log = logging.getLogger(__name__)
def validate_po_files(root, report_empty=False):
    """
    Validate every .po file found under *root*.

    Each file gets a format check via msgfmt and a content check of its
    translated strings (optionally reporting empty translations).
    """
    for dirpath, __, filenames in os.walk(root):
        for candidate in filenames:
            base, extension = os.path.splitext(candidate)
            if extension.lower() != '.po':
                continue
            po_path = os.path.join(dirpath, candidate)
            # First validate the format of this file.
            msgfmt_check_po_file(po_path)
            # Then check the translated strings themselves.
            check_messages(po_path, report_empty)
def msgfmt_check_po_file(filename):
    """
    Run GNU ``msgfmt -c`` on one .po file to validate its format.

    Any errors caught by msgfmt are logged; an assertion failure follows
    if stderr was non-empty.
    """
    # Use relative paths to make output less noisy.
    relative = os.path.relpath(filename, LOCALE_DIR)
    stdout, stderr = call('msgfmt -c {}'.format(relative), working_directory=LOCALE_DIR)
    if stderr != '':
        log.info('\n' + stdout)
        log.warn('\n' + stderr)
    assert not stderr
def tags_in_string(msg):
    """
    Return the set of tags in a message string.

    Tags include HTML tags, data placeholders, etc.  Tags that may
    legitimately change under translation (HTML entities, <abbr>, and so
    on) are excluded.
    """
    def language_dependent(tag):
        """True for tags whose content can change with the language."""
        if tag.startswith("&"):
            return True
        return any(marker in tag for marker in ("<abbr>", "<abbr ", "</abbr>"))

    __, tags = Converter().detag_string(msg)
    return {t for t in tags if not language_dependent(t)}
def astral(msg):
    """Does `msg` have characters outside the Basic Multilingual Plane?"""
    for ch in msg:
        if ord(ch) > 0xFFFF:
            return True
    return False
def check_messages(filename, report_empty=False):
    """
    Checks messages in various ways:

    Translations must have the same slots as the English. Messages can't have astral
    characters in them.

    If report_empty is True, will also report empty translation strings.
    """
    # Don't check English files.
    if "/locale/en/" in filename:
        return

    # problems will be a list of tuples. Each is a description, and a msgid,
    # and then zero or more translations.
    problems = []
    pomsgs = polib.pofile(filename)
    for msg in pomsgs:
        # Check for characters Javascript can't support.
        # https://code.djangoproject.com/ticket/21725
        if astral(msg.msgstr):
            problems.append(("Non-BMP char", msg.msgid, msg.msgstr))

        if msg.msgid_plural:
            # Plurals: two strings in, N strings out.
            source = msg.msgid + " | " + msg.msgid_plural
            # Join the plural forms in a deterministic (sorted-key) order.
            translation = " | ".join(v for k, v in sorted(msg.msgstr_plural.items()))
            empty = any(not t.strip() for t in msg.msgstr_plural.values())
        else:
            # Singular: just one string in and one string out.
            source = msg.msgid
            translation = msg.msgstr
            empty = not msg.msgstr.strip()

        if empty:
            if report_empty:
                problems.append(("Empty translation", source))
        else:
            id_tags = tags_in_string(source)
            tx_tags = tags_in_string(translation)

            # Check if tags don't match
            if id_tags != tx_tags:
                # Summarize the asymmetric difference in both directions.
                id_has = u", ".join(u'"{}"'.format(t) for t in id_tags - tx_tags)
                tx_has = u", ".join(u'"{}"'.format(t) for t in tx_tags - id_tags)
                if id_has and tx_has:
                    diff = u"{} vs {}".format(id_has, tx_has)
                elif id_has:
                    diff = u"{} missing".format(id_has)
                else:
                    diff = u"{} added".format(tx_has)
                problems.append((
                    "Different tags in source and translation",
                    source,
                    translation,
                    diff
                ))

    if problems:
        # Write a human-readable .prob report next to the .po file.
        problem_file = filename.replace(".po", ".prob")
        id_filler = textwrap.TextWrapper(width=79, initial_indent=" msgid: ", subsequent_indent=" " * 9)
        tx_filler = textwrap.TextWrapper(width=79, initial_indent=" -----> ", subsequent_indent=" " * 9)
        with codecs.open(problem_file, "w", encoding="utf8") as prob_file:
            for problem in problems:
                desc, msgid = problem[:2]
                prob_file.write(u"{}\n{}\n".format(desc, id_filler.fill(msgid)))
                for translation in problem[2:]:
                    prob_file.write(u"{}\n".format(tx_filler.fill(translation)))
                prob_file.write(u"\n")
        log.error(" {0} problems in {1}, details in .prob file".format(len(problems), filename))
    else:
        log.info(" No problems found in {0}".format(filename))
def parse_args(argv):
    """
    Parse command line arguments, returning a dict of
    valid options:

        {
            'empty': BOOLEAN,
            'verbose': BOOLEAN,
            'language': str
        }

    where 'language' is a language code, eg "fr"
    """
    cli = argparse.ArgumentParser(description="Automatically finds translation errors in all edx-platform *.po files, for all languages, unless one or more language(s) is specified to check.")
    cli.add_argument(
        '-l', '--language',
        type=str,
        nargs='*',
        help="Specify one or more specific language code(s) to check (eg 'ko_KR')."
    )
    cli.add_argument(
        '-e', '--empty',
        action='store_true',
        help="Includes empty translation strings in .prob files."
    )
    cli.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Turns on info-level logging."
    )
    parsed = cli.parse_args(argv)
    return vars(parsed)
def main():
    """Main entry point for the tool.

    Parses CLI options, configures logging verbosity, and validates either
    the requested languages' locale directories or the whole locale tree.
    """
    args_dict = parse_args(sys.argv[1:])
    if args_dict['verbose']:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    else:
        logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
    langs = args_dict['language']

    if langs is not None:
        # langs will be a list of language codes; test each language.
        for lang in langs:
            # NOTE(review): LOCALE_DIR appears to support the '/' join
            # operator (path-like object) -- confirm against i18n.config.
            root = LOCALE_DIR / lang
            # Assert that a directory for this language code exists on the system
            if not os.path.isdir(root):
                log.error(" {0} is not a valid directory.\nSkipping language '{1}'".format(root, lang))
                continue
            # If we found the language code's directory, validate the files.
            validate_po_files(root, args_dict['empty'])
    else:
        # If lang is None, we walk all of the .po files under root, and test each one.
        root = LOCALE_DIR
        validate_po_files(root, args_dict['empty'])
| agpl-3.0 |
Integral-Technology-Solutions/ConfigNOW | Lib/jreload.py | 14 | 3632 | # java classes reload support (experimental)
# Copyright 2000 Samuele Pedroni
# ?? could have problem with import pkg.jclass.inner (this should not be used in any case)
# ?? using import * with a load-set together with reloading can be confusing
# cannot be fixed => anyway import * is not for production code
__version__ = "0.3"
import sys
from org.python.core import imp,PyJavaPackage,PyJavaClass
from _jython import is_lazy as _is_lazy
import jxxload_help
class _LoaderFactory(jxxload_help.JavaLoaderFactory):
    """Factory that builds Java class loaders backed by a path-based VFS."""
    def __init__(self, path):
        # Mount every path entry onto a single virtual file system.
        file_system = jxxload_help.PathVFS()
        for entry in path:
            file_system.addVFS(entry)
        self.vfs = file_system
    def makeLoader(self):
        # Each call returns a fresh loader chained to the syspath Java loader.
        return jxxload_help.PathVFSJavaLoader(self.vfs, imp.getSyspathJavaLoader())
class _Unload:
    """Callable that unloads every Java class a LoadSet's loader brought in.

    Captures the load-set's loader at construction time so the unload still
    targets the right classes even if the load-set is later reset.
    """
    def __init__(self,ls):
        self.ls = ls
        self.ls_name = ls._name
        # Loader snapshot; only classes created by THIS loader are removed.
        self.loader = ls._mgr.loader
    def do_unload(self,pkg):
        """Recursively strip classes owned by self.loader from package `pkg`."""
        for n in pkg.__dict__.keys():
            e = pkg.__dict__[n]
            if isinstance(e,PyJavaClass):
                # Lazy classes were never materialized; nothing to unload.
                if _is_lazy(e): continue
                if e.classLoader is self.loader:
                    del pkg.__dict__[n]
                    # Rebuild the dotted module name this class was registered
                    # under, so its sys.modules entry can be dropped too.
                    if pkg.__name__:
                        n = self.ls_name + '.' + pkg.__name__ + '.' +n
                    else:
                        n = self.ls_name + '.' + n
                    if sys.modules.has_key(n): del sys.modules[n]
            elif isinstance(e,PyJavaPackage):
                # Descend into sub-packages.
                self.do_unload(e)
    def __call__(self):
        """Perform the unload once; subsequent calls are no-ops."""
        if self.loader:
            # Only touch the package tree if the manager still uses our loader.
            if self.ls._mgr.checkLoader() is self.loader:
                self.do_unload(self.ls._top)
                self.ls._mgr.resetLoader()
            loader = self.loader
            # Let the Jython runtime discard the loader and its proxy interfaces.
            jxxload_help.DiscardHelp.discard(loader,loader.interfaces)
            # Mark as done so a second call does nothing.
            self.loader = None
class LoadSet:
    """A reloadable set of Java packages/classes resolved from a search path."""
    # ?? for the moment from import * and dir do not work for LoadSet, but work for
    # contained pkgs
    # need java impl as PyObject
    def __init__(self, name, path):
        manager = jxxload_help.PackageManager(path, _LoaderFactory(path))
        self._name = name
        self._mgr = manager
        self._top = manager.topLevelPackage
    def __getattr__(self, name):
        # Delegate attribute access to the top-level Java package; the special
        # name 'unload' yields a callable that unloads this load-set.
        try:
            return getattr(self._top, name)
        except:
            if name == 'unload':
                return _Unload(self)
            raise
    def __repr__(self):
        return "<java load-set %s>" % self._name
def unloadf(ls):
    """Return an unload callable for the load-set `ls`.

    Raises TypeError if `ls` is not a LoadSet.
    """
    if not isinstance(ls, LoadSet):
        # Call-style raise (valid on both Python 2 and 3) replaces the legacy
        # `raise TypeError, "..."` statement form.
        raise TypeError("unloadf(): arg is not a load-set")
    return _Unload(ls)
def makeLoadSet(name, path):
    """Create a LoadSet registered in sys.modules under `name`.

    If a module of that name is already registered, return it unchanged
    (the cached value wins; `path` is ignored in that case).
    """
    # `in` replaces the Python-2-only dict.has_key().
    if name in sys.modules:
        return sys.modules[name]
    sys.modules[name] = ls = LoadSet(name, path)
    return ls
_reload = reload
def _do_reload(ls_name, mgr, pkg):
    """Re-resolve every non-lazy Java class reachable from package `pkg`.

    Classes are looked up again through `mgr`; matching dotted entries in
    sys.modules (prefixed with the load-set name `ls_name`) are refreshed
    in place. Classes that can no longer be found are silently dropped.
    """
    pkg_name = pkg.__name__
    for n in pkg.__dict__.keys():
        e = pkg.__dict__[n]
        if isinstance(e, PyJavaClass):
            # Lazy classes were never materialized; skip them.
            if _is_lazy(e): continue
            del pkg.__dict__[n]
            try:
                c = mgr.findClass(pkg_name, n)
                if c:
                    pkg.__dict__[n] = c
                    # Rebuild the dotted module name this class is known by.
                    if pkg_name:
                        n = ls_name + '.' + pkg_name + '.' + n
                    else:
                        n = ls_name + '.' + n
                    # `in` replaces the Python-2-only dict.has_key().
                    if n in sys.modules: sys.modules[n] = c
            except:
                # Resolution failure: the class simply disappears from the pkg.
                pass
        elif isinstance(e, PyJavaPackage):
            _do_reload(ls_name, mgr, e)
def reload(ls):
    """Reload a LoadSet in place; anything else goes to the builtin reload."""
    if not isinstance(ls, LoadSet):
        # Not a load-set: defer to the original builtin captured in _reload.
        return _reload(ls)
    ls._mgr.resetLoader()
    _do_reload(ls._name, ls._mgr, ls._top)
    return ls
| mit |
ksmit799/Toontown-Source | toontown/hood/BRHoodDataAI.py | 1 | 1237 | from direct.directnotify import DirectNotifyGlobal
import HoodDataAI
from toontown.toonbase import ToontownGlobals
from toontown.safezone import DistributedTrolleyAI
from toontown.safezone import BRTreasurePlannerAI
from toontown.classicchars import DistributedPlutoAI
from toontown.toon import DistributedNPCFishermanAI
class BRHoodDataAI(HoodDataAI.HoodDataAI):
    """AI-side hood data for The Brrrgh: trolley, treasure planner, Pluto."""
    notify = DirectNotifyGlobal.directNotify.newCategory('BRHoodDataAI')

    def __init__(self, air, zoneId = None):
        hoodId = ToontownGlobals.TheBrrrgh
        # `is None` is the correct identity test (was `== None`).
        if zoneId is None:
            zoneId = hoodId
        HoodDataAI.HoodDataAI.__init__(self, air, zoneId, hoodId)

    def startup(self):
        """Create and register the zone's distributed objects."""
        HoodDataAI.HoodDataAI.startup(self)
        # Trolley for leaving the playground.
        trolley = DistributedTrolleyAI.DistributedTrolleyAI(self.air)
        trolley.generateWithRequired(self.zoneId)
        trolley.start()
        self.addDistObj(trolley)
        # Treasure spawner for this safezone.
        self.treasurePlanner = BRTreasurePlannerAI.BRTreasurePlannerAI(self.zoneId)
        self.treasurePlanner.start()
        # Classic character (Pluto) wandering the playground.
        self.classicChar = DistributedPlutoAI.DistributedPlutoAI(self.air)
        self.classicChar.generateWithRequired(self.zoneId)
        self.classicChar.start()
        self.addDistObj(self.classicChar)
| mit |
matplo/rootutils | python/2.7/tdraw_cfg.py | 1 | 22331 | #!/usr/bin/env python
from time import sleep
import sys
sys.argv.append( '-b' )
import tutils
import ROOT as r
import IPython
import argparse
import os
import fnmatch
from configobj import ConfigObj
import eval_string
from tqdm import tqdm
from tabulate import tabulate
from string import atoi
from string import atof
def dump_example():
    """Write a self-documenting example config file 'tdraw_example.cfg' to cwd."""
    # NOTE: this string is program output - do not edit its content casually.
    sexample = '''
[options]
libs =
[histogram]
# will draw only if varexp defined (here or in the parent tree)
input_dir =
active = True
output_file = default_output.root
input_file = job3/Tree_AnalysisResults.root
tree_name = t
varexp = muons.Phi()
selection =
option = e
nentries =
firstentry =
x = -PI,PI
nbinsx = 100
x_title = '#varphi (rad)'
y_title = counts
title = muons phi
name = muons_phi
[[another]]
selection = (pt>10)
[[another1]]
selection = +(pt<20)
[special]
# this will copy all the features of the [another]
# but change only the one specified here (note: copy IS RECURSIVE - will copy tree of sections)
copy = another
nbinsx = 20
[histogram_from_dir]
active = True
output_file = +_output
input_file = Tree_AnalysisResults.root
input_dir = .
tree_name = t
varexp = muons.Phi()
selection =
option = e
nentries =
firstentry =
x = -PI,PI
nbinsx = 2*PI*11
x_title = '#varphi (rad)'
y_title = counts
title = muons phi
name = muons_phi
'''
    # Overwrites any existing tdraw_example.cfg without warning.
    with open('tdraw_example.cfg', 'w') as f:
        print >> f, sexample
    print '[i] tdraw_example.cfg written.'
def get_value(s, op=None, vdefault=None):
    """Evaluate string `s` as a numeric expression, with optional conversion.

    `op` may be int or bool to coerce the result (int applies round-half-up
    behavior); `vdefault` is returned when parsing fails. When parsing fails
    and no `vdefault` is given, an error is printed and 0 is returned.
    """
    # Non-strings are stringified first so the parser always sees text.
    if type(s) != str:
        s = '{}'.format(s)
    retval = 0
    try:
        np = eval_string.NumericStringParser()
        retval = np.eval(s)
    except:
        if vdefault is None:
            print >> sys.stderr, '[e] unable to convert to a value:[',s,']',type(s), len(s)
        else:
            retval = vdefault
    if op != None:
        if op == int:
            # Round half up: add 1 when the fractional part exceeds 0.5.
            rest = retval - op(retval)
            if rest > 0.5:
                rest = int(1)
            else:
                rest = 0
            retval = op(retval) + rest
        if op == bool:
            retval = op(retval)
    return retval
def find_files(rootdir='.', pattern='*'):
    """Recursively collect paths under `rootdir` whose basename matches `pattern`.

    `pattern` is a glob-style (fnmatch) pattern applied to file names only.
    """
    matches = []
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        # fnmatch.filter applies the same match as fnmatch.fnmatch per name.
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(dirpath, filename))
    return matches
def quick_check_section(s, sname):
    """Verify that config section `s` carries every expected tdraw option key.

    Prints one error line (stderr) per missing key plus a one-time note
    (stdout) on the first failure. Returns True only when all keys exist.
    """
    required = ['active', 'output_file', 'input_file', 'input_dir', 'tree_name',
                'varexp', 'selection', 'option', 'nentries', 'firstentry',
                'x', 'nbinsx', 'x_title', 'y_title', 'title', 'name']
    ok = True
    noted = False
    for key in required:
        try:
            s[key]
        except KeyError:
            # Narrowed from a bare except: a missing key raises KeyError.
            # write() calls emit the same bytes the old print statements did,
            # and are valid on both Python 2 and 3.
            sys.stderr.write('[e] option [ %s ] missing in section [ %s ]\n' % (key, sname))
            if not noted:
                noted = True
                sys.stdout.write(' note: some options can be blank but present anyhow\n')
            ok = False
    return ok
def section_has_setting(what, section, recursive=True):
    """Look up option `what` on `section`, optionally climbing to named parents.

    Returns the stored value, or None when it is absent everywhere.
    """
    try:
        value = section[what]
    except:
        value = None
    # Only climb when the parent section actually has a name (the anonymous
    # root section terminates the search).
    if value is None and recursive is True and section.parent.name:
        value = section_has_setting(what, section.parent, recursive)
    return value
class TDrawEntry(object):
    """One TTree::Draw request parsed from a config section.

    Settings are resolved by walking up the section tree (child sections
    inherit/override parent values); 'selection' strings starting with '+'
    are ANDed onto the parent's selection.
    """
    def __init__(self, section):
        # Canonical list of settings an entry carries; also used by copy_fields.
        self.fields = ['name', 'title', 'active', 'input_dir',
                       'input_file', 'tree_name', 'varexp',
                       'selection', 'nentries', 'firstentry',
                       'x', 'nbinsx', 'x_title', 'y_title',
                       'option', 'output_file']
        self.section = section
        self.parents = self.get_parents()
        self.title = self.setting('title', section, '')
        self.active = get_value(str(self.setting('active', section, True)), bool, 1)
        self.input_dir = self.setting('input_dir', section, '')
        # Allow environment variables (e.g. $HOME) in the input directory.
        if '$' in self.input_dir:
            self.input_dir = os.path.expandvars(self.input_dir)
        self.input_file = self.setting('input_file', section, '')
        self.output_file = self.setting('output_file', section, 'tdraw_out.root')
        self.tree_name = self.setting('tree_name', section, 't')
        self.varexp = self.setting('varexp', section, '')
        self.selection = self.setting('selection', section, '')
        self.option = self.setting('option', section, 'e')
        self.nentries = self.setting('nentries', section, 1000000000)
        self.firstentry = self.setting('firstentry', section, 0)
        self.nbinsx = self.setting('nbinsx', section, 10)
        self.nbinsy = self.setting('nbinsy', section, 10)
        self.x_title = self.setting('x_title', section, 'default x title')
        self.y_title = self.setting('y_title', section, 'default y title')
        self.name = self.make_name(section) # section.name
        self.x = []
        # Re-resolve selection with '+'-chaining semantics (overrides the
        # plain lookup above).
        self.selection = self.get_selection(section)
        # Axis ranges are strings like "-PI,PI"; evaluate each end as a float.
        self.x.append(get_value(self.setting('x', section, [-1, 1])[0], float))
        self.x.append(get_value(self.setting('x', section, [-1, 1])[1], float))
        self.y = []
        self.y.append(get_value(self.setting('y', section, [-1, 1])[0], float))
        self.y.append(get_value(self.setting('y', section, [-1, 1])[1], float))
        if not self.title:
            # self.title = self.name
            # Fall back to a descriptive title built from varexp/selection.
            if len(self.selection) > 1:
                self.title = '{} w/ {}'.format(self.varexp, self.selection)
            else:
                self.title = '{}'.format(self.varexp)
    def copy_fields(self, t):
        """Copy all canonical fields from another entry `t` onto self."""
        for f in self.fields:
            self.__setattr__(f, t.__getattribute__(f))
        if len(self.title) < 1:
            if len(self.selection) > 1:
                self.title = '{} w/ {}'.format(self.varexp, self.selection)
            else:
                self.title = '{}'.format(self.varexp)
    def get_selection(self, section):
        """Resolve the selection string; a leading '+' ANDs with the parent's."""
        sel = self.setting('selection', section, '')
        if len(sel) > 0:
            if sel[0] == '+':
                if len(sel) > 1:
                    if len(self.get_selection(section.parent)) > 0:
                        sel = self.get_selection(section.parent) + ' && ' + sel[1:]
                    else:
                        sel = sel[1:]
                else:
                    # A bare '+' means: just inherit the parent's selection.
                    sel = self.get_selection(section.parent)
        return sel
    def is_iterable(self, o):
        """Return True when `o` supports iteration (strings included)."""
        retval = False
        try:
            iter(o)
            retval = True
        except TypeError:
            retval = False
        return retval
    def _setting(self, what, section):
        """Raw lookup of `what`, recursing to parent sections; None if absent."""
        retval = None
        try:
            retval = section[what]
        except:
            # check the parent whether setting exists
            retval = None
        if retval is None:
            if section.parent.name:
                retval = self._setting(what, section.parent)
        return retval
    def _setting_self(self, what, section):
        """Raw lookup of `what` on this section only (no parent recursion)."""
        retval = None
        try:
            retval = section[what]
        except:
            # check the parent whether setting exists
            retval = None
        return retval
    def setting(self, what, section, vdefault):
        """Look up `what` and coerce it to the type/shape of `vdefault`.

        A list-valued `vdefault` causes comma-splitting of string values and
        elementwise conversion to the type of `vdefault[0]`.
        """
        retval = self._setting(what, section)
        if retval is None:
            if vdefault is None:
                retval = ''
            else:
                retval = vdefault
        else:
            if vdefault is None:
                pass
            else:
                if self.is_iterable(vdefault):
                    if type(vdefault) == str:
                        pass
                    else:
                        # Expected a list: split strings on commas first.
                        if type(retval) == str:
                            retval = retval.split(',')
                        if self.is_iterable(vdefault) and len(vdefault) > 0:
                            if type(vdefault[0]) == int:
                                retval = [int(get_value(x, int, vdefault)) for x in retval]
                            if type(vdefault[0]) == float:
                                retval = [float(get_value(x, float, vdefault)) for x in retval]
                            if type(vdefault[0]) == bool:
                                retval = [bool(get_value(x, bool, vdefault)) for x in retval]
                else:
                    # Scalar default: convert the value to the default's type.
                    if type(vdefault) == int:
                        retval = int(get_value(retval, int, vdefault))
                    if type(vdefault) == float:
                        retval = float(get_value(retval, float, vdefault))
                    if type(vdefault) == bool:
                        retval = bool(get_value(retval, bool, vdefault))
        return retval
    def make_name(self, section):
        """Build a unique histogram name by joining the section's lineage with '_'."""
        s = section
        name = [section.name]
        while s:
            if s.parent.name:
                name.append(s.parent.name)
            else:
                break
            s = s.parent
        name.reverse()
        return '_'.join(name)
    def get_parents(self):
        """Return the space-joined lineage of section names, root first."""
        s = self.section
        name = [self.section.name]
        while s:
            if s.parent.name:
                name.append(s.parent.name)
            else:
                break
            s = s.parent
        name.reverse()
        return ' '.join(name)
    def row_full(self):
        """All fields as a table row (pairs with row_head_full)."""
        return [self.name, self.title, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection, self.nentries, self.firstentry, str(self.x), self.nbinsx, self.x_title, self.y_title, self.option, self.output_file]
    def row_head_full(self):
        """Column headers matching row_full."""
        return ['name', 'title', 'active', 'input_dir', 'input_file', 'tree_name', 'varexp', 'selection', 'nentries', 'firstentry', 'x-range', 'nbinsx', 'x_title', 'y_title', 'option', 'output_file']
    def row_more(self):
        """Medium-detail table row (pairs with row_head_more)."""
        return [self.val_and_type(x) for x in [self.name, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection, self.x, self.nentries, self.option, self.output_file]]
    def row_head_more(self):
        """Column headers matching row_more."""
        return ['name', 'active', 'in_dir', 'in_file', 'tree', 'varexp', 'sel.', 'x-range', 'NE', 'opt', 'output_file']
    def row(self):
        """Short table row (pairs with row_head)."""
        return [self.val_and_type(x) for x in [self.name, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection]]
    def row_head(self):
        """Column headers matching row."""
        return ['name', 'active', 'dir', 'in_file', 'tree', 'varexp', 'sel.']
    def row_commented(self):
        """Short row prefixed with '#' for comment-style dumps."""
        return [x for x in ['#', self.name, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection]]
    def row_head_commented(self):
        """Column headers matching row_commented."""
        return ['#', 'name', 'active', 'dir', 'in_file', 'tree', 'varexp', 'sel.']
    def val_and_type(self, x):
        """Render strings quoted, everything else via str()."""
        if type(x) == str:
            return '"{}"'.format(x)
        else:
            return str(x)
    def __repr__(self):
        return self.parents + '\n' + ' | '.join([self.val_and_type(x) for x in [self.name, self.title, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection, self.nentries, self.firstentry, self.x, self.nbinsx, self.x_title, self.y_title, self.option, self.output_file]])
class TDrawConfig(object):
def __init__(self, fname, opts=None):
self.fname = fname
self.config = ConfigObj(fname, raise_errors=True)
self.recreate = False
self.clean = True
if opts:
self.recreate = opts.recreate
self.clean = opts.clean
self.cleaned_files = []
self.entries = []
self.copies = []
self.process()
def process_section(self, section):
if section.name == 'config':
return
if len(section.sections):
for s in section.sections:
self.process_section(section[s])
if section_has_setting('varexp', section, recursive=True):
tde = TDrawEntry(section)
self.entries.append(tde)
else:
if self.is_copy(section):
self.process_copy(section)
else:
if section_has_setting('varexp', section, recursive=True):
tde = TDrawEntry(section)
self.entries.append(tde)
def is_copy(self, s):
try:
if len(s['copy']) > 0:
return True
except:
return False
def process_copy(self, s):
scopy = s['copy']
copy_names = []
if type(scopy) is str:
copy_names.append(scopy)
else:
for scp in scopy:
copy_names.append(scp)
#print 'to copy..',copy_names
for scopy in copy_names:
model = TDrawEntry(s)
current_entries = list(self.entries)
for se in current_entries:
docopy = False
#if type(se.parents) is str:
# if scopy in se.parents.split(' '):
# docopy = True
#else:
# if scopy in se.parents:
# docopy = True
if scopy == se.name[:len(scopy)]:
docopy = True
#print 'copy: ', scopy, se.name, docopy, type(se.parents), se.parents
if docopy:
# print '[i] use for copy:', se.name
newtde = TDrawEntry(se.section)
newtde.copy_fields(se)
#newtde.name = se.name
#newtde.parents = se.parents
for sf in model.fields:
setting = model._setting_self(sf, model.section)
if setting:
if sf == 'selection':
if setting[0] == '+':
if len(setting.strip()) > 1:
setting = '({}) && ({})'.format(newtde.selection, setting[1:])
newtde.__setattr__(sf, setting)
newtde.parents = '{} {}'.format(model.name, newtde.parents)
newtde.name = '{}_{}'.format(model.name, newtde.name)
newtde.title = '{} {}'.format(newtde.title, s.name)
#print 'new name:', newtde.name
#print
#self.copies.append(newtde)
self.entries.append(newtde)
def load_lib(self, libpath):
#sexplib = r.gSystem.ExpandPathName(libpath.strip())
sexplib = r.gSystem.DynamicPathName(libpath.strip())
sexplib_lib = os.path.basename(sexplib)
sexplib_dir = os.path.dirname(sexplib)
sexplib_fullpath = os.path.join(sexplib_dir, sexplib_lib)
#s = r.TString(sexplib_fullpath)
#sp = r.gSystem.FindDynamicLibrary(s)
#print sp
print '[i] loading', sexplib_fullpath
r.gSystem.AddDynamicPath(sexplib_dir)
retval = r.gSystem.Load(sexplib_lib)
print ' status', retval
def process(self):
for s in self.config.sections:
if s == 'options':
try:
slibs = self.config[s]['libs']
if type(slibs) == list:
for slib in slibs:
self.load_lib(slib)
else:
self.load_lib(slibs)
except:
pass
continue
#if self.is_copy(self.config[s]):
# #print '[i]', s, 'is a copy'
# continue
self.process_section(self.config[s])
# now add copies
# for s in self.config.sections:
# if s != 'options':
# if self.is_copy(self.config[s]):
# self.process_copy(s)
#for e in self.copies:
# self.entries.append(e)
for e in self.entries:
if len(e.input_file) < 1:
e.active = False
if len(e.output_file) < 1:
e.active = False
if len(e.varexp) < 1:
e.active = False
def __repr__(self):
#return '\n'.join(['[i] {} {}'.format(i, str(s)) for i,s in enumerate(self.entries)])
return tabulate([e.row() for e in self.entries], headers=self.entries[0].row_head())
def tab_comment(self):
print tabulate([e.row_commented() for e in self.entries], headers=self.entries[0].row_head_commented(), tablefmt='plain')
#print tabulate([e.row_commented() for e in self.entries])
def dump_class_config(self, fout):
outs = sys.stdout
sys.stdout = fout
self.tab_comment()
for e in self.entries:
print e.name,'=',e.name
print '{}_file = {}'.format(e.name, e.output_file)
print '{}_title = {}'.format(e.name, e.title)
print '{}_varexp = {}'.format(e.name, e.varexp)
print '{}_selection = {}'.format(e.name, e.selection)
print 'histograms = {}'.format(','.join([e.name for e in self.entries]))
print 'files = {}'.format(','.join([e.output_file for e in self.entries]))
print 'titles = {}'.format(','.join([e.title for e in self.entries]))
sys.stdout = outs
def run(self):
print '[i] run...'
cleaned = []
errors = []
errors.append('[e] errors:')
if len(self.entries)<1:
print '[i] no entries?'
return
pbare = tqdm(self.entries, desc=' entry')
for e in pbare:
# pbare.set_description(' {}:{}'.format(pbare.n, e.name))
# pbare.update(0)
if not e.active:
continue
foutname = e.output_file
if not foutname:
foutname = '+out'
if e.input_dir:
input_files = find_files(e.input_dir, pattern=e.input_file)
#print ' e.input_dir:',e.input_dir, 'input_file:',e.input_file
else:
input_files = [e.input_file]
pbar = tqdm(input_files, desc=' file')
for fn in pbar:
ifn = input_files.index(fn)
#pbar.set_description(' file #{}'.format(pbar.n))
sfn = fn
if len(fn) > 40:
sfn = fn[:18] + '..' + fn[len(fn)-20:]
if foutname[0] == '+':
sfoutname = fn.replace('.root', foutname[1:].replace('.root', '') + '.root')
else:
if (len(input_files) > 1):
sfoutname = foutname.replace('.root', '_{}.root'.format(ifn))
else:
sfoutname = foutname
#if sfoutname in cleaned:
# pbar.set_description(' {} : {}'.format(e.name, sfn))
#else:
# if self.clean:
# pbar.set_description(' {} : (c:{}) {}'.format(e.name, sfoutname, sfn))
# else:
# pbar.set_description(' {} : (o:{}) {}'.format(e.name, sfoutname, sfn))
fin = r.TFile(fn)
if not fin:
continue
errors.append('[e] file {} unable to open'.format(fn))
dopt = e.option
if 'norange' in dopt:
hstring = 'htmp'
dopt = e.option.replace('norange', '')
else:
# check if drawing in 2D
_varexp_tmp = e.varexp.replace('::', '__static__')
# if ':' in e.varexp:
if ':' in _varexp_tmp:
hstring = 'htmp({0},{1},{2},{3},{4},{5})'.format(e.nbinsx, e.x[0], e.x[1], e.nbinsy, e.y[0], e.y[1])
else:
hstring = 'htmp({0},{1},{2})'.format(e.nbinsx, e.x[0], e.x[1])
#print e.name, dopt, e.option
t = fin.Get(e.tree_name)
hout = None
if t:
# print e.varexp, e.selection, e.option, e.nentries, e.firstentry
nentr = t.Draw(e.varexp + '>>{}'.format(hstring), e.selection, dopt, e.nentries, e.firstentry)
# print '[i] number of entries drawn:',nentr
hout = r.gDirectory.Get('htmp')
if hout:
hout.SetDirectory(0)
hout.SetName(e.name)
hout.SetTitle(e.title)
hout.GetXaxis().SetTitle(e.x_title)
hout.GetYaxis().SetTitle(e.y_title)
else:
errors.append('[e] histogram {} does not exist; tree {} Nentries={}'.format('htmp', t.GetName(), t.GetEntries()))
else:
errors.append('[e] tree {} not found - file {}'.format(e.tree_name, fn))
continue
if hout:
if self.clean is True:
if sfoutname in cleaned:
pass
else:
# print '[i] clean', sfoutname, 'requested'
try:
os.remove(sfoutname)
except:
pass
if sfoutname not in cleaned:
cleaned.append(sfoutname)
fout = r.TFile(sfoutname, 'UPDATE')
fout.cd()
hout.Write()
fout.Purge()
fout.Close()
fin.Close()
else:
errors.append('[e] output histogram {} {} not made'.format(e.name, hstring))
print
print '[i] output files:'
for fn in cleaned:
print ' '+fn
if len(errors) > 1:
for i, er in enumerate(errors):
if i > 0:
print er.replace('[e] ', ' ')
else:
print er
print '[i] done.'
def tdraw_from_file(fname, recreate=False, clean_first=False):
    """Legacy (pre-TDrawConfig) driver: draw histograms from config `fname`.

    `recreate` switches output files from UPDATE to RECREATE mode;
    `clean_first` truncates each output file once before writing to it.
    """
    cleaned = []
    smode = 'UPDATE'
    if recreate == True:
        smode = 'RECREATE'
    if fname == None:
        return
    print '[i] file write mode is:',smode
    print '[i] config file:', fname
    config = ConfigObj(fname, raise_errors = True)
    for s in config.sections:
        if s == 'options':
            # [options] may list shared libraries to load before drawing.
            try:
                slibs = config[s]['libs']
                if type(slibs) == list:
                    for slib in slibs:
                        sexplib = r.gSystem.ExpandPathName(slib.strip())
                        print '[i] loading',sexplib
                        r.gSystem.Load(sexplib)
                else:
                    sexplib = r.gSystem.ExpandPathName(slibs)
                    print '[i] loading',sexplib
                    r.gSystem.Load(sexplib)
            except:
                pass
            continue
        # Skip malformed or inactive sections.
        if quick_check_section(config[s], s) == False:
            continue
        if get_value(config[s]['active']) == 0:
            continue
        print '[i] section [',s,']'
        input_fname = config[s]['input_file']
        foutname = config[s]['output_file']
        if not foutname:
            foutname = '+out'
        sdir = config[s]['input_dir']
        if sdir:
            # Glob the directory tree for matching input files.
            input_files = find_files(sdir, pattern=input_fname)
            print ' sdir is:',sdir
        else:
            input_files = [input_fname]
        print ' tdraw:', config[s]['name'], ';'.join([config[s]['title'], config[s]['x_title'], config[s]['y_title']])
        pbar = tqdm(input_files)
        for fn in pbar:
            nchars = 0
            # Shorten long paths for the progress-bar label.
            sfn = fn
            if len(fn) > 40:
                sfn = fn[:18] + '..' + fn[len(fn)-20:]
            pbar.set_description(' {} : {}'.format(s, sfn))
            # '+suffix' output names are derived from the input file name.
            if foutname[0] == '+':
                sfoutname = fn.replace('.root', foutname[1:] + '.root')
            else:
                sfoutname = foutname
            fin = r.TFile(fn)
            if not fin:
                continue
            # Fixed-binning histogram spec passed to TTree::Draw via '>>'.
            hstring = 'htmp({0},{1},{2})'.format(int(get_value(config[s]['nbinsx'])), get_value(config[s]['x'][0]), get_value(config[s]['x'][1]))
            t = fin.Get(config[s]['tree_name'])
            if t:
                nentries = config[s]['nentries']
                if not nentries:
                    nentries = '1000000000'
                firstentry = config[s]['firstentry']
                if not firstentry:
                    firstentry = '0'
                t.Draw(config[s]['varexp'] + '>>{}'.format(hstring), config[s]['selection'], config[s]['option'], int(get_value(nentries)), int(get_value(firstentry)))
                hout = r.gDirectory.Get('htmp')
                # Detach from the input file so the histogram survives Close().
                hout.SetDirectory(0)
                hout.SetName(config[s]['name'])
                hout.SetTitle(config[s]['title'])
                hout.GetXaxis().SetTitle(config[s]['x_title'])
                hout.GetYaxis().SetTitle(config[s]['y_title'])
                if clean_first == True:
                    # Truncate each output file only once per run.
                    if sfoutname in cleaned:
                        pass
                    else:
                        fout = r.TFile(sfoutname, 'recreate')
                        fout.Close()
                        cleaned.append(sfoutname)
                fout = r.TFile(sfoutname, smode)
                fout.cd()
                hout.Write()
                fout.Purge()
                fout.Close()
            fin.Close()
    print ' done.'
if __name__=="__main__":
parser = argparse.ArgumentParser(description='execute tdraw based on the config file', prog=os.path.basename(__file__))
#parser.add_argument('-w', '--write', help='dump the contents', action='store_true')
#parser.add_argument('-f', '--fname', help='file name to operate on', type=str)
#parser.add_argument('-r', '--read', help='read a file', type=str)
parser.add_argument('-b', '--batch', help='batchmode - do not end with IPython prompt', action='store_true')
parser.add_argument('-i', '--ipython', help='end with IPython prompt', action='store_true')
parser.add_argument('-g', '--example', help='dump an example file and exit', action='store_true')
parser.add_argument('--recreate', help='write files with "recreate" instead of "update"', action='store_true')
parser.add_argument('--clean', help='remove output file - once before start...', action='store_true')
parser.add_argument('fname', type=str, nargs='*')
parser.add_argument('--old', help='old implementation', action='store_true')
parser.add_argument('--test', help='show what we get from the config...', action='store_true')
parser.add_argument('--configobj', help='show what we get from the config...', action='store_true')
args = parser.parse_args()
if args.example:
dump_example()
sys.exit(0)
tutils.setup_basic_root()
if args.fname:
tc = r.TCanvas('ctmp', 'ctmp')
for fn in args.fname:
tc.cd()
if args.old:
tdraw_from_file(fn, args.recreate, args.clean)
else:
cfg = TDrawConfig(fn, args)
if args.configobj:
cfg.dump_class_config()
else:
print cfg
if not args.test:
cfg.run()
fconfobj = fn.replace('.cfg', '_out.confobj')
with open(fconfobj, 'w') as f:
cfg.dump_class_config(f)
print '[i]',fconfobj,'written.'
if args.ipython:
IPython.embed()
| mit |
motion2015/a3 | lms/djangoapps/courseware/management/commands/tests/test_clean_history.py | 72 | 18737 | """Test the clean_history management command."""
import fnmatch
from mock import Mock
import os.path
import textwrap
import dateutil.parser
from django.test import TransactionTestCase
from django.db import connection
from courseware.management.commands.clean_history import StudentModuleHistoryCleaner
# In lots of places in this file, smhc == StudentModuleHistoryCleaner
def parse_date(sdate):
    """Parse a string date into a UTC-aware datetime."""
    moment = dateutil.parser.parse(sdate)
    # Attach UTC so comparisons against timezone-aware db values work.
    return moment.replace(tzinfo=dateutil.tz.gettz('UTC'))
class SmhcSayStubbed(StudentModuleHistoryCleaner):
    """StudentModuleHistoryCleaner, but with .say() stubbed for testing."""
    def __init__(self, **kwargs):
        super(SmhcSayStubbed, self).__init__(**kwargs)
        # Messages captured from say(); tests inspect these instead of stdout.
        self.said_lines = []
    def say(self, msg):
        """Record `msg` instead of printing it."""
        self.said_lines.append(msg)
class SmhcDbMocked(SmhcSayStubbed):
    """StudentModuleHistoryCleaner, but with db access mocked."""
    def __init__(self, **kwargs):
        super(SmhcDbMocked, self).__init__(**kwargs)
        # Replace the real db accessors with mocks so no database is touched.
        self.get_history_for_student_modules = Mock()
        self.delete_history = Mock()
    def set_rows(self, rows):
        """Set the mocked history rows."""
        # Incoming rows are (id, created-string) pairs; parse the dates the
        # same way the real query layer would return them.
        rows = [(row_id, parse_date(created)) for row_id, created in rows]
        self.get_history_for_student_modules.return_value = rows
class HistoryCleanerTest(TransactionTestCase):
    """Base class for all history cleaner tests."""
    maxDiff = None
    def setUp(self):
        """Register cleanup of the cleaner's on-disk state file."""
        super(HistoryCleanerTest, self).setUp()
        self.addCleanup(self.clean_up_state_file)
    def write_state_file(self, state):
        """Write the string `state` into the state file read by StudentModuleHistoryCleaner."""
        with open(StudentModuleHistoryCleaner.STATE_FILE, "w") as state_file:
            state_file.write(state)
    def read_state_file(self):
        """Return the string contents of the state file read by StudentModuleHistoryCleaner."""
        with open(StudentModuleHistoryCleaner.STATE_FILE) as state_file:
            return state_file.read()
    def clean_up_state_file(self):
        """Remove any state file lying around."""
        if os.path.exists(StudentModuleHistoryCleaner.STATE_FILE):
            os.remove(StudentModuleHistoryCleaner.STATE_FILE)
    def assert_said(self, smhc, *msgs):
        """Fail if the `smhc` didn't say `msgs`.

        The messages passed here are `fnmatch`-style patterns: "*" means anything.
        """
        # BUGFIX guard: zip() below silently truncates, so if fewer lines were
        # said than patterns were expected, the check used to pass incorrectly.
        if len(smhc.said_lines) < len(msgs):
            self.fail(
                "Expected at least {0} messages, only {1} were said:\n{2}".format(
                    len(msgs), len(smhc.said_lines), "\n".join(smhc.said_lines)
                )
            )
        for said, pattern in zip(smhc.said_lines, msgs):
            if not fnmatch.fnmatch(said, pattern):
                fmt = textwrap.dedent("""\
                    Messages:
                    {msgs}
                    don't match patterns:
                    {patterns}
                    Failed at {said!r} and {pattern!r}
                    """)
                msg = fmt.format(
                    msgs="\n".join(smhc.said_lines),
                    patterns="\n".join(msgs),
                    said=said,
                    pattern=pattern
                )
                self.fail(msg)
    def parse_rows(self, rows):
        """Parse convenient rows into real data."""
        rows = [
            (row_id, parse_date(created), student_module_id)
            for row_id, created, student_module_id in rows
        ]
        return rows
    def write_history(self, rows):
        """Write history rows to the db.

        Each row should be (id, created, student_module_id).
        """
        cursor = connection.cursor()
        cursor.executemany(
            """
            INSERT INTO courseware_studentmodulehistory
            (id, created, student_module_id)
            VALUES (%s, %s, %s)
            """,
            self.parse_rows(rows),
        )
    def read_history(self):
        """Read the history from the db, and return it as a list of tuples.

        Returns [(id, created, student_module_id), ...]
        """
        cursor = connection.cursor()
        cursor.execute("""
            SELECT id, created, student_module_id FROM courseware_studentmodulehistory
            """)
        return cursor.fetchall()
    def assert_history(self, rows):
        """Assert that the history rows are the same as `rows`."""
        self.assertEqual(self.parse_rows(rows), self.read_history())
class HistoryCleanerNoDbTest(HistoryCleanerTest):
    """Tests of StudentModuleHistoryCleaner with db access mocked."""
    def test_empty(self):
        """No rows at all: the cleaner reports no history and deletes nothing."""
        smhc = SmhcDbMocked()
        smhc.set_rows([])
        smhc.clean_one_student_module(1)
        self.assert_said(smhc, "No history for student_module_id 1")
        # Nothing to delete, so delete_history wasn't called.
        self.assertFalse(smhc.delete_history.called)
    def test_one_row(self):
        """A single row is always kept."""
        smhc = SmhcDbMocked()
        smhc.set_rows([
            (1, "2013-07-13 12:11:10.987"),
        ])
        smhc.clean_one_student_module(1)
        self.assert_said(smhc, "Deleting 0 rows of 1 for student_module_id 1")
        # Nothing to delete, so delete_history wasn't called.
        self.assertFalse(smhc.delete_history.called)
    def test_one_row_dry_run(self):
        """Dry run changes only the reported wording, not the (non-)deletion."""
        smhc = SmhcDbMocked(dry_run=True)
        smhc.set_rows([
            (1, "2013-07-13 12:11:10.987"),
        ])
        smhc.clean_one_student_module(1)
        self.assert_said(smhc, "Would have deleted 0 rows of 1 for student_module_id 1")
        # Nothing to delete, so delete_history wasn't called.
        self.assertFalse(smhc.delete_history.called)
    def test_two_rows_close(self):
        """Rows within the time threshold: the older one is deleted."""
        smhc = SmhcDbMocked()
        smhc.set_rows([
            (7, "2013-07-13 12:34:56.789"),
            (9, "2013-07-13 12:34:56.987"),
        ])
        smhc.clean_one_student_module(1)
        self.assert_said(smhc, "Deleting 1 rows of 2 for student_module_id 1")
        smhc.delete_history.assert_called_once_with([7])
    def test_two_rows_far(self):
        """Rows more than a second apart are both kept."""
        smhc = SmhcDbMocked()
        smhc.set_rows([
            (7, "2013-07-13 12:34:56.789"),
            (9, "2013-07-13 12:34:57.890"),
        ])
        smhc.clean_one_student_module(1)
        self.assert_said(smhc, "Deleting 0 rows of 2 for student_module_id 1")
        self.assertFalse(smhc.delete_history.called)
    def test_a_bunch_of_rows(self):
        """Mixed spacing: only the last row of each close-together burst survives."""
        smhc = SmhcDbMocked()
        smhc.set_rows([
            (4, "2013-07-13 16:30:00.000"),     # keep
            (8, "2013-07-13 16:30:01.100"),
            (15, "2013-07-13 16:30:01.200"),
            (16, "2013-07-13 16:30:01.300"),    # keep
            (23, "2013-07-13 16:30:02.400"),
            (42, "2013-07-13 16:30:02.500"),
            (98, "2013-07-13 16:30:02.600"),    # keep
            (99, "2013-07-13 16:30:59.000"),    # keep
        ])
        smhc.clean_one_student_module(17)
        self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 17")
        smhc.delete_history.assert_called_once_with([42, 23, 15, 8])
class HistoryCleanerWitDbTest(HistoryCleanerTest):
"""Tests of StudentModuleHistoryCleaner with a real db."""
def test_no_history(self):
# Cleaning a student_module_id with no history leaves the db unchanged.
smhc = SmhcSayStubbed()
self.write_history([
(4, "2013-07-13 16:30:00.000", 11), # keep
(8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
smhc.clean_one_student_module(22)
self.assert_said(smhc, "No history for student_module_id 22")
self.assert_history([
(4, "2013-07-13 16:30:00.000", 11), # keep
(8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
def test_a_bunch_of_rows(self):
# Cleaning a student_module_id with 8 records, 4 to delete.
smhc = SmhcSayStubbed()
self.write_history([
(4, "2013-07-13 16:30:00.000", 11), # keep
(8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(17, "2013-07-13 16:30:01.310", 22), # other student_module_id!
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
smhc.clean_one_student_module(11)
self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 11")
self.assert_history([
(4, "2013-07-13 16:30:00.000", 11), # keep
(16, "2013-07-13 16:30:01.300", 11), # keep
(17, "2013-07-13 16:30:01.310", 22), # other student_module_id!
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
def test_a_bunch_of_rows_dry_run(self):
# Cleaning a student_module_id with 8 records, 4 to delete,
# but don't really do it.
smhc = SmhcSayStubbed(dry_run=True)
self.write_history([
(4, "2013-07-13 16:30:00.000", 11), # keep
(8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
smhc.clean_one_student_module(11)
self.assert_said(smhc, "Would have deleted 4 rows of 8 for student_module_id 11")
self.assert_history([
(4, "2013-07-13 16:30:00.000", 11), # keep
(8, "2013-07-13 16:30:01.100", 11),
(15, "2013-07-13 16:30:01.200", 11),
(16, "2013-07-13 16:30:01.300", 11), # keep
(23, "2013-07-13 16:30:02.400", 11),
(42, "2013-07-13 16:30:02.500", 11),
(98, "2013-07-13 16:30:02.600", 11), # keep
(99, "2013-07-13 16:30:59.000", 11), # keep
])
def test_a_bunch_of_rows_in_jumbled_order(self):
    """Rows whose ids are not in timestamp order are still cleaned by
    timestamp, not by id order."""
    # Cleaning a student_module_id with 8 records, 4 to delete.
    smhc = SmhcSayStubbed()
    # Ids deliberately do not follow the timestamps.
    self.write_history([
        (23, "2013-07-13 16:30:01.100", 11),
        (24, "2013-07-13 16:30:01.300", 11),  # keep
        (27, "2013-07-13 16:30:02.500", 11),
        (30, "2013-07-13 16:30:01.350", 22),  # other student_module_id!
        (32, "2013-07-13 16:30:59.000", 11),  # keep
        (50, "2013-07-13 16:30:02.400", 11),
        (51, "2013-07-13 16:30:02.600", 11),  # keep
        (56, "2013-07-13 16:30:00.000", 11),  # keep
        (57, "2013-07-13 16:30:01.200", 11),
    ])
    smhc.clean_one_student_module(11)
    self.assert_said(smhc, "Deleting 4 rows of 8 for student_module_id 11")
    self.assert_history([
        (24, "2013-07-13 16:30:01.300", 11),  # keep
        (30, "2013-07-13 16:30:01.350", 22),  # other student_module_id!
        (32, "2013-07-13 16:30:59.000", 11),  # keep
        (51, "2013-07-13 16:30:02.600", 11),  # keep
        (56, "2013-07-13 16:30:00.000", 11),  # keep
    ])
def test_a_bunch_of_rows_with_timestamp_ties(self):
    """Among rows sharing an identical timestamp, the one with the
    greatest id is the survivor."""
    # Sometimes rows are written with identical timestamps. The one with
    # the greater id is the winner in that case.
    smhc = SmhcSayStubbed()
    self.write_history([
        (21, "2013-07-13 16:30:01.100", 11),
        (24, "2013-07-13 16:30:01.100", 11),  # keep
        (22, "2013-07-13 16:30:01.100", 11),
        (23, "2013-07-13 16:30:01.100", 11),
        (27, "2013-07-13 16:30:02.500", 11),
        (30, "2013-07-13 16:30:01.350", 22),  # other student_module_id!
        (32, "2013-07-13 16:30:59.000", 11),  # keep
        (50, "2013-07-13 16:30:02.500", 11),  # keep
    ])
    smhc.clean_one_student_module(11)
    self.assert_said(smhc, "Deleting 4 rows of 7 for student_module_id 11")
    self.assert_history([
        (24, "2013-07-13 16:30:01.100", 11),  # keep
        (30, "2013-07-13 16:30:01.350", 22),  # other student_module_id!
        (32, "2013-07-13 16:30:59.000", 11),  # keep
        (50, "2013-07-13 16:30:02.500", 11),  # keep
    ])
def test_get_last_student_module(self):
    """get_last_student_module_id() returns the largest student_module_id
    present in the history table (44 here), and reports it."""
    # Can we find the last student_module_id properly?
    smhc = SmhcSayStubbed()
    self.write_history([
        (23, "2013-07-13 16:30:01.100", 11),
        (24, "2013-07-13 16:30:01.300", 44),
        (27, "2013-07-13 16:30:02.500", 11),
        (30, "2013-07-13 16:30:01.350", 22),
        (32, "2013-07-13 16:30:59.000", 11),
        (51, "2013-07-13 16:30:02.600", 33),
        (56, "2013-07-13 16:30:00.000", 11),
    ])
    last = smhc.get_last_student_module_id()
    self.assertEqual(last, 44)
    self.assert_said(smhc, "Last student_module_id is 44")
def test_load_state_with_no_stored_state(self):
    """With no state file on disk, load_state() starts from smid 0."""
    smhc = SmhcSayStubbed()
    # Sanity check: the state file must not exist yet.
    self.assertFalse(os.path.exists(smhc.STATE_FILE))
    smhc.load_state()
    self.assertEqual(smhc.next_student_module_id, 0)
    self.assert_said(smhc, "No stored state")
def test_load_stored_state(self):
    """load_state() restores next_student_module_id from the JSON state
    file and reports the raw state it loaded."""
    self.write_state_file('{"next_student_module_id": 23}')
    smhc = SmhcSayStubbed()
    smhc.load_state()
    self.assertEqual(smhc.next_student_module_id, 23)
    self.assert_said(smhc, 'Loaded stored state: {"next_student_module_id": 23}')
def test_save_state(self):
    """save_state() writes next_student_module_id to disk as JSON."""
    smhc = SmhcSayStubbed()
    smhc.next_student_module_id = 47
    smhc.save_state()
    state = self.read_state_file()
    self.assertEqual(state, '{"next_student_module_id": 47}')
class SmhcForTestingMain(SmhcSayStubbed):
    """A StudentModuleHistoryCleaner with a few function stubbed for testing main."""

    def __init__(self, *args, **kwargs):
        # student_module_ids for which clean_one_student_module should
        # simulate a failure (popped so the base __init__ never sees it).
        self.exception_smids = kwargs.pop('exception_smids', ())
        super(SmhcForTestingMain, self).__init__(*args, **kwargs)

    def clean_one_student_module(self, smid):
        # Record the call instead of doing real cleaning; optionally raise
        # to exercise main()'s error handling.
        self.say("(not really cleaning {})".format(smid))
        if smid in self.exception_smids:
            raise Exception("Something went wrong!")

    def commit(self):
        # Record the commit instead of performing it.
        self.say("(not really committing)")
class HistoryCleanerMainTest(HistoryCleanerTest):
    """Tests of StudentModuleHistoryCleaner.main(), using SmhcForTestingMain."""

    def test_only_one_record(self):
        """With no stored state, main() processes every smid from 0 up to
        the last one found, commits, and saves the next id."""
        smhc = SmhcForTestingMain()
        self.write_history([
            (1, "2013-07-15 11:47:00.000", 1),
        ])
        smhc.main()
        self.assert_said(
            smhc,
            'Last student_module_id is 1',
            'No stored state',
            '(not really cleaning 0)',
            '(not really cleaning 1)',
            '(not really committing)',
            'Saved state: {"next_student_module_id": 2}',
        )

    def test_already_processed_some(self):
        """With stored state, main() resumes at next_student_module_id and
        only cleans the smids not yet processed."""
        smhc = SmhcForTestingMain()
        self.write_state_file('{"next_student_module_id": 25}')
        self.write_history([
            (1, "2013-07-15 15:04:00.000", 23),
            (2, "2013-07-15 15:04:11.000", 23),
            (3, "2013-07-15 15:04:01.000", 24),
            (4, "2013-07-15 15:04:00.000", 25),
            (5, "2013-07-15 15:04:00.000", 26),
        ])
        smhc.main()
        self.assert_said(
            smhc,
            'Last student_module_id is 26',
            'Loaded stored state: {"next_student_module_id": 25}',
            '(not really cleaning 25)',
            '(not really cleaning 26)',
            '(not really committing)',
            'Saved state: {"next_student_module_id": 27}'
        )

    def test_working_in_batches(self):
        """With batch_size=3, main() commits and saves state after each
        batch of smids, not just once at the end."""
        smhc = SmhcForTestingMain()
        self.write_state_file('{"next_student_module_id": 25}')
        self.write_history([
            (3, "2013-07-15 15:04:01.000", 24),
            (4, "2013-07-15 15:04:00.000", 25),
            (5, "2013-07-15 15:04:00.000", 26),
            (6, "2013-07-15 15:04:00.000", 27),
            (7, "2013-07-15 15:04:00.000", 28),
            (8, "2013-07-15 15:04:00.000", 29),
        ])
        smhc.main(batch_size=3)
        self.assert_said(
            smhc,
            'Last student_module_id is 29',
            'Loaded stored state: {"next_student_module_id": 25}',
            '(not really cleaning 25)',
            '(not really cleaning 26)',
            '(not really cleaning 27)',
            '(not really committing)',
            'Saved state: {"next_student_module_id": 28}',
            '(not really cleaning 28)',
            '(not really cleaning 29)',
            '(not really committing)',
            'Saved state: {"next_student_module_id": 30}',
        )

    def test_something_failing_while_cleaning(self):
        """A failure cleaning one smid is logged with a traceback, and
        main() carries on with the remaining smids."""
        smhc = SmhcForTestingMain(exception_smids=[26])
        self.write_state_file('{"next_student_module_id": 25}')
        self.write_history([
            (3, "2013-07-15 15:04:01.000", 24),
            (4, "2013-07-15 15:04:00.000", 25),
            (5, "2013-07-15 15:04:00.000", 26),
            (6, "2013-07-15 15:04:00.000", 27),
            (7, "2013-07-15 15:04:00.000", 28),
            (8, "2013-07-15 15:04:00.000", 29),
        ])
        smhc.main(batch_size=3)
        self.assert_said(
            smhc,
            'Last student_module_id is 29',
            'Loaded stored state: {"next_student_module_id": 25}',
            '(not really cleaning 25)',
            '(not really cleaning 26)',
            "Couldn't clean student_module_id 26:\nTraceback*Exception: Something went wrong!\n",
            '(not really cleaning 27)',
            '(not really committing)',
            'Saved state: {"next_student_module_id": 28}',
            '(not really cleaning 28)',
            '(not really cleaning 29)',
            '(not really committing)',
            'Saved state: {"next_student_module_id": 30}',
        )
| agpl-3.0 |
shsingh/ansible | lib/ansible/modules/cloud/google/gcp_runtimeconfig_config_info.py | 13 | 5400 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_runtimeconfig_config_info
description:
- Gather info for GCP Config
short_description: Gather info for GCP Config
version_added: '2.10'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a config
gcp_runtimeconfig_config_info:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
description:
description:
- The description to associate with the runtime config.
returned: success
type: str
name:
description:
- The name of the runtime config.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: list GCP Runtime Config resources and exit via Ansible."""
    module = GcpModule(argument_spec=dict())

    # Fall back to the runtimeconfig scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloudruntimeconfig']

    resources = fetch_list(module, collection(module))
    module.exit_json(resources=resources)
def collection(module):
    """Return the list URL for runtime configs in the module's project."""
    url_template = "https://runtimeconfig.googleapis.com/v1beta1/projects/{project}/configs"
    return url_template.format(**module.params)
def fetch_list(module, link):
    """Fetch every config at `link` using the module's GCP credentials."""
    session = GcpSession(module, 'runtimeconfig')
    return session.list(link, return_if_object, array_name='configs')
def return_if_object(module, response):
    """Decode `response` into a dict, or None when there is nothing to report.

    404 (not found) and 204 (no content) both yield None; HTTP or JSON
    failures and API-reported errors abort the module via fail_json.
    """
    # Not found / no content: nothing to return.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result
# Run the module only when executed directly (e.g. by Ansible), not on import.
if __name__ == "__main__":
    main()
| gpl-3.0 |
Codefans-fan/odoo | openerp/addons/base/tests/test_osv.py | 446 | 4722 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.osv.query import Query
class QueryTestCase(unittest.TestCase):
    """Tests for the SQL rendered by openerp.osv.query.Query.

    get_sql() returns a (from_clause, where_clause, ...) tuple; these tests
    pin the exact FROM/WHERE text for several join combinations.
    """

    def test_basic_query(self):
        """One explicit inner join plus one explicit outer join."""
        query = Query()
        query.tables.extend(['"product_product"', '"product_template"'])
        query.where_clause.append("product_product.template_id = product_template.id")
        query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)  # add normal join
        query.add_join(("product_product", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True)  # outer join
        self.assertEquals(query.get_sql()[0].strip(),
            """"product_product" LEFT JOIN "res_user" as "product_product__user_id" ON ("product_product"."user_id" = "product_product__user_id"."id"),"product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") """.strip())
        self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())

    def test_query_chained_explicit_joins(self):
        """An outer join chained off the alias created by a previous join."""
        query = Query()
        query.tables.extend(['"product_product"', '"product_template"'])
        query.where_clause.append("product_product.template_id = product_template.id")
        query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)  # add normal join
        query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True)  # CHAINED outer join
        self.assertEquals(query.get_sql()[0].strip(),
            """"product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")""".strip())
        self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())

    def test_mixed_query_chained_explicit_implicit_joins(self):
        """Explicit chained joins combined with an implicit (WHERE) join."""
        query = Query()
        query.tables.extend(['"product_product"', '"product_template"'])
        query.where_clause.append("product_product.template_id = product_template.id")
        query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)  # add normal join
        query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True)  # CHAINED outer join
        query.tables.append('"account.account"')
        query.where_clause.append("product_category.expense_account_id = account_account.id")  # additional implicit join
        self.assertEquals(query.get_sql()[0].strip(),
            """"product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id"),"account.account" """.strip())
        self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id""".strip())

    def test_raise_missing_lhs(self):
        """Joining from a table that is not in the query must raise."""
        query = Query()
        query.tables.append('"product_product"')
        self.assertRaises(AssertionError, query.add_join, ("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
def format_comment_title(product):
    """Produce a Markdown-formatted heading for a "product" string.

    A product is a browser identifier optionally followed by a colon and a
    release channel (e.g. "firefox" or "chrome:dev").  The generated title
    is used both to create new comments and to locate (and subsequently
    update) previously-submitted comments.
    """
    pieces = product.split(":")
    heading = pieces[0].title()
    if len(pieces) > 1:
        heading = "%s (%s)" % (heading, pieces[1])
    return "# %s #" % heading
def markdown_adjust(s):
    """Escape problematic markdown sequences."""
    # Applied in order; the escapes introduced by earlier substitutions do
    # not contain characters matched by later ones.
    substitutions = [
        ('\t', u'\\t'),
        ('\n', u'\\n'),
        ('\r', u'\\r'),
        ('`', u''),
        ('|', u'\\|'),
    ]
    for old, new in substitutions:
        s = s.replace(old, new)
    return s
def table(headings, data, log):
    """Create and log data to specified logger in tabular (Markdown) format.

    Every row in `data` must have exactly one cell per heading.  Each
    column is padded to the width of its widest cell plus two spaces.
    Lines are emitted one at a time via `log`, ending with a blank line.
    """
    cols = range(len(headings))
    assert all(len(item) == len(cols) for item in data)
    # Column width: widest of the header/data cells, plus two padding
    # spaces.  (An explicit loop replaces the previous `reduce` call,
    # which raised NameError on Python 3 where reduce is no longer a
    # builtin.)
    max_widths = [len(item) + 2 for item in headings]
    for row in data:
        for i in cols:
            width = len(row[i]) + 2
            if width > max_widths[i]:
                max_widths[i] = width
    log("|%s|" % "|".join(item.center(max_widths[i]) for i, item in enumerate(headings)))
    log("|%s|" % "|".join("-" * max_widths[i] for i in cols))
    for row in data:
        log("|%s|" % "|".join(" %s" % row[i].ljust(max_widths[i] - 1) for i in cols))
    log("")
| mpl-2.0 |
SLACNationalAcceleratorLaboratory/net-config | bin/config_daemon.py | 3 | 4534 | #!/usr/local/bin/python
# Standard library.
import getopt
import logging
import os
import Queue
import resource
import sys
import sys
import threading
import time

# Project imports.
import slac.netconfig
from slac.netconfig import NetConfig, NetConfigStorage, ConfigWorker
from slac.netconfig.StorageQueue import StorageQueue
# Default number of config-fetching worker threads.
THREADS=1
# Daemon working directory.  NOTE(review): not referenced in the visible
# code -- confirm it is used elsewhere before relying on it.
WORKING_DIR = '/tmp'
# Umask for the daemon.  NOTE(review): also not referenced in the visible code.
UMASK = 0
# Fallback for the highest file descriptor to close when daemonising.
MAX_FD = 1023
def usage( message ):
    """Print the command's help text; append an error line when `message`
    is not None.  (Python 2 print statements -- this is a py2 script.)"""
    print "Daemon to collect and store device configurations"
    print "Usage: "
    print " net-config config_daemon DIR [--threads=INT]"
    print
    print " options:"
    print " --threads=INT number of threads to get configs"
    print " --quiet only report summary report"
    print
    # NOTE(review): `not message == None` would read better as
    # `message is not None`.
    if not message == None:
        print " Error: " + message
class ConfigQueue( threading.Thread ):
    """ simple thread that watches a directory for new tickets and inserts into queue for ConfigWorkers """
    # NetConfigStorage instance used to list/dequeue tickets.
    store = None
    # Work queue shared with the ConfigWorker threads.
    queue = None
    def __init__( self, config_parser, queue ):
        # init parents class
        threading.Thread.__init__(self)
        # don't bother connecting
        # NOTE(review): main() passes the ticket-queue *directory path* as
        # this argument, while the name suggests a config parser is
        # expected -- confirm which one NetConfigStorage wants.
        self.store = slac.netconfig.NetConfigStorage( config_parser )
        # queue for workers
        self.queue = queue
    def run( self ):
        """ just loops and watches the directory and inserts the result into a queue for workers """
        # Runs forever; polls the ticket queue once per second.
        while True:
            tickets = self.store.listQueue()
            for t in tickets:
                # retrieve info from ticket and dequeue
                ticket = self.store.dequeue( t )
                # put it into the worker job queue for processing
                self.queue.put( ticket )
            time.sleep(1)
if __name__ == "__main__":
    """Parse options, start the ticket-watcher and worker threads, then daemonise."""
    config_file = slac.netconfig.getConfigFile()

    # Mapping of command line flags to option names.
    args = {
        '-h' : 'help',
        '--help' : 'help',
        '--threads' : 'threads=',
        '-v' : 'verbose',
        '--verbose' : 'verbose'
    }
    # parse arguments
    lists, options = slac.netconfig.parseArgs( args )
    if (options.has_key( 'help' ) and options['help'] == 1) or len(lists) < 1:
        usage( None )
        sys.exit()

    # logging: verbose gets a detailed DEBUG format, otherwise terse INFO
    if options.has_key( 'verbose' ) and options['verbose'] == 1:
        logging.basicConfig( level=logging.DEBUG, format="%(relativeCreated)7d %(levelname)-7s %(thread)-12d %(module)-28s %(lineno)-5d %(message)s" )
    else:
        logging.basicConfig( level=logging.INFO, format="%(thread)-12d %(message)s" )

    # default and normalise the worker-thread count
    if not options.has_key( 'threads'):
        options['threads'] = THREADS
    options['threads'] = int( options['threads'] )

    # quiet: only report the summary
    options['quiet'] = options.has_key('quiet')

    # preload the config so that threads don't accidentally lock it
    config_parser = slac.netconfig.getConfig( config_file )

    ###
    # start doing stuff!
    ###

    # queues for synchronisation of results
    in_queue = Queue.Queue()
    changed_queue = Queue.Queue()
    error_queue = Queue.Queue()

    # locate the directory holding config tickets
    section = 'net-config'
    config_ticket_queue = None
    if config_parser.has_section( section ):
        config_ticket_queue = config_parser.get( section, 'config_ticket_queue' )
    if config_ticket_queue == None:
        # py3-compatible raise syntax (was the py2 `raise Exception, msg` form)
        raise Exception( 'config ticket directory is not defined' )

    # create the instance to monitor the config ticket queue
    # NOTE(review): ConfigQueue's __init__ names this argument
    # `config_parser` but a directory path is passed -- confirm intent.
    p = ConfigQueue( config_ticket_queue, in_queue )
    # BUG FIX: was `p.start` (no parentheses), so the thread never started.
    p.start()

    # spawn pool of threads to get configs
    for i in range( options['threads'] ):
        t = ConfigWorker( config_parser, options, in_queue, changed_queue, error_queue )
        #t.setDaemon(True)
        t.start()
        time.sleep(1)

    ###
    # daemonise
    ###
    # close fd's
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAX_FD
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError: # ERROR, fd wasn't open to begin with (ignored)
            pass

    # BUG FIX: REDIRECT_TO was previously undefined (NameError here);
    # redirect the standard streams to the null device.
    REDIRECT_TO = os.devnull
    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1) # standard output (1)
    os.dup2(0, 2) # standard error (2)
    # done
    sys.exit(0)
| gpl-2.0 |
fangxingli/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_auth/test_auth_cookie.py | 47 | 1527 | # (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from six.moves import xrange
import six
from paste.auth import cookie
from paste.wsgilib import raw_interactive, dump_environ
from paste.response import header_value
from paste.httpexceptions import *
def build(application, setenv, *args, **kwargs):
    """Wrap `application` in cookie.middleware so that each request first
    stores the `setenv` items into the cookie session and the environ."""
    def setter(environ, start_response):
        save = environ['paste.auth.cookie'].append
        for key, value in setenv.items():
            save(key)
            environ[key] = value
        return application(environ, start_response)
    return cookie.middleware(setter, *args, **kwargs)
def test_noop():
    """When nothing is saved into the session, no cookie is set."""
    app = build(dump_environ, {})
    status, headers, content, errors = raw_interactive(app)
    assert not header_value(headers, 'Set-Cookie')
def test_basic(key='key', val='bingles'):
    """A value saved into the session round-trips through the cookie."""
    app = build(dump_environ, {key: val})
    status, headers, content, errors = raw_interactive(app)

    set_cookie = header_value(headers, 'Set-Cookie')
    assert "Path=/;" in set_cookie
    assert "expires=" not in set_cookie

    # Replay the request with the cookie (name=value part only); the dumped
    # environ must contain the stored key/value.  (Local renamed from
    # `cookie`, which shadowed the module import.)
    cookie_header = set_cookie.split(";")[0]
    status, headers, content, errors = raw_interactive(app, {'HTTP_COOKIE': cookie_header})
    expected = "%s: %s" % (key, val.replace("\n", "\n "))
    if six.PY3:
        expected = expected.encode('utf8')
    assert expected in content
def test_roundtrip():
    """Every byte value 0-255 survives the save/load cycle."""
    all_chars = str('').join(chr(code) for code in xrange(256))
    test_basic(all_chars, all_chars)
| apache-2.0 |
kantai/passe-framework-prototype | django/contrib/admin/__init__.py | 246 | 1608 | # ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL
from django.contrib.admin.options import StackedInline, TabularInline
from django.contrib.admin.sites import AdminSite, site
def autodiscover():
    """
    Auto-discover INSTALLED_APPS admin.py modules and fail silently when
    not present. This forces an import on them to register any admin bits they
    may want.
    """
    import copy
    from django.conf import settings
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's admin module.
        try:
            # Snapshot the registry so a failed import can be rolled back.
            registry_snapshot = copy.copy(site._registry)
            import_module('%s.admin' % app)
        except:
            # Reset the model registry to the state before the last import:
            # this import will have to reoccur on the next request and could
            # raise NotRegistered and AlreadyRegistered again (see #8245).
            site._registry = registry_snapshot
            # Only swallow the error when the app simply has no admin
            # module; any other failure should bubble up.
            if module_has_submodule(mod, 'admin'):
                raise
| bsd-3-clause |
aerialhedgehog/VyPy | trunk/VyPy/tools/redirect.py | 1 | 8439 | ## \file redirect.py
# \brief python package for file redirection
# \author Trent Lukaczyk, Aerospace Design Laboratory (Stanford University)
# \version 0.0.0
#
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy, glob
# -------------------------------------------------------------------
# Output Redirection
# -------------------------------------------------------------------
# original source: http://stackoverflow.com/questions/6796492/python-temporarily-redirect-stdout-stderr
class output(object):
    ''' redirect.output(stdout,stderr)

        Context manager that temporarily redirects sys.stdout and
        sys.stderr while the `with` body runs.

        Each target may be None (leave that stream untouched), a filename
        (opened in append mode here and closed on exit), or an open
        file-like stream (left open on exit).

        Example:
            with SU2.io.redirect_output('stdout.txt','stderr.txt'):
                sys.stdout.write("standard out")
                sys.stderr.write("standard error")
                # code
            #: with output redirection
    '''

    def __init__(self, stdout=None, stderr=None):
        # Remember which streams we opened ourselves so __exit__ knows
        # which ones to close; caller-supplied streams are never closed.
        opened_out = isinstance(stdout, str)
        opened_err = isinstance(stderr, str)
        if opened_out:
            stdout = open(stdout, 'a')
        if opened_err:
            stderr = open(stderr, 'a')
        self._stdout = stdout or sys.stdout
        self._stderr = stderr or sys.stderr
        self._newout = opened_out
        self._newerr = opened_err

    def __enter__(self):
        # Save the current streams, flush them, then swap in ours.
        self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
        self.old_stdout.flush()
        self.old_stderr.flush()
        sys.stdout, sys.stderr = self._stdout, self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush our streams, restore the originals, and close any file
        # we opened in __init__.
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout, sys.stderr = self.old_stdout, self.old_stderr
        if self._newout:
            self._stdout.close()
        if self._newerr:
            self._stderr.close()

#: class output()
# -------------------------------------------------------------------
# Folder Redirection
# -------------------------------------------------------------------
class folder(object):
    ''' redirect.folder(folder,pull,link,force) as push
        Temporarily redirects to a working folder, pulling
        and pushing needed files
        Example:
            folder = 'temp'
            pull   = ['file1.txt','file2.txt']
            link   = ['file3.big']
            force  = True
            # original path
            import os
            print os.getcwd()
            # enter folder
            with SU2.io.redirect_folder(folder,pull,link,force) as push:
                print os.getcwd()
                # code
                push.append('file4.txt')
            #: with folder redirection
            # returned to original path
            print os.getcwd()
        Inputs:
            folder - working folder, relative or absolute
            pull   - list of files to pull (copy to working folder)
            link   - list of files to link (symbolic link in working folder)
            force  - True/False overwrite existing files in working folder
        Targets:
            push   - list of files to push (copy to originating path)
        Notes:
            push must be appended or extended, not overwritten
            links in Windows not supported, will simply copy
    '''

    def __init__(self, folder, pull=None, link=None, force=True ):
        ''' folder redirection initialization
            see help( folder ) for more info
        '''
        # Normalize the optional lists; a bare string becomes a one-item list.
        if pull is None: pull = []
        if link is None: link = []
        if not isinstance(pull,list) : pull = [pull]
        if not isinstance(link,list) : link = [link]

        # Absolute paths with a guaranteed trailing slash; origin is
        # captured at construction time, not at __enter__ time.
        origin = os.getcwd()
        origin = os.path.abspath(origin).rstrip('/')+'/'
        folder = os.path.abspath(folder).rstrip('/')+'/'

        self.origin = origin
        self.folder = folder
        # Deep copies so later caller mutation of the inputs has no effect.
        self.pull   = copy.deepcopy(pull)
        self.push   = []
        self.link   = copy.deepcopy(link)
        self.force  = force

    def __enter__(self):
        origin = self.origin  # absolute path
        folder = self.folder  # absolute path
        pull   = self.pull
        push   = self.push
        link   = self.link
        force  = self.force

        # check for no folder change
        if folder == origin:
            return []

        # relative folder path
        #relative = os.path.relpath(folder,origin)

        # check, make folder
        if not os.path.exists(folder):
            os.makedirs(folder)

        # copy pull files into the working folder (skipped or overwritten
        # per `force` when the destination already exists)
        for name in pull:
            old_name = os.path.abspath(name)
            new_name = os.path.split(name)[-1]
            new_name = os.path.join(folder,new_name)
            if old_name == new_name: continue
            if os.path.exists( new_name ):
                if force: os.remove( new_name )
                else: continue
            shutil.copy(old_name,new_name)

        # make links in the working folder (copies on Windows; see make_link)
        for name in link:
            old_name = os.path.abspath(name)
            new_name = os.path.split(name)[-1]
            new_name = os.path.join(folder,new_name)
            if old_name == new_name: continue
            if os.path.exists( new_name ):
                if force: os.remove( new_name )
                else: continue
            make_link(old_name,new_name)

        # change directory
        os.chdir(folder)

        # return empty list to append with files to push to super folder
        return push

    def __exit__(self, exc_type, exc_value, traceback):
        origin = self.origin
        folder = self.folder
        push   = self.push
        force  = self.force

        # check for no folder change
        if folder == origin:
            return

        # move the requested assets back to the originating folder
        for name in push:
            old_name = os.path.abspath(name)
            name = os.path.split(name)[-1]
            new_name = os.path.join(origin,name)

            # links: re-link the real target in the origin folder instead
            # of moving the link file itself
            if os.path.islink(old_name):
                source = os.path.realpath(old_name)
                if source == new_name: continue
                if os.path.exists( new_name ):
                    if force: os.remove( new_name )
                    else: continue
                make_link(source,new_name)
            # moves
            else:
                if old_name == new_name: continue
                if os.path.exists( new_name ):
                    if force: os.remove( new_name )
                    else: continue
                shutil.move(old_name,new_name)

        # change directory back to where we started
        os.chdir(origin)

#: class folder()
def make_link(src, dst):
    """ make_link(src,dst)
        Create a relative link at `dst` pointing to `src`.

        Inputs:
            src - source file (must exist)
            dst - destination to place link

        Windows links currently unsupported, will copy file instead.
    """
    assert os.path.exists(src) , 'source file does not exist \n%s' % src

    if os.name == 'nt':
        # can't make a link in windows -- fall back to a plain copy
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copy(src, dst)
        return

    # resolve the real target (src may itself be a link) and normalize paths
    real_src = os.path.normpath(os.path.realpath(src))
    norm_dst = os.path.normpath(dst)

    # nothing to do for a self-referencing link
    if real_src == norm_dst:
        return

    # express the link target relative to the link's own directory
    src_dir = os.path.join(os.path.split(real_src)[0]) + '/'
    dst_dir = os.path.join(os.path.split(norm_dst)[0]) + '/'
    rel_dir = os.path.relpath(src_dir, dst_dir)
    rel_src = os.path.join(rel_dir, os.path.split(real_src)[1])

    # make the unix link, replacing any existing file
    if os.path.exists(norm_dst):
        os.remove(norm_dst)
    os.symlink(rel_src, norm_dst)
| bsd-3-clause |
wrouesnel/ansible | lib/ansible/modules/cloud/openstack/os_subnet.py | 5 | 12775 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_subnet
short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove a subnet to an OpenStack network
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
- Required when I(state) is 'present'
required: false
name:
description:
- The name of the subnet that should be created. Although Neutron
allows for non-unique subnet names, this module enforces subnet
name uniqueness.
required: true
cidr:
description:
- The CIDR representation of the subnet that should be assigned to
the subnet. Required when I(state) is 'present' and a subnetpool
is not specified.
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
no_gateway_ip:
description:
- The gateway IP would not be assigned for this subnet
required: false
default: false
version_added: "2.2"
dns_nameservers:
description:
- List of DNS nameservers for this subnet.
required: false
default: None
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should
be allocated.
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the
virtual machines.
required: false
default: None
host_routes:
description:
- A list of host route dictionaries for the subnet.
required: false
default: None
ipv6_ra_mode:
description:
- IPv6 router advertisement mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
ipv6_address_mode:
description:
- IPv6 address mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
use_default_subnetpool:
description:
- Use the default subnetpool for I(ip_version) to obtain a CIDR.
required: false
default: false
project:
description:
- Project name or ID containing the subnet (name admin-only)
required: false
default: None
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
state: present
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
nexthop: 12.34.56.78
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
# Delete a subnet
- os_subnet:
state: absent
name: net1subnet
# Create an ipv6 stateless subnet
- os_subnet:
state: present
name: intv6
network_name: internal
ip_version: 6
cidr: 2db8:1::/64
dns_nameservers:
- 2001:4860:4860::8888
- 2001:4860:4860::8844
ipv6_ra_mode: dhcpv6-stateless
ipv6_address_mode: dhcpv6-stateless
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _can_update(subnet, module, cloud):
    """Check for differences in non-updatable values.

    Neutron cannot change a subnet's network, IP version or IPv6 modes in
    place, so any such difference between the existing ``subnet`` dict and
    the requested module params is a hard failure (module.fail_json exits).
    """
    network_name = module.params['network_name']
    ip_version = int(module.params['ip_version'])
    ipv6_ra_mode = module.params['ipv6_ra_mode']
    ipv6_a_mode = module.params['ipv6_address_mode']
    if network_name:
        network = cloud.get_network(network_name)
        if network:
            netid = network['id']
        else:
            module.fail_json(msg='No network found for %s' % network_name)
        # NOTE: fail_json exits, so netid is always bound when reached here
        if netid != subnet['network_id']:
            module.fail_json(msg='Cannot update network_name in existing \
                              subnet')
    if ip_version and subnet['ip_version'] != ip_version:
        module.fail_json(msg='Cannot update ip_version in existing subnet')
    if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
        module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
    if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
        module.fail_json(msg='Cannot update ipv6_address_mode in existing \
                          subnet')
def _needs_update(subnet, module, cloud):
    """Check for differences in the updatable values.

    Returns True when any updatable attribute of the existing ``subnet``
    differs from the requested module params.  Fails the module first (via
    _can_update) if a non-updatable attribute would have to change.
    """
    # First check if we are trying to update something we're not allowed to
    _can_update(subnet, module, cloud)
    # now check for the things we are allowed to update
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    gateway_ip = module.params['gateway_ip']
    no_gateway_ip = module.params['no_gateway_ip']
    dns = module.params['dns_nameservers']
    host_routes = module.params['host_routes']
    # A subnet may legitimately have no allocation pools; the previous
    # unconditional [0] raised IndexError for such subnets.
    pools = subnet['allocation_pools'] or []
    curr_pool = pools[0] if pools else {}
    if subnet['enable_dhcp'] != enable_dhcp:
        return True
    if subnet_name and subnet['name'] != subnet_name:
        return True
    if pool_start and curr_pool.get('start') != pool_start:
        return True
    if pool_end and curr_pool.get('end') != pool_end:
        return True
    if gateway_ip and subnet['gateway_ip'] != gateway_ip:
        return True
    if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
        return True
    if host_routes:
        # NOTE(review): sorting dicts / dict_keys only works on Python 2;
        # this module targets py2-era Ansible — confirm before porting.
        curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys())
        new_hr = sorted(host_routes, key=lambda t: t.keys())
        if sorted(curr_hr) != sorted(new_hr):
            return True
    if no_gateway_ip and subnet['gateway_ip']:
        return True
    return False
def _system_state_change(module, subnet, cloud):
    """Return True when applying the requested state would change anything."""
    state = module.params['state']
    if state == 'present':
        # creating is always a change; otherwise compare updatable fields
        return True if not subnet else _needs_update(subnet, module, cloud)
    if state == 'absent':
        # deleting only changes something if the subnet exists
        return bool(subnet)
    return False
def main():
    """Ansible entry point: create, update or delete an OpenStack subnet.

    Reconciles the subnet named by the task parameters against the cloud,
    exiting via module.exit_json / module.fail_json.
    """
    ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        network_name=dict(default=None),
        cidr=dict(default=None),
        ip_version=dict(default='4', choices=['4', '6']),
        enable_dhcp=dict(default='true', type='bool'),
        gateway_ip=dict(default=None),
        no_gateway_ip=dict(default=False, type='bool'),
        dns_nameservers=dict(default=None, type='list'),
        allocation_pool_start=dict(default=None),
        allocation_pool_end=dict(default=None),
        host_routes=dict(default=None, type='list'),
        # Fixed: the AnsibleModule keyword is 'choices'; the previous
        # 'choice' spelling was silently ignored, so invalid ipv6 modes
        # were never rejected by argument validation.
        ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices),
        ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices),
        use_default_subnetpool=dict(default=False, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
        project=dict(default=None)
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    state = module.params['state']
    network_name = module.params['network_name']
    cidr = module.params['cidr']
    ip_version = module.params['ip_version']
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    gateway_ip = module.params['gateway_ip']
    no_gateway_ip = module.params['no_gateway_ip']
    dns = module.params['dns_nameservers']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    host_routes = module.params['host_routes']
    ipv6_ra_mode = module.params['ipv6_ra_mode']
    ipv6_a_mode = module.params['ipv6_address_mode']
    use_default_subnetpool = module.params['use_default_subnetpool']
    project = module.params.pop('project')
    # use_default_subnetpool requires a newer shade release
    min_version = None
    if use_default_subnetpool:
        min_version = '1.16.0'
    # Check for required parameters when state == 'present'
    if state == 'present':
        if not module.params['network_name']:
            module.fail_json(msg='network_name required with present state')
        if not module.params['cidr'] and not use_default_subnetpool:
            module.fail_json(msg='cidr or use_default_subnetpool required '
                                 'with present state')
    # Both ends of an allocation pool must be given together.
    if pool_start and pool_end:
        pool = [dict(start=pool_start, end=pool_end)]
    elif pool_start or pool_end:
        module.fail_json(msg='allocation pool requires start and end values')
    else:
        pool = None
    if no_gateway_ip and gateway_ip:
        module.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')
    shade, cloud = openstack_cloud_from_module(module, min_version=min_version)
    try:
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        subnet = cloud.get_subnet(subnet_name, filters=filters)
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, subnet,
                                                          cloud))
        if state == 'present':
            if not subnet:
                # No existing subnet: create one.
                kwargs = dict(
                    ip_version=ip_version,
                    enable_dhcp=enable_dhcp,
                    subnet_name=subnet_name,
                    gateway_ip=gateway_ip,
                    disable_gateway_ip=no_gateway_ip,
                    dns_nameservers=dns,
                    allocation_pools=pool,
                    host_routes=host_routes,
                    ipv6_ra_mode=ipv6_ra_mode,
                    ipv6_address_mode=ipv6_a_mode,
                    tenant_id=project_id)
                if use_default_subnetpool:
                    kwargs['use_default_subnetpool'] = use_default_subnetpool
                subnet = cloud.create_subnet(network_name, cidr, **kwargs)
                changed = True
            else:
                # Subnet exists: update only when something differs.
                if _needs_update(subnet, module, cloud):
                    cloud.update_subnet(subnet['id'],
                                        subnet_name=subnet_name,
                                        enable_dhcp=enable_dhcp,
                                        gateway_ip=gateway_ip,
                                        disable_gateway_ip=no_gateway_ip,
                                        dns_nameservers=dns,
                                        allocation_pools=pool,
                                        host_routes=host_routes)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed,
                             subnet=subnet,
                             id=subnet['id'])
        elif state == 'absent':
            if not subnet:
                changed = False
            else:
                changed = True
                cloud.delete_subnet(subnet_name)
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
    # Executed directly by Ansible's module runner.
    main()
| gpl-3.0 |
anushbmx/kitsune | kitsune/notifications/models.py | 1 | 2432 | from datetime import datetime
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from actstream.models import Action
from kitsune.sumo.models import ModelBase
class Notification(ModelBase):
    """A notification of one activity-stream Action, addressed to one user."""
    owner = models.ForeignKey(User, db_index=True)
    action = models.ForeignKey(Action)
    # When the owner read the notification; NULL means unread.
    read_at = models.DateTimeField(blank=True, null=True)
    @property
    def is_read(self):
        """Whether the notification has been read."""
        return self.read_at is not None
    @is_read.setter
    def is_read(self, newval):
        # unread -> read stamps the current time; read -> unread clears it.
        # Re-marking an already-read notification keeps the original stamp.
        oldval = self.read_at is not None
        if not oldval and newval:
            self.read_at = datetime.now()
        elif oldval and not newval:
            self.read_at = None
class PushNotificationRegistration(ModelBase):
    """A user's registration of a push endpoint for notification delivery."""
    creator = models.ForeignKey(User, db_index=True)
    created = models.DateTimeField(default=datetime.now)
    # URL that push payloads are delivered to.
    push_url = models.CharField(max_length=256)
@receiver(post_save, sender=Action, dispatch_uid='action_create_notifications')
def add_notification_for_action(sender, instance, created, **kwargs):
    """Fan out notifications to followers when a new Action is saved.

    Fires only for newly created rows; saves of existing Actions are
    ignored.  The actual work happens asynchronously in a task.
    """
    if created:
        # Imported here rather than at module level to avoid a circular
        # import between the models and tasks modules.
        from kitsune.notifications import tasks
        tasks.add_notification_for_action.delay(instance.id)
@receiver(post_save, sender=Notification, dispatch_uid='send_notification')
def send_notification(sender, instance, created, **kwargs):
    """Deliver a freshly created Notification asynchronously.

    Only newly created notifications are sent; updates are ignored.
    """
    if created:
        # Imported here rather than at module level to avoid a circular
        # import between the models and tasks modules.
        from kitsune.notifications import tasks
        tasks.send_notification.delay(instance.id)
class RealtimeRegistration(ModelBase):
    """A registration for realtime updates about a single watched object."""
    creator = models.ForeignKey(User)
    created = models.DateTimeField(default=datetime.now)
    # URL that realtime payloads are delivered to.
    endpoint = models.CharField(max_length=256)
    # Generic foreign key to the object being watched.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    target = GenericForeignKey('content_type', 'object_id')
@receiver(post_save, sender=Action, dispatch_uid='action_send_realtimes')
def send_realtimes_for_action(sender, instance, created, **kwargs):
    """Push realtime updates to registrations when a new Action is saved.

    Fires only for newly created rows; the delivery happens in a task.
    """
    if created:
        # Imported here rather than at module level to avoid a circular
        # import between the models and tasks modules.
        from kitsune.notifications import tasks
        tasks.send_realtimes_for_action.delay(instance.id)
| bsd-3-clause |
libyal/winevt-kb | tests/resources.py | 1 | 3379 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Windows Event Log resources."""
import unittest
from winevtrc import resources
from tests import test_lib
class EventLogProviderTest(test_lib.BaseTestCase):
  """Tests for the Windows Event Log provider."""
  _TEST_FILENAMES = ['test1', 'test2', 'test3']
  def _CreateTestProvider(self):
    """Creates an Event Log provider for testing."""
    return resources.EventLogProvider(
        'log_type', 'log_source', 'provider_guid')
  def testSetCategoryMessageFilenames(self):
    """Tests the SetCategoryMessageFilenames function."""
    event_log_provider = self._CreateTestProvider()
    # Both a list and a semicolon-separated string should be accepted.
    event_log_provider.SetCategoryMessageFilenames(self._TEST_FILENAMES)
    self.assertEqual(
        event_log_provider.category_message_files, self._TEST_FILENAMES)
    event_log_provider.SetCategoryMessageFilenames('test1;test2;test3')
    self.assertEqual(
        event_log_provider.category_message_files, self._TEST_FILENAMES)
  def testSetEventMessageFilenames(self):
    """Tests the SetEventMessageFilenames function."""
    event_log_provider = self._CreateTestProvider()
    event_log_provider.SetEventMessageFilenames(self._TEST_FILENAMES)
    self.assertEqual(
        event_log_provider.event_message_files, self._TEST_FILENAMES)
    event_log_provider.SetEventMessageFilenames('test1;test2;test3')
    self.assertEqual(
        event_log_provider.event_message_files, self._TEST_FILENAMES)
  def testSetParameterMessageFilenames(self):
    """Tests the SetParameterMessageFilenames function."""
    event_log_provider = self._CreateTestProvider()
    event_log_provider.SetParameterMessageFilenames(self._TEST_FILENAMES)
    self.assertEqual(
        event_log_provider.parameter_message_files, self._TEST_FILENAMES)
    event_log_provider.SetParameterMessageFilenames('test1;test2;test3')
    self.assertEqual(
        event_log_provider.parameter_message_files, self._TEST_FILENAMES)
class MessageFileTest(test_lib.BaseTestCase):
  """Tests for the Windows Event Log message file."""
  def testMessageTable(self):
    """Tests the AppendMessageTable and GetMessageTable functions."""
    message_file = resources.MessageFile('test')
    message_file.AppendMessageTable(5, '1.2.3.4')
    # A table exists for the appended LCID only.
    self.assertIsNotNone(message_file.GetMessageTable(5))
    self.assertIsNone(message_file.GetMessageTable(6))
    self.assertEqual(len(list(message_file.GetMessageTables())), 1)
  def testStringTable(self):
    """Tests the AppendStringTable and GetStringTable functions."""
    message_file = resources.MessageFile('test')
    message_file.AppendStringTable(5, '1.2.3.4')
    # A table exists for the appended LCID only.
    self.assertIsNotNone(message_file.GetStringTable(5))
    self.assertIsNone(message_file.GetStringTable(6))
    self.assertEqual(len(list(message_file.GetStringTables())), 1)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| apache-2.0 |
brainelectronics/towerdefense | pyglet/window/xlib/__init__.py | 8 | 51830 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import unicodedata
import warnings
import pyglet
from pyglet.window import WindowException, NoSuchDisplayException, \
MouseCursorException, MouseCursor, \
DefaultMouseCursor, ImageMouseCursor, BaseWindow, _PlatformEventHandler, \
_ViewEventHandler
from pyglet.window import key
from pyglet.window import mouse
from pyglet.event import EventDispatcher
from pyglet.canvas.xlib import XlibCanvas
from pyglet.libs.x11 import xlib
from pyglet.libs.x11 import cursorfont
from pyglet.compat import asbytes
try:
from pyglet.libs.x11 import xsync
_have_xsync = True
except:
_have_xsync = False
class mwmhints_t(Structure):
    """ctypes mirror of the Motif WM hints (_MOTIF_WM_HINTS) property.

    Used below to ask the window manager to drop decorations for
    borderless windows.  Field order and widths must match the X11
    property layout exactly.
    """
    _fields_ = [
        ('flags', c_uint32),        # which of the following fields are set
        ('functions', c_uint32),
        ('decorations', c_uint32),  # 0 disables WM decorations
        ('input_mode', c_int32),
        ('status', c_uint32)
    ]
# XXX: wraptypes can't parse the header this function is in yet
XkbSetDetectableAutoRepeat = xlib._lib.XkbSetDetectableAutoRepeat
XkbSetDetectableAutoRepeat.restype = c_int
XkbSetDetectableAutoRepeat.argtypes = [POINTER(xlib.Display), c_int, POINTER(c_int)]
# Tri-state: None = not yet probed; set on first XlibWindow.__init__.
_can_detect_autorepeat = None
XA_CARDINAL = 6 # Xatom.h:14
# Do we have the November 2000 UTF8 extension?
_have_utf8 = hasattr(xlib._lib, 'Xutf8TextListToTextProperty')
# symbol,ctrl -> motion mapping
_motion_map = {
    (key.UP, False): key.MOTION_UP,
    (key.RIGHT, False): key.MOTION_RIGHT,
    (key.DOWN, False): key.MOTION_DOWN,
    (key.LEFT, False): key.MOTION_LEFT,
    (key.RIGHT, True): key.MOTION_NEXT_WORD,
    (key.LEFT, True): key.MOTION_PREVIOUS_WORD,
    (key.HOME, False): key.MOTION_BEGINNING_OF_LINE,
    (key.END, False): key.MOTION_END_OF_LINE,
    (key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
    (key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
    (key.HOME, True): key.MOTION_BEGINNING_OF_FILE,
    (key.END, True): key.MOTION_END_OF_FILE,
    (key.BACKSPACE, False): key.MOTION_BACKSPACE,
    (key.DELETE, False): key.MOTION_DELETE,
}
class XlibException(WindowException):
    '''An X11-specific exception. This exception is probably a programming
    error in pyglet.'''
    pass
class XlibMouseCursor(MouseCursor):
    """Mouse cursor backed by a native X11 cursor handle."""
    # Native cursors are rendered by the X server, not drawn by pyglet.
    drawable = False
    def __init__(self, cursor):
        # `cursor` is an Xlib Cursor XID.
        self.cursor = cursor
# Platform event data is single item, so use platform event handler directly.
XlibEventHandler = _PlatformEventHandler
# Events on the child "view" window use the view variant of the decorator.
ViewEventHandler = _ViewEventHandler
class XlibWindow(BaseWindow):
    _x_display = None       # X display connection
    _x_screen_id = None     # X screen index
    _x_ic = None            # X input context
    _window = None          # Xlib window handle
    _minimum_size = None    # (width, height) WM size hint, or None
    _maximum_size = None    # (width, height) WM size hint, or None
    _override_redirect = False  # True when fullscreen bypasses the WM
    _x = 0
    _y = 0                  # Last known window position
    _width = 0
    _height = 0             # Last known window size
    _mouse_exclusive_client = None  # x,y of "real" mouse during exclusive
    _mouse_buttons = [False] * 6    # State of each xlib button
    _keyboard_exclusive = False
    _active = True
    _applied_mouse_exclusive = False
    _applied_keyboard_exclusive = False
    _mapped = False         # True while the window is mapped (visible)
    _lost_context = False
    _lost_context_state = False
    _enable_xsync = False   # _NET_WM_SYNC_REQUEST resize sync available
    _current_sync_value = None
    _current_sync_valid = False
    # All events except the ones that conflict with our event handling.
    _default_event_mask = (0x1ffffff
                           & ~xlib.PointerMotionHintMask
                           & ~xlib.ResizeRedirectMask
                           & ~xlib.SubstructureNotifyMask)
    def __init__(self, *args, **kwargs):
        """Create the window and register platform event handlers.

        Handlers tagged with `_view` dispatch for the child view window;
        all others dispatch for the top-level window.  Also probes once
        (module-wide) whether the server supports detectable auto-repeat.
        """
        # Bind event handlers
        self._event_handlers = {}
        self._view_event_handlers = {}
        for name in self._platform_event_names:
            if not hasattr(self, name):
                continue
            func = getattr(self, name)
            for message in func._platform_event_data:
                if hasattr(func, '_view'):
                    self._view_event_handlers[message] = func
                else:
                    self._event_handlers[message] = func
        super(XlibWindow, self).__init__(*args, **kwargs)
        global _can_detect_autorepeat
        if _can_detect_autorepeat == None:
            supported_rtrn = c_int()
            _can_detect_autorepeat = XkbSetDetectableAutoRepeat(self.display._display, c_int(1), byref(supported_rtrn))
        if _can_detect_autorepeat:
            # With detectable auto-repeat, track held keys ourselves.
            self.pressed_keys = set()
    def _recreate(self, changes):
        """Destroy and rebuild the native window after a config change.

        `changes` is a sequence of attribute names that changed; only
        'fullscreen', 'resizable' and 'context' require native work here.
        """
        # If flipping to/from fullscreen, need to recreate the window. (This
        # is the case with both override_redirect method and
        # _NET_WM_STATE_FULLSCREEN).
        #
        # A possible improvement could be to just hide the top window,
        # destroy the GLX window, and reshow it again when leaving fullscreen.
        # This would prevent the floating window from being moved by the
        # WM.
        if ('fullscreen' in changes or 'resizable' in changes):
            # clear out the GLX context
            self.context.detach()
            xlib.XDestroyWindow(self._x_display, self._window)
            del self.display._window_map[self._window]
            del self.display._window_map[self._view]
            self._window = None
            self._mapped = False
        # TODO: detect state loss only by examining context share.
        if 'context' in changes:
            self._lost_context = True
            self._lost_context_state = True
        self._create()
    def _create(self):
        """Create or re-configure the X window, child view, context and IC.

        Idempotent with respect to an already-existing window: the window
        is re-configured (fullscreen geometry, style, WM hints) rather
        than recreated.  NOTE(review): the order of Xlib calls matters
        throughout; do not reorder.
        """
        # Unmap existing window if necessary while we fiddle with it.
        if self._window and self._mapped:
            self._unmap()
        self._x_display = self.display._display
        self._x_screen_id = self.display.x_screen
        # Create X window if not already existing.
        if not self._window:
            root = xlib.XRootWindow(self._x_display, self._x_screen_id)
            visual_info = self.config.get_visual_info()
            visual = visual_info.visual
            visual_id = xlib.XVisualIDFromVisual(visual)
            default_visual = xlib.XDefaultVisual(
                self._x_display, self._x_screen_id)
            default_visual_id = xlib.XVisualIDFromVisual(default_visual)
            window_attributes = xlib.XSetWindowAttributes()
            if visual_id != default_visual_id:
                window_attributes.colormap = xlib.XCreateColormap(
                    self._x_display, root, visual, xlib.AllocNone)
            else:
                window_attributes.colormap = xlib.XDefaultColormap(
                    self._x_display, self._x_screen_id)
            window_attributes.bit_gravity = xlib.StaticGravity
            # Issue 287: Compiz on Intel/Mesa doesn't draw window decoration
            # unless CWBackPixel is given in mask. Should have
            # no effect on other systems, so it's set
            # unconditionally.
            mask = xlib.CWColormap | xlib.CWBitGravity | xlib.CWBackPixel
            if self._fullscreen:
                # Centre the view within the (larger) screen.
                width, height = self.screen.width, self.screen.height
                self._view_x = (width - self._width) // 2
                self._view_y = (height - self._height) // 2
            else:
                width, height = self._width, self._height
                self._view_x = self._view_y = 0
            self._window = xlib.XCreateWindow(self._x_display, root,
                0, 0, width, height, 0, visual_info.depth,
                xlib.InputOutput, visual, mask,
                byref(window_attributes))
            self._view = xlib.XCreateWindow(self._x_display,
                self._window, self._view_x, self._view_y,
                self._width, self._height, 0, visual_info.depth,
                xlib.InputOutput, visual, mask,
                byref(window_attributes));
            xlib.XMapWindow(self._x_display, self._view)
            xlib.XSelectInput(
                self._x_display, self._view, self._default_event_mask)
            self.display._window_map[self._window] = \
                self.dispatch_platform_event
            self.display._window_map[self._view] = \
                self.dispatch_platform_event_view
            self.canvas = XlibCanvas(self.display, self._view)
            self.context.attach(self.canvas)
            self.context.set_vsync(self._vsync) # XXX ?
            # Setting null background pixmap disables drawing the background,
            # preventing flicker while resizing (in theory).
            #
            # Issue 287: Compiz on Intel/Mesa doesn't draw window decoration if
            # this is called. As it doesn't seem to have any
            # effect anyway, it's just commented out.
            #xlib.XSetWindowBackgroundPixmap(self._x_display, self._window, 0)
            self._enable_xsync = (pyglet.options['xsync'] and
                                  self.display._enable_xsync and
                                  self.config.double_buffer)
            # Set supported protocols
            protocols = []
            protocols.append(xlib.XInternAtom(self._x_display,
                                              asbytes('WM_DELETE_WINDOW'), False))
            if self._enable_xsync:
                protocols.append(xlib.XInternAtom(self._x_display,
                                                  asbytes('_NET_WM_SYNC_REQUEST'),
                                                  False))
            protocols = (c_ulong * len(protocols))(*protocols)
            xlib.XSetWMProtocols(self._x_display, self._window,
                                 protocols, len(protocols))
            # Create window resize sync counter
            if self._enable_xsync:
                value = xsync.XSyncValue()
                self._sync_counter = xlib.XID(
                    xsync.XSyncCreateCounter(self._x_display, value))
                atom = xlib.XInternAtom(self._x_display,
                                        asbytes('_NET_WM_SYNC_REQUEST_COUNTER'), False)
                ptr = pointer(self._sync_counter)
                xlib.XChangeProperty(self._x_display, self._window,
                                     atom, XA_CARDINAL, 32,
                                     xlib.PropModeReplace,
                                     cast(ptr, POINTER(c_ubyte)), 1)
        # Set window attributes
        attributes = xlib.XSetWindowAttributes()
        attributes_mask = 0
        self._override_redirect = False
        if self._fullscreen:
            if pyglet.options['xlib_fullscreen_override_redirect']:
                # Try not to use this any more, it causes problems; disabled
                # by default in favour of _NET_WM_STATE_FULLSCREEN.
                attributes.override_redirect = self._fullscreen
                attributes_mask |= xlib.CWOverrideRedirect
                self._override_redirect = True
            else:
                self._set_wm_state('_NET_WM_STATE_FULLSCREEN')
        if self._fullscreen:
            xlib.XMoveResizeWindow(self._x_display, self._window,
                                   self.screen.x, self.screen.y,
                                   self.screen.width, self.screen.height)
        else:
            xlib.XResizeWindow(self._x_display, self._window,
                               self._width, self._height)
        xlib.XChangeWindowAttributes(self._x_display, self._window,
                                     attributes_mask, byref(attributes))
        # Set style
        styles = {
            self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL',
            self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG',
            self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY',
        }
        if self._style in styles:
            self._set_atoms_property('_NET_WM_WINDOW_TYPE',
                                     (styles[self._style],))
        elif self._style == self.WINDOW_STYLE_BORDERLESS:
            MWM_HINTS_DECORATIONS = 1 << 1
            PROP_MWM_HINTS_ELEMENTS = 5
            mwmhints = mwmhints_t()
            mwmhints.flags = MWM_HINTS_DECORATIONS
            mwmhints.decorations = 0
            name = xlib.XInternAtom(self._x_display, asbytes('_MOTIF_WM_HINTS'), False)
            xlib.XChangeProperty(self._x_display, self._window,
                                 name, name, 32, xlib.PropModeReplace,
                                 cast(pointer(mwmhints), POINTER(c_ubyte)),
                                 PROP_MWM_HINTS_ELEMENTS)
        # Set resizeable
        if not self._resizable and not self._fullscreen:
            self.set_minimum_size(self._width, self._height)
            self.set_maximum_size(self._width, self._height)
        # Set caption
        self.set_caption(self._caption)
        # this is supported by some compositors (ie gnome-shell), and more to come
        # see: http://standards.freedesktop.org/wm-spec/wm-spec-latest.html#idp6357888
        _NET_WM_BYPASS_COMPOSITOR_HINT_ON = c_ulong(int(self._fullscreen))
        name = xlib.XInternAtom(self._x_display,
                                asbytes('_NET_WM_BYPASS_COMPOSITOR'), False)
        ptr = pointer(_NET_WM_BYPASS_COMPOSITOR_HINT_ON)
        xlib.XChangeProperty(self._x_display, self._window,
                             name, XA_CARDINAL, 32,
                             xlib.PropModeReplace,
                             cast(ptr, POINTER(c_ubyte)), 1)
        # Create input context. A good but very outdated reference for this
        # is http://www.sbin.org/doc/Xlib/chapt_11.html
        if _have_utf8 and not self._x_ic:
            if not self.display._x_im:
                xlib.XSetLocaleModifiers(asbytes('@im=none'))
                self.display._x_im = \
                    xlib.XOpenIM(self._x_display, None, None, None)
            xlib.XFlush(self._x_display);
            # Need to set argtypes on this function because it's vararg,
            # and ctypes guesses wrong.
            xlib.XCreateIC.argtypes = [xlib.XIM,
                                       c_char_p, c_int,
                                       c_char_p, xlib.Window,
                                       c_char_p, xlib.Window,
                                       c_void_p]
            self._x_ic = xlib.XCreateIC(self.display._x_im,
                asbytes('inputStyle'), xlib.XIMPreeditNothing|xlib.XIMStatusNothing,
                asbytes('clientWindow'), self._window,
                asbytes('focusWindow'), self._window,
                None)
            filter_events = c_ulong()
            xlib.XGetICValues(self._x_ic,
                              'filterEvents', byref(filter_events),
                              None)
            self._default_event_mask |= filter_events.value
            xlib.XSetICFocus(self._x_ic)
        self.switch_to()
        if self._visible:
            self.set_visible(True)
        self.set_mouse_platform_visible()
        self._applied_mouse_exclusive = None
        self._update_exclusivity()
    def _map(self):
        """Map (show) the window, blocking until the server confirms it.

        Temporarily narrows the event mask so the blocking XNextEvent loop
        only sees structure events; geometry updates from ConfigureNotify
        are folded in while waiting for MapNotify.
        """
        if self._mapped:
            return
        # Map the window, wait for map event before continuing.
        xlib.XSelectInput(
            self._x_display, self._window, xlib.StructureNotifyMask)
        xlib.XMapRaised(self._x_display, self._window)
        e = xlib.XEvent()
        while True:
            xlib.XNextEvent(self._x_display, e)
            if e.type == xlib.ConfigureNotify:
                self._width = e.xconfigure.width
                self._height = e.xconfigure.height
            elif e.type == xlib.MapNotify:
                break
        xlib.XSelectInput(
            self._x_display, self._window, self._default_event_mask)
        self._mapped = True
        if self._override_redirect:
            # Possibly an override_redirect issue.
            self.activate()
        self._update_view_size()
        self.dispatch_event('on_resize', self._width, self._height)
        self.dispatch_event('on_show')
        self.dispatch_event('on_expose')
    def _unmap(self):
        """Unmap (hide) the window, blocking until UnmapNotify arrives."""
        if not self._mapped:
            return
        # Narrow the event mask so the blocking loop only sees structure
        # events, then restore it afterwards.
        xlib.XSelectInput(
            self._x_display, self._window, xlib.StructureNotifyMask)
        xlib.XUnmapWindow(self._x_display, self._window)
        e = xlib.XEvent()
        while True:
            xlib.XNextEvent(self._x_display, e)
            if e.type == xlib.UnmapNotify:
                break
        xlib.XSelectInput(
            self._x_display, self._window, self._default_event_mask)
        self._mapped = False
def _get_root(self):
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window,
byref(attributes))
return attributes.root
def _is_reparented(self):
root = c_ulong()
parent = c_ulong()
children = pointer(c_ulong())
n_children = c_uint()
xlib.XQueryTree(self._x_display, self._window,
byref(root), byref(parent), byref(children),
byref(n_children))
return root.value != parent.value
    def close(self):
        """Destroy the native window, GL context and input context.

        Safe to call more than once; subsequent calls are no-ops.
        """
        if not self._window:
            return
        self.context.destroy()
        self._unmap()
        if self._window:
            xlib.XDestroyWindow(self._x_display, self._window)
        del self.display._window_map[self._window]
        self._window = None
        if _have_utf8:
            xlib.XDestroyIC(self._x_ic)
            self._x_ic = None
        super(XlibWindow, self).close()
def switch_to(self):
if self.context:
self.context.set_current()
def flip(self):
self.draw_mouse_cursor()
# TODO canvas.flip?
if self.context:
self.context.flip()
self._sync_resize()
def set_vsync(self, vsync):
if pyglet.options['vsync'] is not None:
vsync = pyglet.options['vsync']
self._vsync = vsync
self.context.set_vsync(vsync)
def set_caption(self, caption):
if caption is None:
caption = ''
self._caption = caption
self._set_text_property('WM_NAME', caption, allow_utf8=False)
self._set_text_property('WM_ICON_NAME', caption, allow_utf8=False)
self._set_text_property('_NET_WM_NAME', caption)
self._set_text_property('_NET_WM_ICON_NAME', caption)
    def get_caption(self):
        """Return the current window title."""
        return self._caption
    def set_size(self, width, height):
        """Resize the window to ``width`` x ``height`` pixels.

        Raises WindowException for fullscreen windows.  Non-resizable
        windows also get their WM min/max size hints pinned to the new
        size so the user cannot resize them.
        """
        if self._fullscreen:
            raise WindowException('Cannot set size of fullscreen window.')
        self._width = width
        self._height = height
        if not self._resizable:
            self.set_minimum_size(width, height)
            self.set_maximum_size(width, height)
        xlib.XResizeWindow(self._x_display, self._window, width, height)
        self._update_view_size()
        self.dispatch_event('on_resize', width, height)
    def _update_view_size(self):
        # Keep the child view window the same size as the logical window.
        xlib.XResizeWindow(self._x_display, self._view,
                           self._width, self._height)
    def get_size(self):
        """Return the last known (width, height) of the window."""
        # XGetGeometry and XWindowAttributes seem to always return the
        # original size of the window, which is wrong after the user
        # has resized it.
        # XXX this is probably fixed now, with fix of resize.
        return self._width, self._height
def set_location(self, x, y):
    """Move the window so the content area sits at screen coords (x, y)."""
    if self._is_reparented():
        # Assume the window manager has reparented our top-level window
        # only once, in which case attributes.x/y give the offset from
        # the frame to the content window. Better solution would be
        # to use _NET_FRAME_EXTENTS, where supported.
        attributes = xlib.XWindowAttributes()
        xlib.XGetWindowAttributes(self._x_display, self._window,
                                  byref(attributes))
        # XXX at least under KDE's WM these attrs are both 0
        x -= attributes.x
        y -= attributes.y
    xlib.XMoveWindow(self._x_display, self._window, x, y)
def get_location(self):
    """Return the window's position translated to root coordinates."""
    child = xlib.Window()
    x = c_int()
    y = c_int()
    xlib.XTranslateCoordinates(self._x_display,
                               self._window,
                               self._get_root(),
                               0, 0,
                               byref(x),
                               byref(y),
                               byref(child))
    return x.value, y.value
def activate(self):
    """Give this window keyboard focus."""
    xlib.XSetInputFocus(self._x_display, self._window,
                        xlib.RevertToParent, xlib.CurrentTime)
def set_visible(self, visible=True):
    """Map or unmap the X window and remember the requested state."""
    action = self._map if visible else self._unmap
    action()
    self._visible = visible
def set_minimum_size(self, width, height):
    """Record the minimum size and push it to the WM via size hints."""
    self._minimum_size = width, height
    self._set_wm_normal_hints()
def set_maximum_size(self, width, height):
    """Record the maximum size and push it to the WM via size hints."""
    self._maximum_size = width, height
    self._set_wm_normal_hints()
def minimize(self):
    """Iconify (minimize) the window."""
    xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
def maximize(self):
    """Maximize in both directions using EWMH state atoms."""
    self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ',
                       '_NET_WM_STATE_MAXIMIZED_VERT')
def set_mouse_platform_visible(self, platform_visible=None):
    """Show or hide the OS mouse pointer over this window.

    When `platform_visible` is None the effective visibility is derived
    from the requested mouse visibility and whether a pyglet-drawn
    (drawable) cursor is active.
    """
    if not self._window:
        return
    if platform_visible is None:
        platform_visible = self._mouse_visible and \
            not self._mouse_cursor.drawable
    if not platform_visible:
        # Hide the pointer by installing a cursor built from an empty
        # 8x8 bitmap.  (Bug fix: the original also called XBlackPixel
        # here and immediately discarded the result -- the dead call
        # has been removed; the zero-initialized XColor is what is
        # actually passed to XCreatePixmapCursor.)
        black = xlib.XColor()
        bmp = xlib.XCreateBitmapFromData(self._x_display, self._window,
                                         c_buffer(8), 8, 8)
        cursor = xlib.XCreatePixmapCursor(self._x_display, bmp, bmp,
                                          black, black, 0, 0)
        xlib.XDefineCursor(self._x_display, self._window, cursor)
        # The server keeps its own reference; free our handles.
        xlib.XFreeCursor(self._x_display, cursor)
        xlib.XFreePixmap(self._x_display, bmp)
    else:
        # Restore cursor
        if isinstance(self._mouse_cursor, XlibMouseCursor):
            xlib.XDefineCursor(self._x_display, self._window,
                               self._mouse_cursor.cursor)
        else:
            xlib.XUndefineCursor(self._x_display, self._window)
def set_mouse_position(self, x, y):
    """Warp the pointer to window coords (x, y); y is measured bottom-up."""
    xlib.XWarpPointer(self._x_display,
                      0, # src window
                      self._window, # dst window
                      0, 0, # src x, y
                      0, 0, # src w, h
                      x, self._height - y,
                      )
def _update_exclusivity(self):
    """Apply or revert mouse/keyboard grabs to match requested state.

    Exclusivity is only effective while the window is active; grabs
    are (re)applied only when the effective state differs from what
    was last applied.
    """
    mouse_exclusive = self._active and self._mouse_exclusive
    keyboard_exclusive = self._active and self._keyboard_exclusive
    if mouse_exclusive != self._applied_mouse_exclusive:
        if mouse_exclusive:
            self.set_mouse_platform_visible(False)
            # Restrict to client area
            xlib.XGrabPointer(self._x_display, self._window,
                              True,
                              0,
                              xlib.GrabModeAsync,
                              xlib.GrabModeAsync,
                              self._window,
                              0,
                              xlib.CurrentTime)
            # Move pointer to center of window
            x = self._width // 2
            y = self._height // 2
            self._mouse_exclusive_client = x, y
            self.set_mouse_position(x, y)
        elif self._fullscreen and not self.screen._xinerama:
            # Restrict to fullscreen area (prevent viewport scrolling)
            self.set_mouse_position(0, 0)
            r = xlib.XGrabPointer(self._x_display, self._view,
                                  True, 0,
                                  xlib.GrabModeAsync,
                                  xlib.GrabModeAsync,
                                  self._view,
                                  0,
                                  xlib.CurrentTime)
            if r:
                # Failed to grab, try again later
                self._applied_mouse_exclusive = None
                return
            self.set_mouse_platform_visible()
        else:
            # Unclip
            xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
            self.set_mouse_platform_visible()
        self._applied_mouse_exclusive = mouse_exclusive
    if keyboard_exclusive != self._applied_keyboard_exclusive:
        if keyboard_exclusive:
            xlib.XGrabKeyboard(self._x_display,
                               self._window,
                               False,
                               xlib.GrabModeAsync,
                               xlib.GrabModeAsync,
                               xlib.CurrentTime)
        else:
            xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
        self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
    """Request (or release) exclusive-mouse mode and apply the change."""
    if exclusive != self._mouse_exclusive:
        self._mouse_exclusive = exclusive
        self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
    """Request (or release) an exclusive keyboard grab."""
    if exclusive == self._keyboard_exclusive:
        return
    self._keyboard_exclusive = exclusive
    self._update_exclusivity()
def get_system_mouse_cursor(self, name):
    """Return a platform cursor object for a standard cursor name.

    Raises MouseCursorException for names with no X cursor-font shape.
    """
    if name == self.CURSOR_DEFAULT:
        return DefaultMouseCursor()
    # NQR means default shape is not pretty... surely there is another
    # cursor font?
    cursor_shapes = {
        self.CURSOR_CROSSHAIR: cursorfont.XC_crosshair,
        self.CURSOR_HAND: cursorfont.XC_hand2,
        self.CURSOR_HELP: cursorfont.XC_question_arrow, # NQR
        self.CURSOR_NO: cursorfont.XC_pirate, # NQR
        self.CURSOR_SIZE: cursorfont.XC_fleur,
        self.CURSOR_SIZE_UP: cursorfont.XC_top_side,
        self.CURSOR_SIZE_UP_RIGHT: cursorfont.XC_top_right_corner,
        self.CURSOR_SIZE_RIGHT: cursorfont.XC_right_side,
        self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner,
        self.CURSOR_SIZE_DOWN: cursorfont.XC_bottom_side,
        self.CURSOR_SIZE_DOWN_LEFT: cursorfont.XC_bottom_left_corner,
        self.CURSOR_SIZE_LEFT: cursorfont.XC_left_side,
        self.CURSOR_SIZE_UP_LEFT: cursorfont.XC_top_left_corner,
        self.CURSOR_SIZE_UP_DOWN: cursorfont.XC_sb_v_double_arrow,
        self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow,
        self.CURSOR_TEXT: cursorfont.XC_xterm,
        self.CURSOR_WAIT: cursorfont.XC_watch,
        self.CURSOR_WAIT_ARROW: cursorfont.XC_watch, # NQR
    }
    if name not in cursor_shapes:
        raise MouseCursorException('Unknown cursor name "%s"' % name)
    cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
    return XlibMouseCursor(cursor)
def set_icon(self, *images):
    """Publish window icons through the _NET_WM_ICON property."""
    # Careful! XChangeProperty takes an array of long when data type
    # is 32-bit (but long can be 64 bit!), so pad high bytes of format if
    # necessary.
    import sys
    format = {
        ('little', 4): 'BGRA',
        ('little', 8): 'BGRAAAAA',
        ('big', 4): 'ARGB',
        ('big', 8): 'AAAAARGB'
    }[(sys.byteorder, sizeof(c_ulong))]
    data = asbytes('')
    for image in images:
        image = image.get_image_data()
        # Negative pitch requests rows in top-to-bottom order.
        pitch = -(image.width * len(format))
        # Each icon entry is prefixed by width and height as longs.
        s = c_buffer(sizeof(c_ulong) * 2)
        memmove(s, cast((c_ulong * 2)(image.width, image.height),
                        POINTER(c_ubyte)), len(s))
        data += s.raw + image.get_data(format, pitch)
    buffer = (c_ubyte * len(data))()
    memmove(buffer, data, len(data))
    atom = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_ICON'), False)
    xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL,
                         32, xlib.PropModeReplace, buffer, len(data)//sizeof(c_ulong))
# Private utility
def _set_wm_normal_hints(self):
    """Publish the recorded min/max size constraints to the WM."""
    hints = xlib.XAllocSizeHints().contents
    if self._minimum_size:
        hints.flags |= xlib.PMinSize
        hints.min_width, hints.min_height = self._minimum_size
    if self._maximum_size:
        hints.flags |= xlib.PMaxSize
        hints.max_width, hints.max_height = self._maximum_size
    xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
def _set_text_property(self, name, value, allow_utf8=True):
    """Set a string window property.

    Uses a UTF-8 text property when the input-method system supports
    it and `allow_utf8` is True; otherwise falls back to an ASCII
    STRING property (unencodable characters are dropped).
    """
    atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
    if not atom:
        raise XlibException('Undefined atom "%s"' % name)
    assert type(value) in (str, unicode)
    property = xlib.XTextProperty()
    if _have_utf8 and allow_utf8:
        buf = create_string_buffer(value.encode('utf8'))
        result = xlib.Xutf8TextListToTextProperty(self._x_display,
            cast(pointer(buf), c_char_p), 1, xlib.XUTF8StringStyle,
            byref(property))
        if result < 0:
            raise XlibException('Could not create UTF8 text property')
    else:
        buf = create_string_buffer(value.encode('ascii', 'ignore'))
        result = xlib.XStringListToTextProperty(
            cast(pointer(buf), c_char_p), 1, byref(property))
        if result < 0:
            raise XlibException('Could not create text property')
    xlib.XSetTextProperty(self._x_display,
                          self._window, byref(property), atom)
    # XXX <rj> Xlib doesn't like us freeing this
    #xlib.XFree(property.value)
def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
    """Set a window property whose value is a list of named atoms.

    `name` is the property to set and `values` the atom names to store
    in it.  An empty `values` list deletes the property instead.
    """
    name_atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
    atoms = []
    for value in values:
        atoms.append(xlib.XInternAtom(self._x_display, asbytes(value), False))
    atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
    if len(atoms):
        atoms_ar = (xlib.Atom * len(atoms))(*atoms)
        xlib.XChangeProperty(self._x_display, self._window,
                             name_atom, atom_type, 32, mode,
                             cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
    else:
        # Bug fix: this branch referenced the undefined name
        # `net_wm_state` (copied from _set_wm_state), so deleting via an
        # empty `values` list raised NameError.  Delete the property
        # that was actually requested.
        xlib.XDeleteProperty(self._x_display, self._window, name_atom)
def _set_wm_state(self, *states):
    """Add EWMH window-state atoms and notify the window manager.

    Updates the _NET_WM_STATE property, then sends a ClientMessage to
    the root window so the WM applies the change to a mapped window.
    """
    # Set property
    net_wm_state = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_STATE'), False)
    atoms = []
    for state in states:
        atoms.append(xlib.XInternAtom(self._x_display, asbytes(state), False))
    atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
    if len(atoms):
        atoms_ar = (xlib.Atom * len(atoms))(*atoms)
        xlib.XChangeProperty(self._x_display, self._window,
            net_wm_state, atom_type, 32, xlib.PropModePrepend,
            cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
    else:
        xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
    # Nudge the WM
    e = xlib.XEvent()
    e.xclient.type = xlib.ClientMessage
    e.xclient.message_type = net_wm_state
    e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
    e.xclient.window = self._window
    e.xclient.format = 32
    e.xclient.data.l[0] = xlib.PropModePrepend
    for i, atom in enumerate(atoms):
        e.xclient.data.l[i + 1] = atom
    xlib.XSendEvent(self._x_display, self._get_root(),
                    False, xlib.SubstructureRedirectMask, byref(e))
# Event handling
def dispatch_events(self):
    """Drain and dispatch all pending X events for this window.

    Window events, view events and ClientMessage (e.g. close) events
    are pulled from separate queues so each reaches the right handler
    table.
    """
    self.dispatch_pending_events()
    self._allow_dispatch_event = True
    e = xlib.XEvent()
    # Cache these in case window is closed from an event handler
    _x_display = self._x_display
    _window = self._window
    _view = self._view
    # Check for the events specific to this window
    while xlib.XCheckWindowEvent(_x_display, _window,
                                 0x1ffffff, byref(e)):
        # Key events are filtered by the xlib window event
        # handler so they get a shot at the prefiltered event.
        if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
            if xlib.XFilterEvent(e, 0):
                continue
        self.dispatch_platform_event(e)
    # Check for the events specific to this view
    while xlib.XCheckWindowEvent(_x_display, _view,
                                 0x1ffffff, byref(e)):
        # Key events are filtered by the xlib window event
        # handler so they get a shot at the prefiltered event.
        if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
            if xlib.XFilterEvent(e, 0):
                continue
        self.dispatch_platform_event_view(e)
    # Generic events for this window (the window close event).
    while xlib.XCheckTypedWindowEvent(_x_display, _window,
                                      xlib.ClientMessage, byref(e)):
        self.dispatch_platform_event(e)
    self._allow_dispatch_event = False
def dispatch_pending_events(self):
    """Flush deferred events, then any GL context-loss notifications."""
    while self._event_queue:
        EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
    # Dispatch any context-related events
    if self._lost_context:
        self._lost_context = False
        EventDispatcher.dispatch_event(self, 'on_context_lost')
    if self._lost_context_state:
        self._lost_context_state = False
        EventDispatcher.dispatch_event(self, 'on_context_state_lost')
def dispatch_platform_event(self, e):
    """Route a window-level X event to its registered handler, if any."""
    if self._applied_mouse_exclusive is None:
        # A previous grab attempt failed; retry before handling input.
        self._update_exclusivity()
    event_handler = self._event_handlers.get(e.type)
    if event_handler:
        event_handler(e)
def dispatch_platform_event_view(self, e):
    """Route a view-level X event to its registered handler, if any."""
    handler = self._view_event_handlers.get(e.type)
    if handler:
        handler(e)
@staticmethod
def _translate_modifiers(state):
    """Convert an X11 modifier-state bitmask into pyglet key.MOD_* flags."""
    mask_to_flag = (
        (xlib.ShiftMask, key.MOD_SHIFT),
        (xlib.ControlMask, key.MOD_CTRL),
        (xlib.LockMask, key.MOD_CAPSLOCK),
        (xlib.Mod1Mask, key.MOD_ALT),
        (xlib.Mod2Mask, key.MOD_NUMLOCK),
        (xlib.Mod4Mask, key.MOD_WINDOWS),
        (xlib.Mod5Mask, key.MOD_SCROLLLOCK),
    )
    modifiers = 0
    for mask, flag in mask_to_flag:
        if state & mask:
            modifiers |= flag
    return modifiers
# Event handlers
'''
def _event_symbol(self, event):
# pyglet.self.key keysymbols are identical to X11 keysymbols, no
# need to map the keysymbol.
symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)
if symbol == 0:
# XIM event
return None
elif symbol not in key._key_names.keys():
symbol = key.user_key(event.xkey.keycode)
return symbol
'''
def _event_text_symbol(self, ev):
    """Decode a key event into a (text, symbol) pair.

    `text` is the typed character string or None; `symbol` is the
    pyglet key symbol (X keysyms are identical).  XIM filtering is
    honoured: filtered events keep their symbol but yield no text.
    """
    text = None
    symbol = xlib.KeySym()
    buffer = create_string_buffer(128)
    # Look up raw keysym before XIM filters it (default for keypress and
    # keyrelease)
    count = xlib.XLookupString(ev.xkey,
                               buffer, len(buffer) - 1,
                               byref(symbol), None)
    # Give XIM a shot
    filtered = xlib.XFilterEvent(ev, ev.xany.window)
    if ev.type == xlib.KeyPress and not filtered:
        status = c_int()
        if _have_utf8:
            encoding = 'utf8'
            count = xlib.Xutf8LookupString(self._x_ic,
                                           ev.xkey,
                                           buffer, len(buffer) - 1,
                                           byref(symbol), byref(status))
            if status.value == xlib.XBufferOverflow:
                raise NotImplementedError('TODO: XIM buffer resize')
        else:
            encoding = 'ascii'
            count = xlib.XLookupString(ev.xkey,
                                       buffer, len(buffer) - 1,
                                       byref(symbol), None)
            if count:
                status.value = xlib.XLookupBoth
        if status.value & (xlib.XLookupChars | xlib.XLookupBoth):
            text = buffer.value[:count].decode(encoding)
        # Don't treat Unicode command codepoints as text, except Return.
        if text and unicodedata.category(text) == 'Cc' and text != '\r':
            text = None
    symbol = symbol.value
    # If the event is a XIM filtered event, the keysym will be virtual
    # (e.g., aacute instead of A after a dead key). Drop it, we don't
    # want these kind of key events.
    if ev.xkey.keycode == 0 and not filtered:
        symbol = None
    # pyglet.self.key keysymbols are identical to X11 keysymbols, no
    # need to map the keysymbol. For keysyms outside the pyglet set, map
    # raw key code to a user key.
    if symbol and symbol not in key._key_names and ev.xkey.keycode:
        # Issue 353: Symbol is uppercase when shift key held down.
        try:
            symbol = ord(unichr(symbol).lower())
        except ValueError:
            # Not a valid unichr, use the keycode
            symbol = key.user_key(ev.xkey.keycode)
        else:
            # If still not recognised, use the keycode
            if symbol not in key._key_names:
                symbol = key.user_key(ev.xkey.keycode)
    if filtered:
        # The event was filtered, text must be ignored, but the symbol is
        # still good.
        return None, symbol
    return text, symbol
def _event_text_motion(self, symbol, modifiers):
    """Map a (symbol, modifiers) pair to a text-motion constant or None."""
    # Alt combinations never produce text motions.
    if modifiers & key.MOD_ALT:
        return None
    ctrl_held = (modifiers & key.MOD_CTRL) != 0
    return _motion_map.get((symbol, ctrl_held))
@ViewEventHandler
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key_view(self, ev):
    """Decode a key event and dispatch key, text and text-motion events.

    Includes a manual auto-repeat detector for servers that cannot
    report detectable auto-repeat themselves.
    """
    # Try to detect autorepeat ourselves if the server doesn't support it
    # XXX: Doesn't always work, better off letting the server do it
    global _can_detect_autorepeat
    if not _can_detect_autorepeat and ev.type == xlib.KeyRelease:
        # Look in the queue for a matching KeyPress with same timestamp,
        # indicating an auto-repeat rather than actual key event.
        saved = []
        while True:
            auto_event = xlib.XEvent()
            result = xlib.XCheckWindowEvent(self._x_display,
                self._window, xlib.KeyPress|xlib.KeyRelease,
                byref(auto_event))
            if not result:
                break
            saved.append(auto_event)
            if auto_event.type == xlib.KeyRelease:
                # just save this off for restoration back to the queue
                continue
            if ev.xkey.keycode == auto_event.xkey.keycode:
                # Found a key repeat: dispatch EVENT_TEXT* event
                text, symbol = self._event_text_symbol(auto_event)
                modifiers = self._translate_modifiers(ev.xkey.state)
                modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
                motion = self._event_text_motion(symbol, modifiers)
                if motion:
                    if modifiers & key.MOD_SHIFT:
                        self.dispatch_event(
                            'on_text_motion_select', motion)
                    else:
                        self.dispatch_event('on_text_motion', motion)
                elif text and not modifiers_ctrl:
                    self.dispatch_event('on_text', text)
                # Drop the repeated KeyPress just dispatched as text;
                # restore the other saved events to the queue.
                ditched = saved.pop()
                for auto_event in reversed(saved):
                    xlib.XPutBackEvent(self._x_display, byref(auto_event))
                return
            else:
                # Key code of press did not match, therefore no repeating
                # is going on, stop searching.
                break
        # Whoops, put the events back, it's for real.
        for auto_event in reversed(saved):
            xlib.XPutBackEvent(self._x_display, byref(auto_event))
    text, symbol = self._event_text_symbol(ev)
    modifiers = self._translate_modifiers(ev.xkey.state)
    modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
    motion = self._event_text_motion(symbol, modifiers)
    if ev.type == xlib.KeyPress:
        # pressed_keys suppresses server-generated auto-repeat presses.
        if symbol and (not _can_detect_autorepeat or symbol not in self.pressed_keys):
            self.dispatch_event('on_key_press', symbol, modifiers)
            if _can_detect_autorepeat:
                self.pressed_keys.add(symbol)
        if motion:
            if modifiers & key.MOD_SHIFT:
                self.dispatch_event('on_text_motion_select', motion)
            else:
                self.dispatch_event('on_text_motion', motion)
        elif text and not modifiers_ctrl:
            self.dispatch_event('on_text', text)
    elif ev.type == xlib.KeyRelease:
        if symbol:
            self.dispatch_event('on_key_release', symbol, modifiers)
            if _can_detect_autorepeat and symbol in self.pressed_keys:
                self.pressed_keys.remove(symbol)
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key(self, ev):
    # Window-level key events share the view-level handling.
    return self._event_key_view(ev)
@ViewEventHandler
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify_view(self, ev):
    """Dispatch mouse motion/drag within the view, honouring exclusivity."""
    x = ev.xmotion.x
    y = self.height - ev.xmotion.y
    if self._mouse_in_window:
        dx = x - self._mouse_x
        dy = y - self._mouse_y
    else:
        dx = dy = 0
    if self._applied_mouse_exclusive and \
            (ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client:
        # Ignore events caused by XWarpPointer
        self._mouse_x = x
        self._mouse_y = y
        return
    if self._applied_mouse_exclusive:
        # Reset pointer position
        ex, ey = self._mouse_exclusive_client
        xlib.XWarpPointer(self._x_display,
                          0,
                          self._window,
                          0, 0,
                          0, 0,
                          ex, ey)
    self._mouse_x = x
    self._mouse_y = y
    self._mouse_in_window = True
    buttons = 0
    if ev.xmotion.state & xlib.Button1MotionMask:
        buttons |= mouse.LEFT
    if ev.xmotion.state & xlib.Button2MotionMask:
        buttons |= mouse.MIDDLE
    if ev.xmotion.state & xlib.Button3MotionMask:
        buttons |= mouse.RIGHT
    if buttons:
        # Drag event
        modifiers = self._translate_modifiers(ev.xmotion.state)
        self.dispatch_event('on_mouse_drag',
                            x, y, dx, dy, buttons, modifiers)
    else:
        # Motion event
        self.dispatch_event('on_mouse_motion', x, y, dx, dy)
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify(self, ev):
    """Dispatch drags outside the view but inside the window frame."""
    # Window motion looks for drags that are outside the view but within
    # the window.
    buttons = 0
    if ev.xmotion.state & xlib.Button1MotionMask:
        buttons |= mouse.LEFT
    if ev.xmotion.state & xlib.Button2MotionMask:
        buttons |= mouse.MIDDLE
    if ev.xmotion.state & xlib.Button3MotionMask:
        buttons |= mouse.RIGHT
    if buttons:
        # Drag event
        x = ev.xmotion.x - self._view_x
        y = self._height - (ev.xmotion.y - self._view_y)
        if self._mouse_in_window:
            dx = x - self._mouse_x
            dy = y - self._mouse_y
        else:
            dx = dy = 0
        self._mouse_x = x
        self._mouse_y = y
        modifiers = self._translate_modifiers(ev.xmotion.state)
        self.dispatch_event('on_mouse_drag',
                            x, y, dx, dy, buttons, modifiers)
@XlibEventHandler(xlib.ClientMessage)
def _event_clientmessage(self, ev):
    """Handle WM close requests and _NET_WM_SYNC_REQUEST counters."""
    atom = ev.xclient.data.l[0]
    if atom == xlib.XInternAtom(ev.xclient.display,
                                asbytes('WM_DELETE_WINDOW'), False):
        self.dispatch_event('on_close')
    elif (self._enable_xsync and
          atom == xlib.XInternAtom(ev.xclient.display,
                                   asbytes('_NET_WM_SYNC_REQUEST'), False)):
        # The sync counter value arrives split into two 32-bit halves.
        lo = ev.xclient.data.l[2]
        hi = ev.xclient.data.l[3]
        self._current_sync_value = xsync.XSyncValue(hi, lo)
def _sync_resize(self):
    """Complete the XSync resize handshake after a frame is drawn."""
    if self._enable_xsync and self._current_sync_valid:
        if xsync.XSyncValueIsZero(self._current_sync_value):
            self._current_sync_valid = False
            return
        xsync.XSyncSetCounter(self._x_display,
                              self._sync_counter,
                              self._current_sync_value)
        self._current_sync_value = None
        self._current_sync_valid = False
@ViewEventHandler
@XlibEventHandler(xlib.ButtonPress)
@XlibEventHandler(xlib.ButtonRelease)
def _event_button(self, ev):
    """Dispatch mouse press/release and scroll-wheel events."""
    x = ev.xbutton.x
    y = self.height - ev.xbutton.y
    button = 1 << (ev.xbutton.button - 1) # 1, 2, 3 -> 1, 2, 4
    modifiers = self._translate_modifiers(ev.xbutton.state)
    if ev.type == xlib.ButtonPress:
        # override_redirect issue: manually activate this window if
        # fullscreen.
        if self._override_redirect and not self._active:
            self.activate()
        # X maps the scroll wheel to buttons 4 (up) and 5 (down).
        if ev.xbutton.button == 4:
            self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
        elif ev.xbutton.button == 5:
            self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
        elif ev.xbutton.button < len(self._mouse_buttons):
            self._mouse_buttons[ev.xbutton.button] = True
            self.dispatch_event('on_mouse_press',
                                x, y, button, modifiers)
    else:
        if ev.xbutton.button < 4:
            self._mouse_buttons[ev.xbutton.button] = False
        # NOTE(review): releases of wheel "buttons" (>= 4) also fall
        # through here and dispatch on_mouse_release -- confirm intended.
        self.dispatch_event('on_mouse_release',
                            x, y, button, modifiers)
@ViewEventHandler
@XlibEventHandler(xlib.Expose)
def _event_expose(self, ev):
    """Redraw on exposure, coalescing a burst into a single event."""
    # Ignore all expose events except the last one. We could be told
    # about exposure rects - but I don't see the point since we're
    # working with OpenGL and we'll just redraw the whole scene.
    if ev.xexpose.count > 0: return
    self.dispatch_event('on_expose')
@ViewEventHandler
@XlibEventHandler(xlib.EnterNotify)
def _event_enternotify(self, ev):
    """Track pointer entry: refresh button state and mouse position."""
    # figure active mouse buttons
    # XXX ignore modifier state?
    state = ev.xcrossing.state
    self._mouse_buttons[1] = state & xlib.Button1Mask
    self._mouse_buttons[2] = state & xlib.Button2Mask
    self._mouse_buttons[3] = state & xlib.Button3Mask
    self._mouse_buttons[4] = state & xlib.Button4Mask
    self._mouse_buttons[5] = state & xlib.Button5Mask
    # mouse position
    x = self._mouse_x = ev.xcrossing.x
    y = self._mouse_y = self.height - ev.xcrossing.y
    self._mouse_in_window = True
    # XXX there may be more we could do here
    self.dispatch_event('on_mouse_enter', x, y)
@ViewEventHandler
@XlibEventHandler(xlib.LeaveNotify)
def _event_leavenotify(self, ev):
    """Track pointer exit and dispatch on_mouse_leave."""
    x = self._mouse_x = ev.xcrossing.x
    y = self._mouse_y = self.height - ev.xcrossing.y
    self._mouse_in_window = False
    self.dispatch_event('on_mouse_leave', x, y)
@XlibEventHandler(xlib.ConfigureNotify)
def _event_configurenotify(self, ev):
    """Handle geometry changes: resize the view, emit resize/move events."""
    if self._enable_xsync and self._current_sync_value:
        self._current_sync_valid = True
    if self._fullscreen:
        return
    self.switch_to()
    w, h = ev.xconfigure.width, ev.xconfigure.height
    x, y = ev.xconfigure.x, ev.xconfigure.y
    if self._width != w or self._height != h:
        self._width = w
        self._height = h
        self._update_view_size()
        self.dispatch_event('on_resize', self._width, self._height)
    if self._x != x or self._y != y:
        self.dispatch_event('on_move', x, y)
        self._x = x
        self._y = y
@XlibEventHandler(xlib.FocusIn)
def _event_focusin(self, ev):
    """Gain focus: re-apply grabs and focus the X input context."""
    self._active = True
    self._update_exclusivity()
    self.dispatch_event('on_activate')
    xlib.XSetICFocus(self._x_ic)
@XlibEventHandler(xlib.FocusOut)
def _event_focusout(self, ev):
    """Lose focus: release grabs and unfocus the X input context."""
    self._active = False
    self._update_exclusivity()
    self.dispatch_event('on_deactivate')
    xlib.XUnsetICFocus(self._x_ic)
@XlibEventHandler(xlib.MapNotify)
def _event_mapnotify(self, ev):
    """Window became visible: emit on_show and re-apply grabs."""
    self._mapped = True
    self.dispatch_event('on_show')
    self._update_exclusivity()
@XlibEventHandler(xlib.UnmapNotify)
def _event_unmapnotify(self, ev):
    """Window was hidden/iconified: emit on_hide."""
    self._mapped = False
    self.dispatch_event('on_hide')
| bsd-3-clause |
ml-lab/keras | tests/auto/keras/layers/test_core.py | 48 | 5211 | import unittest
import numpy as np
from numpy.testing import assert_allclose
import theano
from keras.layers import core
class TestLayerBase(unittest.TestCase):
    """Input/output wiring tests for the base core.Layer class."""
    def test_input_output(self):
        """An unset input raises; a set input is echoed by both getters."""
        nb_samples = 10
        input_dim = 5
        layer = core.Layer()
        # As long as there is no input, an error should be raised.
        for train in [True, False]:
            self.assertRaises(AttributeError, layer.get_input, train)
            self.assertRaises(AttributeError, layer.get_output, train)
        # Once an input is provided, it should be reachable through the
        # appropriate getters
        input = np.ones((nb_samples, input_dim))
        layer.input = theano.shared(value=input)
        for train in [True, False]:
            assert_allclose(layer.get_input(train).eval(), input)
            assert_allclose(layer.get_output(train).eval(), input)
    def test_connections(self):
        """set_previous propagates the upstream layer's input downstream."""
        nb_samples = 10
        input_dim = 5
        layer1 = core.Layer()
        layer2 = core.Layer()
        input = np.ones((nb_samples, input_dim))
        layer1.input = theano.shared(value=input)
        # As long as there is no previous layer, an error should be raised.
        for train in [True, False]:
            self.assertRaises(AttributeError, layer2.get_input, train)
        # After connecting, input of layer1 should be passed through
        layer2.set_previous(layer1)
        for train in [True, False]:
            assert_allclose(layer2.get_input(train).eval(), input)
            assert_allclose(layer2.get_output(train).eval(), input)
class TestConfigParams(unittest.TestCase):
    """
    Test the constructor, config and params functions of all layers in core.
    """

    def _runner(self, layer):
        # A layer's configuration must serialise to a plain dict.
        conf = layer.get_config()
        assert (type(conf) == dict)
        # Parameters may be returned in any iterable container.
        param = layer.get_params()
        assert hasattr(param, '__iter__')

    def test_base(self):
        self._runner(core.Layer())

    def test_masked(self):
        self._runner(core.MaskedLayer())

    def test_merge(self):
        self._runner(core.Merge([core.Layer(), core.Layer()]))

    def test_dropout(self):
        self._runner(core.Dropout(0.5))

    def test_activation(self):
        self._runner(core.Activation('linear'))

    def test_reshape(self):
        self._runner(core.Reshape(10, 10))

    def test_flatten(self):
        self._runner(core.Flatten())

    def test_repeat_vector(self):
        self._runner(core.RepeatVector(10))

    def test_dense(self):
        self._runner(core.Dense(10, 10))

    def test_act_reg(self):
        self._runner(core.ActivityRegularization(0.5, 0.5))

    def test_time_dist_dense(self):
        self._runner(core.TimeDistributedDense(10, 10))

    def test_autoencoder(self):
        self._runner(core.AutoEncoder(core.Layer(), core.Layer()))

    def test_maxout_dense(self):
        self._runner(core.MaxoutDense(10, 10))
class TestMasking(unittest.TestCase):
    """Test the Masking class (masks timesteps equal to a given value)."""
    def test_sequences(self):
        """Test masking sequences with zeroes as padding"""
        # integer inputs, one per timestep, like embeddings
        layer = core.Masking()
        func = theano.function([layer.input], layer.get_output_mask())
        self.assertTrue(np.all(
            # get mask for this input
            func(np.array(
                [[[1], [2], [3], [0]],
                 [[0], [4], [5], [0]]], dtype=np.int32)) ==
            # This is the expected output mask, one dimension less
            np.array([[1, 1, 1, 0], [0, 1, 1, 0]])))
    def test_non_zero(self):
        """Test masking with non-zero mask value"""
        layer = core.Masking(5)
        func = theano.function([layer.input], layer.get_output_mask())
        self.assertTrue(np.all(
            # get mask for this input, if not all the values are 5, shouldn't masked
            func(np.array(
                [[[1, 1], [2, 1], [3, 1], [5, 5]],
                 [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) ==
            # This is the expected output mask, one dimension less
            np.array([[1, 1, 1, 0], [1, 1, 1, 1]])))
    def test_non_zero_output(self):
        """Test output of masking layer with non-zero mask value"""
        layer = core.Masking(5)
        func = theano.function([layer.input], layer.get_output())
        self.assertTrue(np.all(
            # get output for this input, replace padding with 0
            func(np.array(
                [[[1, 1], [2, 1], [3, 1], [5, 5]],
                 [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) ==
            # This is the expected output
            np.array([[[1, 1], [2, 1], [3, 1], [0, 0]],
                      [[1, 5], [5, 0], [0, 0], [0, 0]]])))
# Run the test-suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| mit |
TamirEvan/mupdf | scripts/cmapflatten.py | 5 | 3170 | #!/usr/bin/env python3
# Parse a Uni* CMap file and flatten it.
#
# The Uni* CMap files only have 'cidchar' and 'cidrange' sections, never
# 'bfchar' or 'bfrange'.
import sys
def flattencmap(filename):
    """Parse a CMap file and print an equivalent flattened CMap to stdout.

    Every cidrange section is expanded into individual cidchar entries,
    so the output contains only codespacerange and cidchar data.
    """
    codespacerange = []
    usecmap = ""
    cmapname = ""
    cmapversion = "1.0"
    csi_registry = "(Adobe)"
    csi_ordering = "(Unknown)"
    csi_supplement = 1
    wmode = 0
    # code -> CID (renamed from `map`, which shadowed the builtin)
    cidmap = {}

    def tocode(s):
        # Codes are hex inside angle brackets (<1a2b>) or plain decimal.
        if s[0] == '<' and s[-1] == '>':
            return int(s[1:-1], 16)
        return int(s, 10)

    def map_cidchar(lo, v):
        cidmap[lo] = v

    def map_cidrange(lo, hi, v):
        # Expand an inclusive code range into one entry per code.
        while lo <= hi:
            cidmap[lo] = v
            lo = lo + 1
            v = v + 1

    current = None
    # `with` guarantees the file is closed (the original never closed it).
    with open(filename, "r") as f:
        for line in f:
            if line[0] == '%':
                continue
            line = line.strip().split()
            if len(line) == 0:
                continue
            if line[0] == '/CMapVersion': cmapversion = line[1]
            elif line[0] == '/CMapName': cmapname = line[1][1:]
            elif line[0] == '/WMode': wmode = int(line[1])
            elif line[0] == '/Registry': csi_registry = line[1]
            elif line[0] == '/Ordering': csi_ordering = line[1]
            elif line[0] == '/Supplement': csi_supplement = line[1]
            elif len(line) > 1 and line[1] == 'usecmap': usecmap = line[0][1:]
            elif len(line) > 1 and line[1] == 'begincodespacerange': current = 'codespacerange'
            elif len(line) > 1 and line[1] == 'begincidrange': current = 'cidrange'
            elif len(line) > 1 and line[1] == 'begincidchar': current = 'cidchar'
            elif line[0].startswith("end"):
                current = None
            elif current == 'codespacerange' and len(line) == 2:
                # Bug fix: use integer (//) division -- under Python 3 the
                # original `/ 2` produced a float byte count.
                n, a, b = (len(line[0])-2)//2, tocode(line[0]), tocode(line[1])
                codespacerange.append((n, a, b))
            elif current == 'cidrange' and len(line) == 3:
                a, b, c = tocode(line[0]), tocode(line[1]), tocode(line[2])
                map_cidrange(a, b, c)
            elif current == 'cidchar' and len(line) == 2:
                a, b = tocode(line[0]), tocode(line[1])
                map_cidchar(a, b)

    # Print flattened CMap file
    print("%!PS-Adobe-3.0 Resource-CMap")
    print("%%DocumentNeededResources: procset (CIDInit)")
    print("%%IncludeResource: procset (CIDInit)")
    print("%%%%BeginResource: CMap (%s)" % cmapname)
    print("%%%%Version: %s" % cmapversion)
    print("%%EndComments")
    print("/CIDInit /ProcSet findresource begin")
    print("12 dict begin")
    print("begincmap")
    if usecmap: print("/%s usecmap" % usecmap)
    print("/CIDSystemInfo 3 dict dup begin")
    print(" /Registry %s def" % csi_registry)
    print(" /Ordering %s def" % csi_ordering)
    print(" /Supplement %s def" % csi_supplement)
    print("end def")
    print("/CMapName /%s def" % cmapname)
    print("/CMapVersion %s def" % cmapversion)
    print("/CMapType 1 def")
    print("/WMode %d def" % wmode)
    if len(codespacerange):
        print("%d begincodespacerange" % len(codespacerange))
        for r in codespacerange:
            # r[0] is the code length in bytes; two hex digits per byte.
            fmt = "<%%0%dx> <%%0%dx>" % (r[0]*2, r[0]*2)
            print(fmt % (r[1], r[2]))
        print("endcodespacerange")
    keys = sorted(cidmap)
    print("%d begincidchar" % len(keys))
    for code in keys:
        print("<%04x> %d" % (code, cidmap[code]))
    print("endcidchar")
    print("endcmap")
    print("CMapName currentdict /CMap defineresource pop")
    print("end")
    print("end")
    print("%%EndResource")
    print("%%EOF")
# Flatten every CMap file named on the command line to stdout.
for arg in sys.argv[1:]:
    flattencmap(arg)
| agpl-3.0 |
alfa-jor/addon | plugin.video.alfa/channels/novelashdgratis.py | 1 | 5527 | # -*- coding: utf-8 -*-
# -*- Channel Novelas HD Gratis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
# Base URL of the scraped site.
host = 'http://www.novelasgratishd.co'
# Audio language codes offered by the site, mapped to display names.
IDIOMAS = {'la':'Latino'}
list_language = IDIOMAS.values()
# The site publishes no quality variants.
list_quality = []
# Hosters that may appear on episode pages (used by autoplay/filtertools).
list_servers = ['powvideo',
                'netu',
                'playedto',
                'allmyvideos',
                'gamovideo',
                'openload',
                'dailymotion',
                'streamplay',
                'streaminto',
                'youtube',
                'vidoza',
                'flashx']
def mainlist(item):
    """Build the channel's root menu (sections plus autoplay option)."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    itemlist.append(item.clone(title="En Emision", action="list_all", url=host, type='emision'))
    itemlist.append(item.clone(title="Ultimas Agregadas", action="list_all", url=host, type='ultimas'))
    itemlist.append(item.clone(title="Todas", action="list_all", url=host, type='todas'))
    itemlist.append(item.clone(title="Alfabetico", action="alpha", url=host, type='alfabetico'))
    if autoplay.context:
        autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    """Download a page and strip quotes, control chars and markup noise."""
    logger.info()
    raw_html = httptools.downloadpage(url).data
    cleaned = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", raw_html)
    return cleaned
def list_all(item):
    """List telenovelas for the section chosen in mainlist().

    item.type drives the scraping strategy: the sections in ``no_thumbs``
    only expose plain <li> links (no thumbnail), the others provide a
    picture block per show.
    """
    logger.info()
    itemlist =[]
    data = get_source(item.url)
    no_thumbs= ['emision', 'todas']
    if item.type not in no_thumbs:
        patron = '<div class=picture><a href=(.*?) title=(.*?)><img src=(.*?) width='
    else:
        # Narrow the page down to the section-specific <ul> before matching.
        if item.type == 'emision':
            data = scrapertools.find_single_match(data, 'class=dt>Telenovelas que se Transmiten<\/div>.*?</ul>')
        if item.type == 'todas':
            data = scrapertools.find_single_match(data, 'class=dt>Lista de Novelas<\/div>.*?</ul>')
        patron = '<li><a href=(.*?) title=(.*?)>.*?</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if item.type in no_thumbs:
        for scrapedurl, scrapedtitle in matches:
            url = host+scrapedurl
            contentSerieName = scrapedtitle
            title = contentSerieName
            new_item = Item(channel=item.channel, title=title, url= url, action='episodes',
                            contentSerieName= contentSerieName)
            itemlist.append(new_item)
    else:
        for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
            # NOTE(review): this branch inserts a '/' separator while the
            # branch above concatenates directly -- presumably the scraped
            # hrefs differ per section; verify against the live markup.
            url = host + '/'+scrapedurl
            contentSerieName = scrapedtitle
            title = contentSerieName
            thumbnail = scrapedthumbnail
            new_item = Item(channel=item.channel, title=title, url=url, action='episodes', thumbnail=thumbnail,
                            contentSerieName=contentSerieName)
            itemlist.append(new_item)
    return itemlist
def alpha(item):
    """List the alphabetical index entries of the site menu."""
    logger.info()
    data = get_source(item.url)
    pattern = '<li class=menu-gen><a href=(.*?)>(.*?)</a> </li>'
    entries = re.compile(pattern, re.DOTALL).findall(data)
    return [item.clone(title=label, url=host + link, action='list_all')
            for link, label in entries]
def episodes(item):
    """Scrape the episode list of one telenovela, newest episode first."""
    logger.info()
    data = get_source(item.url)
    pattern = '<li class=lc><a href=(.*?) title=.*?class=lcc>(.*?)</a>'
    episode_links = re.compile(pattern, re.DOTALL).findall(data)
    itemlist = [
        Item(channel=item.channel, title=ep_title, url=host + ep_url, action='findvideos')
        for ep_url, ep_title in episode_links
    ]
    # The site lists newest first; reverse so playback order is chronological.
    return itemlist[::-1]
def findvideos(item):
    """Resolve the hoster embed URLs for one episode page.

    Each player tab embeds a small <script> call whose function name
    identifies the hoster and whose argument is the video id; ``servers``
    maps those script names to embed URL prefixes.
    """
    logger.info()
    servers = {'powvideo':'http://powvideo.net/embed-',
               'netu':'http://netu.tv/watch_video.php?v=',
               'played':'http://played.to/embed-',
               'allmy':'http://allmyvideos.net/embed-',
               'gamo':'http://gamovideo.com/embed-',
               'openload':'https://openload.co/embed/',
               'daily':'http://www.dailymotion.com/embed/video/',
               'play':'http://streamplay.to/embed-',
               'streamin':'http://streamin.to/embed-',
               'youtube':'https://www.youtube.com/embed/',
               'vidoza':'https://vidoza.net/embed-',
               'flashx':'https://www.flashx.tv/embed-'}
    itemlist = []
    data = get_source(item.url)
    patron = 'id=tab\d+><script>(.*?)\((.*?)\)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for server, id in matches:
        if server in servers:
            url= '%s%s'%(servers[server], id)
            # title is a '%s' placeholder, filled in below with the server
            # name detected by servertools.
            itemlist.append(item.clone(url=url, title='%s', action='play', language=IDIOMAS['la']))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
| gpl-3.0 |
kmcminn/rails_survey | vendor/gems/nokogiri-1.6.0/ext/nokogiri/tmp/x86_64-linux-gnu/ports/libxml2/2.8.0/libxml2-2.8.0/python/tests/ctxterror.py | 87 | 1318 | #!/usr/bin/python -u
#
# This test exercise the redirection of error messages with a
# functions defined in Python.
#
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
# Expected error output: each message is prefixed by the handler argument
# "-->" and carries the libxml2 severity level in parentheses.
expect="""--> (3) xmlns: URI foo is not absolute
--> (4) Opening and ending tag mismatch: x line 0 and y
"""
# Accumulator filled by callback() below.
err=""
def callback(arg, msg, severity, reserved):
    """libxml2 error handler: append "<arg> (<severity>) <msg>" to the
    module-global ``err`` buffer. ``reserved`` is unused."""
    global err
    err += "%s (%d) %s" % (arg, severity, msg)
s = """<x xmlns="foo"></y>"""
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
if parserCtxt.getErrorHandler() != (callback,"-->"):
print "getErrorHandler failed"
sys.exit(1)
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
if err != expect:
print "error"
print "received %s" %(err)
print "expected %s" %(expect)
sys.exit(1)
i = 10000
while i > 0:
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
err = ""
i = i - 1
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| mit |
4022321818/w16b_test | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/__init__.py | 693 | 6866 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__version__ = '0.70a1'

# Names re-exported as the package's public API.
__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
    'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
    ]

__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    """Base class for all exceptions raised by this package."""
    pass
class BufferTooShort(ProcessError):
    """Raised when a supplied buffer is too small for the received message."""
    pass
class TimeoutError(ProcessError):
    """Raised when a blocking operation times out."""
    pass
class AuthenticationError(ProcessError):
    """Raised when connection authentication (digest handshake) fails."""
    pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
    """Start a manager server process and return its proxy manager.

    The returned manager's methods, e.g. `Lock()`, `Condition()` and
    `Queue()`, create objects that can be shared between processes.
    """
    from multiprocessing.managers import SyncManager
    manager = SyncManager()
    manager.start()
    return manager
#brython fix me
#def Pipe(duplex=True):
# '''
# Returns two connection object connected by a pipe
# '''
# from multiprocessing.connection import Pipe
# return Pipe(duplex)
def cpu_count():
    """Return the number of CPUs in the system.

    Raises NotImplementedError if the count cannot be determined on this
    platform.
    """
    platform = sys.platform
    if platform == 'win32':
        # Windows publishes the count via an environment variable.
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif 'bsd' in platform or platform == 'darwin':
        # BSD variants (and macOS, with a different sysctl path) query sysctl.
        command = '/sbin/sysctl -n hw.ncpu'
        if platform == 'darwin':
            command = '/usr' + command
        try:
            with os.popen(command) as pipe:
                num = int(pipe.read())
        except ValueError:
            num = 0
    else:
        # POSIX: ask sysconf for the number of online processors.
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0

    if num < 1:
        raise NotImplementedError('cannot determine number of cpus')
    return num
def freeze_support():
    """Run frozen-executable bootstrap code and exit, when applicable.

    Only acts on Windows when running inside a frozen executable; it is a
    no-op everywhere else.
    """
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support as _freeze_support
        _freeze_support()
def get_logger():
    """Return the package logger, creating it on first use."""
    from multiprocessing.util import get_logger as _get_logger
    return _get_logger()
def log_to_stderr(level=None):
    """Enable package logging with a handler that writes to stderr.

    Returns the package logger, set to *level* if given.
    """
    from multiprocessing.util import log_to_stderr as _log_to_stderr
    return _log_to_stderr(level)
#brython fix me
#def allow_connection_pickling():
# '''
# Install support for sending connections and sockets between processes
# '''
# # This is undocumented. In previous versions of multiprocessing
# # its only effect was to make socket objects inheritable on Windows.
# import multiprocessing.connection
#
# Definitions depending on native semaphores
#
def Lock():
    """Return a non-recursive inter-process lock."""
    from multiprocessing.synchronize import Lock as _Lock
    return _Lock()
def RLock():
    """Return a recursive inter-process lock."""
    from multiprocessing.synchronize import RLock as _RLock
    return _RLock()
def Condition(lock=None):
    """Return a condition variable, optionally tied to an existing *lock*."""
    from multiprocessing.synchronize import Condition as _Condition
    return _Condition(lock)
def Semaphore(value=1):
    """Return a semaphore with initial counter *value*."""
    from multiprocessing.synchronize import Semaphore as _Semaphore
    return _Semaphore(value)
def BoundedSemaphore(value=1):
    """Return a bounded semaphore with initial counter *value*."""
    from multiprocessing.synchronize import BoundedSemaphore as _BoundedSemaphore
    return _BoundedSemaphore(value)
def Event():
    """Return an inter-process event object."""
    from multiprocessing.synchronize import Event as _Event
    return _Event()
def Barrier(parties, action=None, timeout=None):
    """Return a barrier for *parties* participants, with optional
    post-release *action* and default *timeout*."""
    from multiprocessing.synchronize import Barrier as _Barrier
    return _Barrier(parties, action, timeout)
def Queue(maxsize=0):
    """Return a shared FIFO queue (maxsize 0 means unbounded)."""
    from multiprocessing.queues import Queue as _Queue
    return _Queue(maxsize)
def JoinableQueue(maxsize=0):
    """Return a shared queue supporting task_done()/join()."""
    from multiprocessing.queues import JoinableQueue as _JoinableQueue
    return _JoinableQueue(maxsize)
def SimpleQueue():
    """Return a simplified, locked pipe-based shared queue."""
    from multiprocessing.queues import SimpleQueue as _SimpleQueue
    return _SimpleQueue()
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    """Return a pool of worker processes."""
    from multiprocessing.pool import Pool as _Pool
    return _Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
    """Return an unsynchronized shared ctypes object."""
    from multiprocessing.sharedctypes import RawValue as _RawValue
    return _RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
    """Return an unsynchronized shared ctypes array."""
    from multiprocessing.sharedctypes import RawArray as _RawArray
    return _RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, lock=True):
    """Return a synchronized shared ctypes object (see *lock* semantics
    in multiprocessing.sharedctypes)."""
    from multiprocessing.sharedctypes import Value as _Value
    return _Value(typecode_or_type, *args, lock=lock)
def Array(typecode_or_type, size_or_initializer, *, lock=True):
    """Return a synchronized shared ctypes array (see *lock* semantics
    in multiprocessing.sharedctypes)."""
    from multiprocessing.sharedctypes import Array as _Array
    return _Array(typecode_or_type, size_or_initializer, lock=lock)
#
#
#
# Windows-only helper: child processes are launched by re-running the
# interpreter, so embedders must be able to point at the right binary.
if sys.platform == 'win32':

    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)

    # Part of the public API only on Windows.
    __all__ += ['set_executable']
| gpl-3.0 |
newemailjdm/pybrain | pybrain/auxiliary/gradientdescent.py | 31 | 5966 | __author__ = ('Thomas Rueckstiess, ruecksti@in.tum.de'
'Justin Bayer, bayer.justin@googlemail.com')
from scipy import zeros, asarray, sign, array, cov, dot, clip, ndarray
from scipy.linalg import inv
class GradientDescent(object):
    """Gradient descent on a parameter vector, supporting either plain
    backprop-style updates (learning rate + decay + momentum) or resilient
    propagation (RProp) per-parameter step-width adaptation.

    Select the algorithm by setting ``self.rprop`` before calling init().
    """

    def __init__(self):
        """ initialize algorithms with standard parameters (typical values given in parentheses)"""

        # --- BackProp parameters ---
        # learning rate (0.1-0.001, down to 1e-7 for RNNs)
        self.alpha = 0.1

        # alpha decay (0.999; 1.0 = disabled)
        self.alphadecay = 1.0

        # momentum parameters (0.1 or 0.9)
        self.momentum = 0.0
        self.momentumvector = None

        # --- RProp parameters ---
        self.rprop = False
        # maximum step width (1 - 20)
        self.deltamax = 5.0
        # minimum step width (0.01 - 1e-6)
        self.deltamin = 0.01
        # the remaining parameters do not normally need to be changed
        self.deltanull = 0.1
        self.etaplus = 1.2
        self.etaminus = 0.5
        self.lastgradient = None

    def init(self, values):
        """ call this to initialize data structures *after* algorithm to use
            has been selected

            :arg values: the list (or array) of parameters to perform gradient descent on
                         (will be copied, original not modified)
        """
        assert isinstance(values, ndarray)
        self.values = values.copy()
        if self.rprop:
            self.lastgradient = zeros(len(values), dtype='float64')
            # per-parameter step widths, all starting at deltanull
            self.rprop_theta = self.lastgradient + self.deltanull
            self.momentumvector = None
        else:
            self.lastgradient = None
            self.momentumvector = zeros(len(values))

    def __call__(self, gradient, error=None):
        """ calculates parameter change based on given gradient and returns updated parameters """
        # check if gradient has correct dimensionality, then make array
        assert len(gradient) == len(self.values)
        gradient_arr = asarray(gradient)

        if self.rprop:
            rprop_theta = self.rprop_theta

            # update parameters
            self.values += sign(gradient_arr) * rprop_theta

            # update rprop meta parameters: grow steps where the gradient
            # kept its sign, shrink (and zero the gradient) where it flipped
            dirSwitch = self.lastgradient * gradient_arr
            rprop_theta[dirSwitch > 0] *= self.etaplus
            idx = dirSwitch < 0
            rprop_theta[idx] *= self.etaminus
            gradient_arr[idx] = 0

            # upper and lower bound for both matrices
            rprop_theta = rprop_theta.clip(min=self.deltamin, max=self.deltamax)

            # save current gradients to compare with in next time step
            self.lastgradient = gradient_arr.copy()

            self.rprop_theta = rprop_theta

        else:
            # update momentum vector (momentum = 0 clears it)
            self.momentumvector *= self.momentum

            # update parameters (including momentum)
            self.momentumvector += self.alpha * gradient_arr
            self.alpha *= self.alphadecay

            # update parameters
            self.values += self.momentumvector

        return self.values

    # alias so the object can be used as obj.descent(gradient) as well
    descent = __call__
class NaturalGradient(object):
    """Natural-gradient descent from batches of sampled gradients.

    Gradient samples are accumulated until ``samplesize`` of them are
    available; the parameters are then updated with the gradient sum
    premultiplied by the inverse covariance of the batch, and the batch
    is discarded.
    """

    def __init__(self, samplesize):
        # Counter after how many samples a new gradient estimate will be
        # returned.
        self.samplesize = samplesize
        # Samples of the gradient collected for the current batch.
        self.samples = []

    def init(self, values):
        """Initialize with a copy of the parameter array to descend on."""
        self.values = values.copy()

    def __call__(self, gradient, error=None):
        """Collect *gradient*; return updated parameters once a full batch
        has been gathered, None otherwise."""
        # Append a copy to make sure this one is not changed afterwards by
        # the client.
        self.samples.append(array(gradient))
        # Return None if no new estimate is being given.
        if len(self.samples) < self.samplesize:
            return None
        # Samples become the columns of a single array.
        gradientarray = array(self.samples).T
        # BUG FIX: the samples were never discarded, so every subsequent
        # call produced an estimate over an ever-growing sample set; reset
        # the batch so collection starts fresh.
        self.samples = []
        inv_covar = inv(cov(gradientarray))
        self.values += dot(inv_covar, gradientarray.sum(axis=1))
        return self.values
class IRpropPlus(object):
    """iRprop+ optimizer: per-parameter multiplicative step adaptation with
    weight-backtracking when the error increases.

    :param upfactor: step-growth factor when the gradient keeps its sign
    :param downfactor: step-shrink factor when the gradient changes sign
    :param bound: maximum magnitude of any single step (must be > 0)
    :raises ValueError: if bound is not positive
    """

    def __init__(self, upfactor=1.1, downfactor=0.9, bound=0.5):
        self.upfactor = upfactor
        self.downfactor = downfactor
        if not bound > 0:
            raise ValueError("bound greater than 0 needed.")
        # BUG FIX: bound was validated but never stored, so __call__
        # crashed with AttributeError on self.bound.
        self.bound = bound

    def init(self, values):
        """Initialize optimizer state from a copy of the parameter array."""
        self.values = values.copy()
        self.prev_values = values.copy()
        self.more_prev_values = values.copy()
        self.previous_gradient = zeros(values.shape)
        self.step = zeros(values.shape)
        self.previous_error = float("-inf")

    def __call__(self, gradient, error):
        """Apply one iRprop+ update for *gradient* at objective value
        *error*; return the updated parameter array."""
        products = self.previous_gradient * gradient
        signs = sign(gradient)

        # For positive gradient parts (sign unchanged): grow the step.
        positive = (products > 0).astype('int8')
        pos_step = self.step * self.upfactor * positive
        # BUG FIX: clip() returns a new array; the result was previously
        # discarded, so the bound was never actually applied.
        pos_step = clip(pos_step, -self.bound, self.bound)
        pos_update = self.values - signs * pos_step

        # For negative gradient parts (sign flipped): shrink the step.
        negative = (products < 0).astype('int8')
        neg_step = self.step * self.downfactor * negative
        neg_step = clip(neg_step, -self.bound, self.bound)

        if error <= self.previous_error:
            # If the error has decreased, do nothing.
            neg_update = zeros(gradient.shape)
        else:
            # If it has increased, move back 2 steps.
            neg_update = self.more_prev_values

        # Set all negative gradients to zero for the next step.
        gradient *= positive

        # Bookkeeping.
        self.previous_gradient = gradient
        self.more_prev_values = self.prev_values
        self.prev_values = self.values.copy()
        self.previous_error = error

        # Updates.
        self.step[:] = pos_step + neg_step
        self.values[:] = positive * pos_update + negative * neg_update

        return self.values
| bsd-3-clause |
jonasschnelli/bitcoin | test/functional/wallet_dump.py | 29 | 9641 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
import datetime
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):
    """
    Read the given dump, count the addrs that match, count change and reserve.
    Also check that the old hd_master is inactive

    :param file_name: path of the wallet dump file to parse
    :param addrs: getaddressinfo results expected to appear in the dump
    :param script_addrs: script (multisig) addresses expected in the dump
    :param hd_master_addr_old: previously active HD seed address, or None
    :return: (comment lines, legacy count, p2sh-segwit count, bech32 count,
              script count, change count, reserve count, new HD master addr)
    """
    with open(file_name, encoding='utf8') as inputfile:
        found_comments = []
        found_legacy_addr = 0
        found_p2sh_segwit_addr = 0
        found_bech32_addr = 0
        found_script_addr = 0
        found_addr_chg = 0
        found_addr_rsv = 0
        hd_master_addr_ret = None
        for line in inputfile:
            line = line.strip()
            if not line:
                continue
            if line[0] == '#':
                found_comments.append(line)
            else:
                # split out some data: "<key> <date> <keytype> # ... addr=<addr> ..."
                key_date_label, comment = line.split("#")
                key_date_label = key_date_label.split(" ")
                # key = key_date_label[0]
                date = key_date_label[1]
                keytype = key_date_label[2]

                # dumpwallet marks imported keys with this fixed timestamp
                imported_key = date == '1970-01-01T00:00:01Z'
                if imported_key:
                    # Imported keys have multiple addresses, no label (keypath) and timestamp
                    # Skip them
                    continue

                addr_keypath = comment.split(" addr=")[1]
                addr = addr_keypath.split(" ")[0]
                keypath = None
                if keytype == "inactivehdseed=1":
                    # ensure the old master is still available
                    assert hd_master_addr_old == addr
                elif keytype == "hdseed=1":
                    # ensure we have generated a new hd master key
                    assert hd_master_addr_old != addr
                    hd_master_addr_ret = addr
                elif keytype == "script=1":
                    # scripts don't have keypaths
                    keypath = None
                else:
                    keypath = addr_keypath.rstrip().split("hdkeypath=")[1]

                # count key types, classified by regtest address prefix
                for addrObj in addrs:
                    if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=":
                        if addr.startswith('m') or addr.startswith('n'):
                            # P2PKH address
                            found_legacy_addr += 1
                        elif addr.startswith('2'):
                            # P2SH-segwit address
                            found_p2sh_segwit_addr += 1
                        elif addr.startswith('bcrt1'):
                            found_bech32_addr += 1
                        break
                    elif keytype == "change=1":
                        found_addr_chg += 1
                        break
                    elif keytype == "reserve=1":
                        found_addr_rsv += 1
                        break

                # count scripts
                for script_addr in script_addrs:
                    if script_addr == addr.rstrip() and keytype == "script=1":
                        found_script_addr += 1
                        break

    return found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
    """Functional test for the dumpwallet / importwallet RPCs."""

    def set_test_params(self):
        # Single node; small keypool and legacy default address type so the
        # dump's expected counts below are deterministic.
        self.num_nodes = 1
        self.extra_args = [["-keypool=90", "-addresstype=legacy"]]
        self.rpc_timeout = 120

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_network(self):
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()

    def run_test(self):
        self.nodes[0].createwallet("dump")

        wallet_unenc_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
        wallet_enc_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")

        # generate 30 addresses to compare against the dump
        # - 10 legacy P2PKH
        # - 10 P2SH-segwit
        # - 10 bech32
        test_addr_count = 10
        addrs = []
        for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
            for _ in range(test_addr_count):
                addr = self.nodes[0].getnewaddress(address_type=address_type)
                vaddr = self.nodes[0].getaddressinfo(addr)  # required to get hd keypath
                addrs.append(vaddr)

        # Test scripts dump by adding a 1-of-1 multisig address
        multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]

        # Refill the keypool. getnewaddress() refills the keypool *before* taking a key from
        # the keypool, so the final call to getnewaddress leaves the keypool with one key below
        # its capacity
        self.nodes[0].keypoolrefill()

        self.log.info('Mine a block one second before the wallet is dumped')
        dump_time = int(time.time())
        self.nodes[0].setmocktime(dump_time - 1)
        self.nodes[0].generate(1)
        self.nodes[0].setmocktime(dump_time)
        # Expected header lines of the dump file (timestamps are UTC).
        dump_time_str = '# * Created on {}Z'.format(
            datetime.datetime.fromtimestamp(
                dump_time,
                tz=datetime.timezone.utc,
            ).replace(tzinfo=None).isoformat())
        dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format(
            self.nodes[0].getblockcount(),
            self.nodes[0].getbestblockhash(),
        )
        dump_best_block_2 = '# mined on {}Z'.format(
            datetime.datetime.fromtimestamp(
                dump_time - 1,
                tz=datetime.timezone.utc,
            ).replace(tzinfo=None).isoformat())

        self.log.info('Dump unencrypted wallet')
        result = self.nodes[0].dumpwallet(wallet_unenc_dump)
        assert_equal(result['filename'], wallet_unenc_dump)

        found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(wallet_unenc_dump, addrs, [multisig_addr], None)
        assert '# End of dump' in found_comments  # Check that file is not corrupt
        assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
        assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
        assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
        assert_equal(found_legacy_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_p2sh_segwit_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_bech32_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_script_addr, 1)  # all scripts must be in the dump
        assert_equal(found_addr_chg, 0)  # 0 blocks where mined
        assert_equal(found_addr_rsv, 90 * 2)  # 90 keys plus 100% internal keys

        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        self.nodes[0].walletpassphrase('test', 100)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(wallet_enc_dump)

        found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
            read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc)
        assert '# End of dump' in found_comments  # Check that file is not corrupt
        assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
        assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
        assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
        assert_equal(found_legacy_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_p2sh_segwit_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_bech32_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_script_addr, 1)
        assert_equal(found_addr_chg, 90 * 2)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90 * 2)

        # Overwriting should fail
        assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))

        # Restart node with new wallet, and test importwallet
        self.restart_node(0)
        self.nodes[0].createwallet("w2")

        # Make sure the address is not IsMine before import
        result = self.nodes[0].getaddressinfo(multisig_addr)
        assert not result['ismine']

        self.nodes[0].importwallet(wallet_unenc_dump)

        # Now check IsMine is true
        result = self.nodes[0].getaddressinfo(multisig_addr)
        assert result['ismine']

        self.log.info('Check that wallet is flushed')
        with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20):
            self.nodes[0].getnewaddress()
# Standalone entry point: run the functional test directly.
if __name__ == '__main__':
    WalletDumpTest().main()
| mit |
spaceone/pyjs | pyjswidgets/pyjamas/ui/StackPanel.py | 9 | 5080 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009, 2010 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.ui.CellPanel import CellPanel
from pyjamas.ui import Event
class StackPanel(CellPanel):
    """A vertical panel that stacks its children, showing one at a time.

    Each child gets a clickable header row (a table cell); clicking a
    header makes that child the visible "stack" and notifies registered
    stack-change listeners.
    """

    def __init__(self, **kwargs):
        # Index of the currently visible child (-1 = none yet).
        self.visibleStack = -1
        # Maps header <td> elements to their stack index.
        self.indices = {}
        self.stackListeners = []

        kwargs['StyleName'] = kwargs.get('StyleName', "gwt-StackPanel")
        CellPanel.__init__(self, **kwargs)

        # Receive click events so header rows can switch stacks.
        DOM.sinkEvents(self.getElement(), Event.ONCLICK)

    def addStackChangeListener(self, listener):
        """Register *listener*; its onStackChanged(panel, index) is called
        whenever the visible stack changes."""
        self.stackListeners.append(listener)

    def add(self, widget, stackText="", asHTML=False):
        """Append *widget* with a header row labelled *stackText*
        (interpreted as HTML if *asHTML* is true)."""
        widget.removeFromParent()

        index = self.getWidgetCount()

        # Header row: one clickable <td> carrying the stack index.
        tr = DOM.createTR()
        td = DOM.createTD()
        DOM.appendChild(self.body, tr)
        DOM.appendChild(tr, td)
        self.setStyleName(td, "gwt-StackPanelItem", True)
        self._setIndex(td, index)
        DOM.setAttribute(td, "height", "1px")

        # Body row: holds the child widget itself.
        tr = DOM.createTR()
        td = DOM.createTD()
        DOM.appendChild(self.body, tr)
        DOM.appendChild(tr, td)
        DOM.setAttribute(td, "height", "100%")
        DOM.setAttribute(td, "vAlign", "top")

        CellPanel.add(self, widget, td)
        self.setStackVisible(index, False)
        # First child added becomes the visible stack.
        if self.visibleStack == -1:
            self.showStack(0)

        if stackText != "":
            self.setStackText(self.getWidgetCount() - 1, stackText, asHTML)

    def onBrowserEvent(self, event):
        # A click on a header cell selects its stack.
        if DOM.eventGetType(event) == "click":
            index = self.getDividerIndex(DOM.eventGetTarget(event))
            if index != -1:
                self.showStack(index)

    # also callable as remove(child) and remove(index)
    def remove(self, child, index=None):
        """Remove a child (given as widget or index); returns False if the
        widget is not a child of this panel."""
        if index is None:
            if isinstance(child, int):
                index = child
                child = self.getWidget(child)
            else:
                index = self.getWidgetIndex(child)

        if child.getParent() != self:
            return False

        # Keep visibleStack pointing at the same logical child.
        if self.visibleStack == index:
            self.visibleStack = -1
        elif self.visibleStack > index:
            self.visibleStack -= 1

        # Each child owns two table rows (header + body) at 2*index.
        rowIndex = 2 * index
        tr = DOM.getChild(self.body, rowIndex)
        DOM.removeChild(self.body, tr)
        tr = DOM.getChild(self.body, rowIndex)
        DOM.removeChild(self.body, tr)
        CellPanel.remove(self, child)
        rows = self.getWidgetCount() * 2

        # Reassign sequential indices to the remaining header cells.
        # NOTE(review): curIndex is computed but never used here -- verify
        # whether it was meant to drive the re-indexing.
        #for (int i = rowIndex; i < rows; i = i + 2) {
        for i in range(rowIndex, rows, 2):
            childTR = DOM.getChild(self.body, i)
            td = DOM.getFirstChild(childTR)
            curIndex = self._getIndex(td)
            self._setIndex(td, index)
            index += 1

        return True

    def _setIndex(self, td, index):
        # Record which stack the header cell belongs to.
        self.indices[td] = index

    def _getIndex(self, td):
        # Reverse lookup; None for elements that are not header cells.
        return self.indices.get(td)

    def setStackText(self, index, text, asHTML=False):
        """Set the header label of stack *index* (HTML if *asHTML*)."""
        if index >= self.getWidgetCount():
            return

        td = DOM.getChild(DOM.getChild(self.body, index * 2), 0)
        if asHTML:
            DOM.setInnerHTML(td, text)
        else:
            DOM.setInnerText(td, text)

    def showStack(self, index):
        """Make stack *index* visible and notify stack-change listeners."""
        if (index >= self.getWidgetCount()) or (index == self.visibleStack):
            return

        if self.visibleStack >= 0:
            self.setStackVisible(self.visibleStack, False)

        self.visibleStack = index
        self.setStackVisible(self.visibleStack, True)
        for listener in self.stackListeners:
            listener.onStackChanged(self, index)

    def getDividerIndex(self, elem):
        # Walk up from the clicked element to the nearest header cell and
        # return its stack index; -1 if the click was not on a header.
        while (elem is not None) and not DOM.compare(elem, self.getElement()):
            expando = self._getIndex(elem)
            if expando is not None:
                return int(expando)

            elem = DOM.getParent(elem)
        return -1

    def setStackVisible(self, index, visible):
        # Style the header row and toggle visibility of the body row.
        tr = DOM.getChild(self.body, (index * 2))
        if tr is None:
            return

        td = DOM.getFirstChild(tr)
        self.setStyleName(td, "gwt-StackPanelItem-selected", visible)

        tr = DOM.getChild(self.body, (index * 2) + 1)
        self.setVisible(tr, visible)
        self.getWidget(index).setVisible(visible)

    def getSelectedIndex(self):
        """Return the index of the currently visible stack (-1 if none)."""
        return self.visibleStack
# Register with the pyjamas widget factory so the class can be created by name.
Factory.registerClass('pyjamas.ui.StackPanel', 'StackPanel', StackPanel)
| apache-2.0 |
nii-cloud/dodai-compute | nova/api/openstack/accounts.py | 3 | 3019 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova import exception
from nova import flags
from nova import log as logging
from nova.auth import manager
from nova.api.openstack import faults
from nova.api.openstack import wsgi
FLAGS = flags.FLAGS
# Module-level logger for the OpenStack API layer.
LOG = logging.getLogger('nova.api.openstack')
def _translate_keys(account):
return dict(id=account.id,
name=account.name,
description=account.description,
manager=account.project_manager_id)
class Controller(object):
    """OpenStack API controller for account (auth-manager project) resources."""

    def __init__(self):
        self.manager = manager.AuthManager()

    def _check_admin(self, context):
        """We cannot depend on the db layer to check for admin access
        for the auth manager, so we do it here"""
        if not context.is_admin:
            raise exception.AdminRequired()

    def index(self, req):
        # Listing accounts is intentionally unsupported.
        raise webob.exc.HTTPNotImplemented()

    def detail(self, req):
        raise webob.exc.HTTPNotImplemented()

    def show(self, req, id):
        """Return data about the given account id"""
        account = self.manager.get_project(id)
        return dict(account=_translate_keys(account))

    def delete(self, req, id):
        # Admin-only: remove the project backing this account.
        self._check_admin(req.environ['nova.context'])
        self.manager.delete_project(id)
        return {}

    def create(self, req, body):
        """We use update with create-or-update semantics
        because the id comes from an external source"""
        raise webob.exc.HTTPNotImplemented()

    def update(self, req, id, body):
        """This is really create or update."""
        self._check_admin(req.environ['nova.context'])
        description = body['account'].get('description')
        manager = body['account'].get('manager')
        try:
            account = self.manager.get_project(id)
            self.manager.modify_project(id, manager, description)
        except exception.NotFound:
            # Project does not exist yet: create it instead of modifying.
            account = self.manager.create_project(id, manager, description)
        return dict(account=_translate_keys(account))
def create_resource():
    """Wrap a Controller in a WSGI resource with an XML body serializer."""
    metadata = {
        "attributes": {
            "account": ["id", "name", "description", "manager"],
        },
    }
    xml_serializer = wsgi.XMLDictSerializer(metadata=metadata)
    response_serializer = wsgi.ResponseSerializer(
        {'application/xml': xml_serializer})
    return wsgi.Resource(Controller(), serializer=response_serializer)
| apache-2.0 |
bgporter/nanobot | demo/tockbot.py | 1 | 3611 | #! /usr/bin/env/python
# Copyright (c) 2016 Brett g Porter
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
tockbot.py -- simple demo of making a bot with the nanobot
framework.
'''
from nanobot.nanobot import Nanobot
from nanobot.nanobot import GetBotArguments
from datetime import datetime
def NowString(now):
    """Render *now* as a spoken-style timestamp, e.g.
    "It's 3:04 PM on Thursday January 02, 2020"."""
    # %-I (hour without zero padding) is a glibc strftime extension;
    # the Windows equivalent would be %#I.
    formatted = now.strftime("It's %-I:%M %p on %A %B %d, %Y")
    return formatted
class Tockbot(Nanobot):
    """Twitter bot that chimes like a clock at the top of every hour.

    The nanobot framework (``Nanobot`` base class) supplies the Twitter
    plumbing used here: ``self.tweets``, ``self.twitter``, ``self.debug``
    and ``self.Log`` all come from the base class -- confirm against
    nanobot for their exact contracts.
    NOTE(review): this file uses Python 2 ``print`` statements.
    """
    def __init__(self, argDict=None):
        # argDict is forwarded untouched to the Nanobot base class.
        super(Tockbot, self).__init__(argDict)
    def IsReadyForUpdate(self):
        '''
        Overridden from base class.
        We're ready to create an update when it's the top of the hour,
        or if the user is forcing us to tweet.
        '''
        # NOTE(review): only the top-of-the-hour check is implemented here;
        # the "forcing" case mentioned above is presumably handled by the
        # base class -- confirm against nanobot.
        return 0 == datetime.now().minute
    def CreateUpdateTweet(self):
        ''' Chime the clock! '''
        now = datetime.now()
        # figure out how many times to chime; 1x per hour (12 -> noon/midnight).
        chimeCount = (now.hour % 12) or 12
        # create the message to tweet, repeating the chime once per hour count
        msg = "{0}\n\n{1}".format("\n".join(["BONG"] * chimeCount),
                                  NowString(now))
        # queue the message; the base class presumably sends everything in
        # self.tweets -- confirm against nanobot
        self.tweets.append({'status': msg})
        self.Log("Tweet", ["{} o'clock".format(chimeCount)])
    def HandleOneMention(self, mention):
        ''' Like the tweet that mentions us. If the word 'tick' appears
        in that tweet, also reply with the current time.
        '''
        who = mention['user']['screen_name']
        text = mention['text']
        theId = mention['id_str']
        eventType = "Mention"
        # we favorite every mention that we see (debug mode only prints)
        if self.debug:
            print "Faving tweet {0} by {1}:\n {2}".format(theId, who, text.encode("utf-8"))
        else:
            self.twitter.create_favorite(id=theId)
        if 'tick' in text.lower():
            # reply to them with the current time.
            replyMsg = "@{0} {1}".format(who, NowString(datetime.now()))
            if self.debug:
                print "REPLY: {}".format(replyMsg)
            else:
                self.tweets.append({'status': replyMsg, 'in_reply_to_status_id': theId})
                eventType = "Reply"
        self.Log(eventType, [who])
    def Handle_quoted_tweet(self, data):
        '''Like any tweet that quotes us. '''
        tweetId = data['target_object']['id_str']
        if self.debug:
            print "Faving quoted tweet {0}".format(tweetId)
        else:
            try:
                self.twitter.create_favorite(id=tweetId)
            except TwythonError as e:
                # NOTE(review): TwythonError is never imported in this module,
                # so if create_favorite raises, this handler itself fails with
                # NameError -- needs ``from twython import TwythonError``.
                # Also note: other Log calls pass a list as the second
                # argument; this one passes a plain string.
                self.Log("EXCEPTION", str(e))
if __name__ == "__main__":
    # Parse the standard nanobot command-line arguments and run the bot.
    Tockbot.CreateAndRun(GetBotArguments())
| mit |
eamanu/ArgentosCompiler | Analyzer/Analyzer.py | 1 | 13039 | # -*- coding: utf-8 -*-
"""@package Reader
Módulo que analiza el código fuente
@author Arias Emmanuel
@date 24/10/2016
"""
from Reader import Reader
from enum import Enum
import string
import numpy as np
class campoCaracter:
    """Character board: an 8x8 grid addressed by the knight's position.

    NOTE(review): because the rows mix strings and ints, ``np.array``
    coerces the whole matrix to a string dtype, so the numeric entries
    (1..9, 0, 99, -99) are actually stored as strings such as '99'.
    """
    def __init__(self):
        # Row/column labels (the comments on the right and below spell
        # A-R-G-E-N-T-O-S, the board's coordinate alphabet).
        self.mVariable = np.array([['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' ], #A
                                  ['I', 'J', 'K', 'L', 'M', 'N', 'O', 'P' ], #R
                                  ['Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X' ], #G
                                  ['Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f' ], #E
                                  ['g', 'h', 'i', 'j', 'k', 'l', 'm', 'n' ], #N
                                  ['o', 'p', 'q', 'r', 's', 't', 'u', 'v' ], #T
                                  ['w', 'x', 'y', 'z', 1 , 2 , 3 , 4 ], #O
                                  [ 5 , 6 , 7 , 8 , 9 , 0 , 99, -99 ] #S
                                  ])
        # A R G E N T O S
    def DameCaracter (self, y, x, caballo):
        # NOTE(review): the y/x parameters are ignored -- the position is
        # always read from ``caballo`` -- and the method clobbers
        # self.y / self.x as a side effect.
        # NOTE(review): damePosCaballo() returns ``coor`` enum members;
        # indexing a numpy array with them only works if ``coor`` is
        # int-based -- confirm.
        self.y, self.x = caballo.damePosCaballo()
        return self.mVariable[self.y][self.x]
class coor(int, Enum):
    """Coordenadas del tablero (board coordinates, one per letter of
    A-R-G-E-N-T-O-S).

    Mixing in ``int`` makes the members behave like plain integers, which
    the rest of this module relies on: ``caballo`` does arithmetic and
    ordering comparisons on them and ``campoCaracter`` uses them as numpy
    indices.  A plain ``Enum`` raises TypeError for all of those, so the
    int mixin is a bug fix; it is backward-compatible (members are still
    ``coor`` Enum members with the same names and values).
    """
    a = 0
    r = 1
    g = 2
    e = 3
    n = 4
    t = 5
    o = 6
    s = 7
class caballo:
    """Knight piece that walks the 8x8 ARGENTOS board.

    Position is stored as two ``coor`` members: ``self.y`` (row) and
    ``self.x`` (column).
    """

    # Board-letter -> coordinate lookup, replacing the original chains of
    # eight ``if`` statements per axis.
    _LETRA_A_COOR = {
        'a': coor.a, 'r': coor.r, 'g': coor.g, 'e': coor.e,
        'n': coor.n, 't': coor.t, 'o': coor.o, 's': coor.s,
    }

    def __init__(self):
        # Starting square: row 's', column 'r'.
        self.y = coor.s
        self.x = coor.r

    def dameCoordenadasEnNumero(self, yy, xx):
        """Translate two board letters into ``coor`` members.

        Letters are lower-cased first; a letter outside the board
        alphabet is returned unchanged (as its lower-cased string),
        matching the original fall-through behaviour.
        """
        y = yy.lower()
        x = xx.lower()
        return self._LETRA_A_COOR.get(y, y), self._LETRA_A_COOR.get(x, x)

    def mover(self, yy, xx):
        """Move the knight to the square named by the two letters.

        An unrecognised letter leaves that axis unchanged (as in the
        original).  No legality check is done here; callers are expected
        to call ``isCorrectMove`` first.
        """
        y, x = self.dameCoordenadasEnNumero(yy, xx)
        if isinstance(y, coor):
            self.y = y
        if isinstance(x, coor):
            self.x = x

    def damePosCaballo(self):
        """Return the knight's current (row, column) as ``coor`` members."""
        return self.y, self.x

    def isCorrectMove(self, yy, xx):
        """Check whether moving to the square named by ``yy``/``xx`` is a
        legal knight move from the current position.

        Returns
        -------
        1  : correct
        -1 : incorrect

        Fixes two bugs in the original implementation:
        * the vertical-delta check tested ``self.y + 1`` twice instead of
          ``self.y + 1`` / ``self.y - 1`` (the horizontal check had the
          same copy-paste bug), and
        * a 2-column move with a bad row delta fell off the end of the
          function, returning ``None`` instead of -1.
        Arithmetic is done on ``.value`` so this works whether or not
        ``coor`` mixes in ``int``.
        """
        yy, xx = self.dameCoordenadasEnNumero(yy, xx)
        if not isinstance(yy, coor) or not isinstance(xx, coor):
            # Letters outside the board alphabet can never land on the board.
            return -1
        dy = abs(yy.value - self.y.value)
        dx = abs(xx.value - self.x.value)
        # A knight moves 2 squares along one axis and 1 along the other.
        return 1 if (dy, dx) in ((2, 1), (1, 2)) else -1
class Analyzer:
    """Interpreter/analyzer for ARGENTOS source files.

    Walks the source line by line (via the project ``Reader`` helper),
    moving a chess knight (``caballo``) over an 8x8 letter board and
    using its position to look characters up in ``campoCaracter``.

    NOTE(review): this module is Python 2 only (``print`` statements and
    ``string.split``); it will not run under Python 3 as written.
    """
    def __init__(self):
        self.SourceCode = ""
        self.caballo = caballo()
        self.geFin = 0  # flag: 1 once the print ("ge") block has finished
        self.cCaracter = campoCaracter()
        self.sParaMostrarEnPantallita = ""  # text accumulated for output
        """
        Estados
        0 = campo principal
        1 = campo de caracteres
        2 = campo de variables
        """
        self.estado = 0  # current field: 0 = main field (see states above)
    def setSourceCode(self, nameFile):
        """Store the name of the ARGENTOS source file to analyze.

        Parameters
        ----------
        nameFile : str -> name/path of the source file
        """
        self.SourceCode = nameFile
    def getSourceCode(self):
        """Return the stored source-file name.

        Parameter
        ---------
        none
        Returns
        -------
        NameFile : string
        """
        return self.SourceCode
    def AnalizadorCodigo(self):
        """Run the analyzer over the source file.

        Interprets the code and carries out its commands.  The file must
        open and close with a line reading ``ARGENTOS``.

        Parameter
        ---------
        None
        Returns
        -------
        Nothing
        """
        self.SourceFile = Reader.openFile(self.SourceCode)
        line = Reader.readLine(self.SourceFile)
        if(line != 'ARGENTOS\n'):
            print "\n\nMal pibe, no hiciste bien la apertura del archivo. Ponele ARGENTOS en el primer renglón"
            # NOTE(review): bare ``exit`` is a no-op -- it references the
            # builtin without calling it; should be ``exit()`` or
            # ``sys.exit(1)``.
            exit
        else:
            line = Reader.readLine(self.SourceFile)
            while(line != "ARGENTOS\n"):
                # Strip the "%"-comment tail, then split the line into
                # space-separated tokens.
                line = string.split(line, "%")[0]
                line = string.split(string.split(line, "\n")[0], " ")
                #line = string.split(string.split(Reader.readLine(self.SourceFile), "\n")[0], " ")
                i = 0
                while i < len(line):
                    # Knight-move command: "argento" followed by a two-letter
                    # coordinate token.
                    if line[i] == "argento":
                        y = list(line[i+1])[0]
                        x = list(line[i+1])[1]
                        # Validate the knight move before performing it.
                        if self.caballo.isCorrectMove(y,x) == -1:
                            print "\n\nError Chabon: Moviste mal el caballo \n\n"
                            exit(-1)
                        # Move is legal, so carry on.
                        self.caballo.mover(y, x)
                        i = i + 2  # consume the command and its coordinate
                        if(y == 't' and x == 'a'):
                            # Square "ta" switches into the character field.
                            self.estado = 1
                    elif(line[i] == 'ge' and self.geFin == 0):
                        # "ge" opens a print block: emit characters until the
                        # closing "ge" line.
                        line = Reader.readLine(self.SourceFile)
                        line = string.split(line, "%")[0]
                        line = string.split(string.split(line, "\n")[0]," " )
                        while(line[0] != 'ge'):
                            j = 0
                            while j < len(line):
                                # Skip over blank lines inside the block.
                                if line[0] == '' and len(line) == 1:
                                    line = Reader.readLine(self.SourceFile)
                                    line = string.split(line, "%")[0]
                                    line = string.split(string.split(line, "\n")[0]," " )
                                # Currently in the main field.
                                if self.estado == 0:
                                    if line[j] == "argento":
                                        y = list(line[j+1])[0]
                                        x = list(line[j+1])[1]
                                        # Validate the knight move.
                                        if self.caballo.isCorrectMove(y,x) == -1:
                                            print "\n\nError Chabon: Moviste mal el caballo \n\n"
                                            exit(-1)
                                        # Move is legal, so carry on.
                                        self.caballo.mover(y, x)
                                        j = j + 2  # consume command + coordinate
                                    if(line[j] == "aaa"):
                                        y = list(line[j-1])[0]
                                        x = list(line[j-1])[1]
                                        if(y == 'T' and x == 'A'):
                                            # Enter the character field.
                                            self.estado = 1
                                        if(y == 'N' and x == 'A'):
                                            self.sParaMostrarEnPantallita += ' '
                                    break
                                # Currently in the character field.
                                elif self.estado == 1:
                                    if line[j] == "argento":
                                        y = list(line[j+1])[0]
                                        x = list(line[j+1])[1]
                                        # Validate the knight move.
                                        if self.caballo.isCorrectMove(y,x) == -1:
                                            print "\n\nError Chabon: Moviste mal el caballo \n\n"
                                            exit(-1)
                                        # Move is legal, so carry on.
                                        self.caballo.mover(y, x)
                                        j = j + 2  # consume command + coordinate
                                    elif(line[j] == "aaa"):
                                        y = list(line[j-1])[0]
                                        x = list(line[j-1])[1]
                                        # Read the character at the knight's
                                        # current position on the board.
                                        self.sParaMostrarEnPantallita += self.cCaracter.DameCaracter(y,x, self.caballo)
                                        j += 1
                                    elif(line[j] == "oo"):
                                        # "oo" returns to the main field.
                                        self.estado = 0
                                        break
                                    elif (line[j] != "argento" and line[j] != "aaa" and line[j] != "oo"):
                                        j += 1
                                line = Reader.readLine(self.SourceFile)
                                line = string.split(line, "%")[0]
                                line = string.split(string.split(line, "\n")[0]," " )
                                # The closing "ge" ends the print block: flush
                                # the accumulated text and mark printing done.
                                if (line[0] == 'ge'):
                                    print self.sParaMostrarEnPantallita
                                    self.sParaMostrarEnPantallita = ""
                                    self.geFin = 1
                                j += 1
                    i += 1
                line = Reader.readLine(self.SourceFile)
| gpl-3.0 |
elijah513/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # Undo whitening so components carry their singular values again.
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Fixed: the abstract placeholder was missing ``self``; harmless for
        # subclasses that override it, but wrong as an interface declaration.

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------

        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
auready/django | django/middleware/gzip.py | 68 | 2082 | import re
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.text import compress_sequence, compress_string
# Matches "gzip" as a whole word anywhere in the Accept-Encoding header value.
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(MiddlewareMixin):
    """
    Compress outgoing responses with gzip for clients that advertise
    support via the Accept-Encoding header, patching the Vary header so
    caches key their entries on Accept-Encoding.
    """
    def process_response(self, request, response):
        # Tiny non-streaming payloads aren't worth the compression overhead.
        if not response.streaming and len(response.content) < 200:
            return response

        # Something upstream already encoded the body; leave it untouched.
        if response.has_header('Content-Encoding'):
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        accept_encoding = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(accept_encoding):
            return response

        if response.streaming:
            # The compressed size is unknown until the stream is consumed,
            # so Content-Length has to be dropped entirely.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            compressed = compress_string(response.content)
            # Keep the original body when compression doesn't actually help.
            if len(compressed) >= len(response.content):
                return response
            response.content = compressed
            response['Content-Length'] = str(len(response.content))

        # RFC 7232 section 2.1: the representation changed, so demote a
        # strong ETag to a weak one while still allowing conditional
        # request matches.
        etag = response.get('ETag')
        if etag and etag.startswith('"'):
            response['ETag'] = 'W/' + etag

        response['Content-Encoding'] = 'gzip'
        return response
| bsd-3-clause |
mgaitan/scipy | scipy/weave/examples/swig2_example.py | 100 | 1596 | """Simple example to show how to use weave.inline on SWIG2 wrapped
objects. SWIG2 refers to SWIG versions >= 1.3.
To run this example you must build the trivial SWIG2 extension called
swig2_ext. To do this you need to do something like this::
$ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i
$ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \
-o swig2_ext_wrap.os swig2_ext_wrap.cxx
$ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \
-L/usr/lib/python2.3/config
The files swig2_ext.i and swig2_ext.h are included in the same
directory that contains this file.
Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES
are used or not.
Author: Prabhu Ramachandran
Copyright (c) 2004, Prabhu Ramachandran
License: BSD Style.
"""
from __future__ import absolute_import, print_function
# Import our SWIG2 wrapped library
import swig2_ext
import scipy.weave as weave
from scipy.weave import swig2_spec, converters
# SWIG2 support is not enabled by default. We do this by adding the
# swig2 converter to the default list of converters.  Prepending (index 0)
# makes the SWIG2 converter take priority over the stock converters when
# weave matches argument types.
converters.default.insert(0, swig2_spec.swig2_converter())
def test():
    """Instantiate the SWIG wrapped object and then call its method
    from C++ using weave.inline
    """
    a = swig2_ext.A()
    b = swig2_ext.foo() # This will be an APtr instance.
    b.thisown = 1 # Prevent memory leaks.
    # NOTE: the local names 'a' and 'b' are significant -- weave.inline
    # looks them up by name (see the list argument below), so they must
    # match the identifiers used inside the C++ snippet.
    code = """a->f();
    b->f();
    """
    weave.inline(code, ['a', 'b'], include_dirs=['.'],
                 headers=['"swig2_ext.h"'], verbose=1)
if __name__ == "__main__":
    # Run the demo when executed as a script.
    test()
| bsd-3-clause |
cbertinato/pandas | pandas/tests/indexes/test_setops.py | 1 | 2362 | '''
The tests in this package are to ensure the proper resultant dtypes of
set operations.
'''
import itertools as it
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import Int64Index, RangeIndex
from pandas.tests.indexes.conftest import indices_list
import pandas.util.testing as tm
# Index-type pairs whose unions are allowed to be "inconsistent": the result
# dtype may legitimately come from either operand.  Maps a sorted
# (type, type) key to the pair of test-index factories used to build them.
COMPATIBLE_INCONSISTENT_PAIRS = {
    (Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex)
}
@pytest.fixture(params=list(it.combinations(indices_list, 2)),
                ids=lambda x: type(x[0]).__name__ + type(x[1]).__name__)
def index_pair(request):
    """
    Create all combinations of 2 index types.

    Parametrized over every unordered pair drawn from ``indices_list``;
    the test id concatenates the two index class names.
    """
    return request.param
def test_union_same_types(indices):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = indices.sort_values()
idx2 = indices.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index_pair):
    # GH 23525
    left, right = index_pair
    type_pair = tuple(sorted([type(left), type(right)], key=lambda x: str(x)))
    if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
        pytest.xfail('This test only considers non compatible indexes.')

    if any(isinstance(idx, pd.MultiIndex) for idx in index_pair):
        pytest.xfail('This test doesn\'t consider multiindixes.')

    if is_dtype_equal(left.dtype, right.dtype):
        pytest.xfail('This test only considers non matching dtypes.')

    # A union involving a CategoricalIndex (even as dtype('O')) and a
    # non-CategoricalIndex is only possible when both operands are
    # monotonic, and a union with a non-unique, non-monotonic index raises
    # (applies to the boolean index), so sort both sides first.
    left = left.sort_values()
    right = right.sort_values()

    # Mixed-dtype unions fall back to object dtype in either direction.
    assert left.union(right).dtype == np.dtype('O')
    assert right.union(left).dtype == np.dtype('O')
@pytest.mark.parametrize('idx_fact1,idx_fact2',
                         COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
    # GH 23525
    small = idx_fact1(10)
    large = idx_fact2(20)

    union_forward = small.union(large)
    union_reverse = large.union(small)

    # For compatible-but-inconsistent pairs the result dtype may come from
    # either operand, in both union directions.
    assert union_forward.dtype in (small.dtype, large.dtype)
    assert union_reverse.dtype in (small.dtype, large.dtype)
| bsd-3-clause |
csrocha/OpenUpgrade | addons/mrp_repair/__openerp__.py | 259 | 2554 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Repairs Management',
'version': '1.0',
'category': 'Manufacturing',
'description': """
The aim is to have a complete module to manage all products repairs.
====================================================================
The following topics should be covered by this module:
------------------------------------------------------
* Add/remove products in the reparation
* Impact for stocks
* Invoicing (products and/or services)
* Warranty concept
* Repair quotation report
* Notes for the technician and for the final customer
""",
'author': 'OpenERP SA',
'depends': ['mrp', 'sale', 'account'],
'website': 'https://www.odoo.com/page/manufacturing',
'data': [
'security/ir.model.access.csv',
'security/mrp_repair_security.xml',
'mrp_repair_data.xml',
'mrp_repair_sequence.xml',
'wizard/mrp_repair_cancel_view.xml',
'wizard/mrp_repair_make_invoice_view.xml',
'mrp_repair_view.xml',
'mrp_repair_workflow.xml',
'mrp_repair_report.xml',
'views/report_mrprepairorder.xml',
],
'demo': ['mrp_repair_demo.yml'],
'test': ['test/mrp_repair_users.yml',
'test/test_mrp_repair_noneinv.yml',
'test/test_mrp_repair_b4inv.yml',
'test/test_mrp_repair_afterinv.yml',
'test/test_mrp_repair_cancel.yml',
'test/test_mrp_repair_fee.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.