# File: docs/conf.py (repo: Soovox/django-socialregistration, license: MIT)
# -*- coding: utf-8 -*-
#
# django-socialregistration documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 17:25:59 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
# Import after settings are configured; the package may touch Django at import time.
import socialregistration
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-socialregistration'
copyright = u'2012, Alen Mujezinovic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.1'
# The full version, including alpha/beta/rc tags.
release = '0.5.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-socialregistrationdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-socialregistration.tex', u'django-socialregistration Documentation',
u'Alen Mujezinovic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-socialregistration', u'django-socialregistration Documentation',
[u'Alen Mujezinovic'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-socialregistration', u'django-socialregistration Documentation',
u'Alen Mujezinovic', 'django-socialregistration', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#!/usr/bin/python
# File: desktop/core/ext-py/tablib-develop/tablib/packages/odf3/odf2xhtml.py (repo: dulems/hue)
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import sys  # needed by save() for writing to stdout
from xml.sax import handler
from xml.sax.saxutils import escape, quoteattr
from xml.dom import Node
from .opendocument import load
from .namespaces import ANIMNS, CHARTNS, CONFIGNS, DCNS, DR3DNS, DRAWNS, FONS, \
FORMNS, MATHNS, METANS, NUMBERNS, OFFICENS, PRESENTATIONNS, SCRIPTNS, \
SMILNS, STYLENS, SVGNS, TABLENS, TEXTNS, XLINKNS
# Handling of styles
#
# First there are font face declarations. These set up a font style that will be
# referenced from a text-property. The declaration describes the font making
# it possible for the application to find a similar font should the system not
# have that particular one. The StyleToCSS stores these attributes to be used
# for the CSS2 font declaration.
#
# Then there are default-styles. These set defaults for various style types:
# "text", "paragraph", "section", "ruby", "table", "table-column", "table-row",
# "table-cell", "graphic", "presentation", "drawing-page", "chart".
# Since CSS2 can't refer to another style, ODF2XHTML adds these to all
# styles unless overridden.
#
# The real styles are declared in the <style:style> element. They have a
# family referring to the default-styles, and may have a parent style.
#
# Styles have scope. The same name can be used for both paragraph and
# character etc. styles. Since CSS2 has no scope we use a prefix. (Not elegant.)
# In ODF a style can have a parent, these parents can be chained.
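#
# Illustrative example (not from the original source): an ODF paragraph
# style named "Foo" becomes the CSS selector ".P-Foo", while a character
# (text) style also named "Foo" becomes ".S-Foo", so the two never collide
# even though ODF lets them share a name.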
class StyleToCSS:
""" The purpose of the StyleToCSS class is to contain the rules to convert
ODF styles to CSS2. Since it needs the generic fonts, it would probably
        make sense to store the styles in a dict as well.
"""
def __init__(self):
# Font declarations
self.fontdict = {}
# Fill-images from presentations for backgrounds
self.fillimages = {}
self.ruleconversions = {
(DRAWNS,'fill-image-name'): self.c_drawfillimage,
(FONS,"background-color"): self.c_fo,
(FONS,"border"): self.c_fo,
(FONS,"border-bottom"): self.c_fo,
(FONS,"border-left"): self.c_fo,
(FONS,"border-right"): self.c_fo,
(FONS,"border-top"): self.c_fo,
(FONS,"color"): self.c_fo,
(FONS,"font-family"): self.c_fo,
(FONS,"font-size"): self.c_fo,
(FONS,"font-style"): self.c_fo,
(FONS,"font-variant"): self.c_fo,
(FONS,"font-weight"): self.c_fo,
(FONS,"line-height"): self.c_fo,
(FONS,"margin"): self.c_fo,
(FONS,"margin-bottom"): self.c_fo,
(FONS,"margin-left"): self.c_fo,
(FONS,"margin-right"): self.c_fo,
(FONS,"margin-top"): self.c_fo,
(FONS,"min-height"): self.c_fo,
(FONS,"padding"): self.c_fo,
(FONS,"padding-bottom"): self.c_fo,
(FONS,"padding-left"): self.c_fo,
(FONS,"padding-right"): self.c_fo,
(FONS,"padding-top"): self.c_fo,
(FONS,"page-width"): self.c_page_width,
(FONS,"page-height"): self.c_page_height,
(FONS,"text-align"): self.c_text_align,
(FONS,"text-indent") :self.c_fo,
(TABLENS,'border-model') :self.c_border_model,
(STYLENS,'column-width') : self.c_width,
(STYLENS,"font-name"): self.c_fn,
(STYLENS,'horizontal-pos'): self.c_hp,
(STYLENS,'text-position'): self.c_text_position,
(STYLENS,'text-line-through-style'): self.c_text_line_through_style,
(STYLENS,'text-underline-style'): self.c_text_underline_style,
(STYLENS,'width') : self.c_width,
# FIXME Should do style:vertical-pos here
}
def save_font(self, name, family, generic):
""" It is possible that the HTML browser doesn't know how to
show a particular font. Fortunately ODF provides generic fallbacks.
Unfortunately they are not the same as CSS2.
CSS2: serif, sans-serif, cursive, fantasy, monospace
ODF: roman, swiss, modern, decorative, script, system
        This method puts the font and its fallback into a dictionary
"""
htmlgeneric = "sans-serif"
if generic == "roman": htmlgeneric = "serif"
elif generic == "swiss": htmlgeneric = "sans-serif"
elif generic == "modern": htmlgeneric = "monospace"
elif generic == "decorative": htmlgeneric = "sans-serif"
elif generic == "script": htmlgeneric = "monospace"
elif generic == "system": htmlgeneric = "serif"
self.fontdict[name] = (family, htmlgeneric)
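    # Example (illustrative values): save_font("Arial", "Arial", "swiss")
    # stores {"Arial": ("Arial", "sans-serif")}, which c_fn() later turns
    # into the CSS declaration "font-family: Arial, sans-serif".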
def c_drawfillimage(self, ruleset, sdict, rule, val):
""" Fill a figure with an image. Since CSS doesn't let you resize images
            this should really be implemented as an absolutely positioned <img>
            with a width and a height.
"""
sdict['background-image'] = "url('%s')" % self.fillimages[val]
def c_fo(self, ruleset, sdict, rule, val):
""" XSL formatting attributes """
selector = rule[1]
sdict[selector] = val
def c_border_model(self, ruleset, sdict, rule, val):
""" Convert to CSS2 border model """
if val == 'collapsing':
sdict['border-collapse'] ='collapse'
else:
sdict['border-collapse'] ='separate'
def c_width(self, ruleset, sdict, rule, val):
""" Set width of box """
sdict['width'] = val
def c_text_align(self, ruleset, sdict, rule, align):
""" Text align """
if align == "start": align = "left"
if align == "end": align = "right"
sdict['text-align'] = align
def c_fn(self, ruleset, sdict, rule, fontstyle):
""" Generate the CSS font family
A generic font can be found in two ways. In a <style:font-face>
element or as a font-family-generic attribute in text-properties.
"""
generic = ruleset.get((STYLENS,'font-family-generic') )
if generic is not None:
self.save_font(fontstyle, fontstyle, generic)
family, htmlgeneric = self.fontdict.get(fontstyle, (fontstyle, 'serif'))
sdict['font-family'] = '%s, %s' % (family, htmlgeneric)
def c_text_position(self, ruleset, sdict, rule, tp):
""" Text position. This is used e.g. to make superscript and subscript
This attribute can have one or two values.
The first value must be present and specifies the vertical
text position as a percentage that relates to the current font
height or it takes one of the values sub or super. Negative
percentages or the sub value place the text below the
baseline. Positive percentages or the super value place
the text above the baseline. If sub or super is specified,
the application can choose an appropriate text position.
The second value is optional and specifies the font height
as a percentage that relates to the current font-height. If
this value is not specified, an appropriate font height is
used. Although this value may change the font height that
is displayed, it never changes the current font height that
is used for additional calculations.
"""
textpos = tp.split(' ')
if len(textpos) == 2 and textpos[0] != "0%":
# Bug in OpenOffice. If vertical-align is 0% - ignore the text size.
sdict['font-size'] = textpos[1]
if textpos[0] == "super":
sdict['vertical-align'] = "33%"
elif textpos[0] == "sub":
sdict['vertical-align'] = "-33%"
else:
sdict['vertical-align'] = textpos[0]
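    # Example (illustrative): a text-position value of "super 58%" yields
    # {'vertical-align': '33%', 'font-size': '58%'}; a plain "sub" yields
    # only {'vertical-align': '-33%'}.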
def c_hp(self, ruleset, sdict, rule, hpos):
#FIXME: Frames wrap-style defaults to 'parallel', graphics to 'none'.
# It is properly set in the parent-styles, but the program doesn't
# collect the information.
wrap = ruleset.get((STYLENS,'wrap'),'parallel')
# Can have: from-left, left, center, right, from-inside, inside, outside
if hpos == "center":
sdict['margin-left'] = "auto"
sdict['margin-right'] = "auto"
# else:
# # force it to be *something* then delete it
# sdict['margin-left'] = sdict['margin-right'] = ''
# del sdict['margin-left'], sdict['margin-right']
if hpos in ("right","outside"):
if wrap in ( "left", "parallel","dynamic"):
sdict['float'] = "right"
elif wrap == "run-through":
sdict['position'] = "absolute" # Simulate run-through
sdict['top'] = "0"
sdict['right'] = "0";
else: # No wrapping
sdict['margin-left'] = "auto"
sdict['margin-right'] = "0px"
elif hpos in ("left", "inside"):
if wrap in ( "right", "parallel","dynamic"):
sdict['float'] = "left"
elif wrap == "run-through":
sdict['position'] = "absolute" # Simulate run-through
sdict['top'] = "0"
sdict['left'] = "0"
else: # No wrapping
sdict['margin-left'] = "0px"
sdict['margin-right'] = "auto"
elif hpos in ("from-left", "from-inside"):
if wrap in ( "right", "parallel"):
sdict['float'] = "left"
else:
sdict['position'] = "relative" # No wrapping
if (SVGNS,'x') in ruleset:
sdict['left'] = ruleset[(SVGNS,'x')]
def c_page_width(self, ruleset, sdict, rule, val):
""" Set width of box
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
sdict['width'] = val
def c_text_underline_style(self, ruleset, sdict, rule, val):
""" Set underline decoration
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
if val and val != "none":
sdict['text-decoration'] = "underline"
def c_text_line_through_style(self, ruleset, sdict, rule, val):
""" Set underline decoration
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
if val and val != "none":
sdict['text-decoration'] = "line-through"
def c_page_height(self, ruleset, sdict, rule, val):
""" Set height of box """
sdict['height'] = val
def convert_styles(self, ruleset):
""" Rule is a tuple of (namespace, name). If the namespace is '' then
it is already CSS2
"""
sdict = {}
for rule,val in list(ruleset.items()):
if rule[0] == '':
sdict[rule[1]] = val
continue
method = self.ruleconversions.get(rule, None )
if method:
method(ruleset, sdict, rule, val)
return sdict
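# Minimal usage sketch (hand-written example, not in the original module):
#   css = StyleToCSS().convert_styles({(FONS, 'font-weight'): 'bold',
#                                      ('', 'color'): 'red'})
#   # -> {'font-weight': 'bold', 'color': 'red'}
# Rules in the '' namespace pass through untouched; known ODF rules are
# converted by the handlers registered in ruleconversions.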
class TagStack:
def __init__(self):
self.stack = []
def push(self, tag, attrs):
self.stack.append( (tag, attrs) )
def pop(self):
item = self.stack.pop()
return item
def stackparent(self):
item = self.stack[-1]
return item[1]
def rfindattr(self, attr):
""" Find a tag with the given attribute """
for tag, attrs in self.stack:
if attr in attrs:
return attrs[attr]
return None
def count_tags(self, tag):
c = 0
for ttag, tattrs in self.stack:
if ttag == tag: c = c + 1
return c
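# Brief sketch of how handlers use the tag stack (illustrative):
#   ts = TagStack()
#   ts.push((TEXTNS, 'list'), {}); ts.push((TEXTNS, 'list-item'), {})
#   ts.count_tags((TEXTNS, 'list'))   # -> 1, i.e. current list nesting depth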
special_styles = {
'S-Emphasis':'em',
'S-Citation':'cite',
'S-Strong_20_Emphasis':'strong',
'S-Variable':'var',
'S-Definition':'dfn',
'S-Teletype':'tt',
'P-Heading_20_1':'h1',
'P-Heading_20_2':'h2',
'P-Heading_20_3':'h3',
'P-Heading_20_4':'h4',
'P-Heading_20_5':'h5',
'P-Heading_20_6':'h6',
# 'P-Caption':'caption',
'P-Addressee':'address',
# 'P-List_20_Heading':'dt',
# 'P-List_20_Contents':'dd',
'P-Preformatted_20_Text':'pre',
# 'P-Table_20_Heading':'th',
# 'P-Table_20_Contents':'td',
# 'P-Text_20_body':'p'
}
#-----------------------------------------------------------------------------
#
# ODFCONTENTHANDLER
#
#-----------------------------------------------------------------------------
class ODF2XHTML(handler.ContentHandler):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
def __init__(self, generate_css=True, embedable=False):
# Tags
self.generate_css = generate_css
self.elements = {
(DCNS, 'title'): (self.s_processcont, self.e_dc_title),
(DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
(DCNS, 'creator'): (self.s_processcont, self.e_dc_creator),
(DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
(DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
(DRAWNS, 'custom-shape'): (self.s_custom_shape, self.e_custom_shape),
(DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
(DRAWNS, 'image'): (self.s_draw_image, None),
(DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
(DRAWNS, "layer-set"):(self.s_ignorexml, None),
(DRAWNS, 'object'): (self.s_draw_object, None),
(DRAWNS, 'object-ole'): (self.s_draw_object_ole, None),
(DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
(DRAWNS, 'text-box'): (self.s_draw_textbox, self.e_draw_textbox),
(METANS, 'creation-date'):(self.s_processcont, self.e_dc_metatag),
(METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
(METANS, 'initial-creator'): (self.s_processcont, self.e_dc_metatag),
(METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
(NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
(NUMBERNS, "currency-style"):(self.s_ignorexml, None),
(NUMBERNS, "date-style"):(self.s_ignorexml, None),
(NUMBERNS, "number-style"):(self.s_ignorexml, None),
(NUMBERNS, "text-style"):(self.s_ignorexml, None),
(OFFICENS, "annotation"):(self.s_ignorexml, None),
(OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
(OFFICENS, "document"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "forms"):(self.s_ignorexml, None),
(OFFICENS, "master-styles"):(self.s_office_master_styles, None),
(OFFICENS, "meta"):(self.s_ignorecont, None),
(OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
(OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
(OFFICENS, "styles"):(self.s_office_styles, None),
(OFFICENS, "text"):(self.s_office_text, self.e_office_text),
(OFFICENS, "scripts"):(self.s_ignorexml, None),
(OFFICENS, "settings"):(self.s_ignorexml, None),
(PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
# (STYLENS, "default-page-layout"):(self.s_style_default_page_layout, self.e_style_page_layout),
(STYLENS, "default-page-layout"):(self.s_ignorexml, None),
(STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
(STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
(STYLENS, "font-face"):(self.s_style_font_face, None),
# (STYLENS, "footer"):(self.s_style_footer, self.e_style_footer),
# (STYLENS, "footer-style"):(self.s_style_footer_style, None),
(STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
(STYLENS, "handout-master"):(self.s_ignorexml, None),
# (STYLENS, "header"):(self.s_style_header, self.e_style_header),
# (STYLENS, "header-footer-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "header-style"):(self.s_style_header_style, None),
(STYLENS, "master-page"):(self.s_style_master_page, None),
(STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
(STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
# (STYLENS, "page-layout"):(self.s_ignorexml, None),
(STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
(STYLENS, "style"):(self.s_style_style, self.e_style_style),
(STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
(STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
(STYLENS, "table-properties"):(self.s_style_handle_properties, None),
(STYLENS, "text-properties"):(self.s_style_handle_properties, None),
(SVGNS, 'desc'): (self.s_ignorexml, None),
(TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
(TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
(TABLENS, 'table-column'): (self.s_table_table_column, None),
(TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
(TABLENS, 'table'): (self.s_table_table, self.e_table_table),
(TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
(TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
(TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'bookmark'): (self.s_text_bookmark, None),
(TEXTNS, 'bookmark-start'): (self.s_text_bookmark, None),
(TEXTNS, 'bookmark-ref'): (self.s_text_bookmark_ref, self.e_text_a),
(TEXTNS, 'bookmark-ref-start'): (self.s_text_bookmark_ref, None),
(TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
(TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'line-break'):(self.s_text_line_break, None),
(TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
(TEXTNS, "list"):(self.s_text_list, self.e_text_list),
(TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
(TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
(TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
(TEXTNS, "list-style"):(None, None),
(TEXTNS, "note"):(self.s_text_note, None),
(TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
(TEXTNS, "note-citation"):(None, self.e_text_note_citation),
(TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
(TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
(TEXTNS, 's'): (self.s_text_s, None),
(TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
(TEXTNS, 'tab'): (self.s_text_tab, None),
(TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
}
if embedable:
self.make_embedable()
self._resetobject()
def set_plain(self):
""" Tell the parser to not generate CSS """
self.generate_css = False
def set_embedable(self):
""" Tells the converter to only output the parts inside the <body>"""
self.elements[(OFFICENS, "text")] = (None,None)
self.elements[(OFFICENS, "spreadsheet")] = (None,None)
self.elements[(OFFICENS, "presentation")] = (None,None)
self.elements[(OFFICENS, "document-content")] = (None,None)
def add_style_file(self, stylefilename, media=None):
""" Add a link to an external style file.
            Also turns off the embedding of styles in the HTML
"""
self.use_internal_css = False
self.stylefilename = stylefilename
if media:
self.metatags.append('<link rel="stylesheet" type="text/css" href="%s" media="%s"/>\n' % (stylefilename,media))
else:
self.metatags.append('<link rel="stylesheet" type="text/css" href="%s"/>\n' % (stylefilename))
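    # Usage sketch ("styles.css" is a hypothetical filename):
    #   odhandler.add_style_file("styles.css", media="print")
    # emits a <link rel="stylesheet" ...> in <head> and suppresses the
    # inline <style> block that generate_stylesheet() would otherwise write.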
def _resetfootnotes(self):
# Footnotes and endnotes
self.notedict = {}
self.currentnote = 0
self.notebody = ''
def _resetobject(self):
self.lines = []
self._wfunc = self._wlines
self.xmlfile = ''
self.title = ''
self.language = ''
self.creator = ''
self.data = []
self.tagstack = TagStack()
self.htmlstack = []
self.pstack = []
self.processelem = True
self.processcont = True
self.listtypes = {}
self.headinglevels = [0, 0,0,0,0,0, 0,0,0,0,0] # level 0 to 10
self.use_internal_css = True
self.cs = StyleToCSS()
self.anchors = {}
# Style declarations
self.stylestack = []
self.styledict = {}
self.currentstyle = None
self._resetfootnotes()
# Tags from meta.xml
self.metatags = []
def writeout(self, s):
if s != '':
self._wfunc(s)
def writedata(self):
d = ''.join(self.data)
if d != '':
self.writeout(escape(d))
def opentag(self, tag, attrs={}, block=False):
""" Create an open HTML tag """
self.htmlstack.append((tag,attrs,block))
a = []
for key,val in list(attrs.items()):
a.append('''%s=%s''' % (key, quoteattr(val)))
if len(a) == 0:
self.writeout("<%s>" % tag)
else:
self.writeout("<%s %s>" % (tag, " ".join(a)))
if block == True:
self.writeout("\n")
def closetag(self, tag, block=True):
""" Close an open HTML tag """
self.htmlstack.pop()
self.writeout("</%s>" % tag)
if block == True:
self.writeout("\n")
def emptytag(self, tag, attrs={}):
a = []
for key,val in list(attrs.items()):
a.append('''%s=%s''' % (key, quoteattr(val)))
self.writeout("<%s %s/>\n" % (tag, " ".join(a)))
#--------------------------------------------------
# Interface to parser
#--------------------------------------------------
def characters(self, data):
if self.processelem and self.processcont:
self.data.append(data)
def startElementNS(self, tag, qname, attrs):
self.pstack.append( (self.processelem, self.processcont) )
if self.processelem:
method = self.elements.get(tag, (None, None) )[0]
if method:
self.handle_starttag(tag, method, attrs)
else:
self.unknown_starttag(tag,attrs)
self.tagstack.push( tag, attrs )
def endElementNS(self, tag, qname):
stag, attrs = self.tagstack.pop()
if self.processelem:
method = self.elements.get(tag, (None, None) )[1]
if method:
self.handle_endtag(tag, attrs, method)
else:
self.unknown_endtag(tag, attrs)
self.processelem, self.processcont = self.pstack.pop()
#--------------------------------------------------
def handle_starttag(self, tag, method, attrs):
method(tag,attrs)
def handle_endtag(self, tag, attrs, method):
method(tag, attrs)
def unknown_starttag(self, tag, attrs):
pass
def unknown_endtag(self, tag, attrs):
pass
def s_ignorexml(self, tag, attrs):
""" Ignore this xml element and all children of it
It will automatically stop ignoring
"""
self.processelem = False
def s_ignorecont(self, tag, attrs):
""" Stop processing the text nodes """
self.processcont = False
def s_processcont(self, tag, attrs):
""" Start processing the text nodes """
self.processcont = True
def classname(self, attrs):
""" Generate a class name from a style name """
c = attrs.get((TEXTNS,'style-name'),'')
c = c.replace(".","_")
return c
def get_anchor(self, name):
""" Create a unique anchor id for a href name """
if name not in self.anchors:
self.anchors[name] = "anchor%03d" % (len(self.anchors) + 1)
return self.anchors.get(name)
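    # Example: the first distinct name yields "anchor001", the second
    # "anchor002", and so on; asking for a known name returns its existing id.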
#--------------------------------------------------
def purgedata(self):
self.data = []
#-----------------------------------------------------------------------------
#
# Handle meta data
#
#-----------------------------------------------------------------------------
def e_dc_title(self, tag, attrs):
""" Get the title from the meta data and create a HTML <title>
"""
self.title = ''.join(self.data)
#self.metatags.append('<title>%s</title>\n' % escape(self.title))
self.data = []
def e_dc_metatag(self, tag, attrs):
""" Any other meta data is added as a <meta> element
"""
self.metatags.append('<meta name="%s" content=%s/>\n' % (tag[1], quoteattr(''.join(self.data))))
self.data = []
def e_dc_contentlanguage(self, tag, attrs):
""" Set the content language. Identifies the targeted audience
"""
self.language = ''.join(self.data)
self.metatags.append('<meta http-equiv="content-language" content="%s"/>\n' % escape(self.language))
self.data = []
def e_dc_creator(self, tag, attrs):
""" Set the content creator. Identifies the targeted audience
"""
self.creator = ''.join(self.data)
self.metatags.append('<meta http-equiv="creator" content="%s"/>\n' % escape(self.creator))
self.data = []
def s_custom_shape(self, tag, attrs):
""" A <draw:custom-shape> is made into a <div> in HTML which is then styled
"""
anchor_type = attrs.get((TEXTNS,'anchor-type'),'notfound')
htmltag = 'div'
name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
if name == 'G-':
name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
name = name.replace(".","_")
if anchor_type == "paragraph":
style = 'position:absolute;'
elif anchor_type == 'char':
style = "position:absolute;"
elif anchor_type == 'as-char':
htmltag = 'div'
style = ''
else:
style = "position: absolute;"
if (SVGNS,"width") in attrs:
style = style + "width:" + attrs[(SVGNS,"width")] + ";"
if (SVGNS,"height") in attrs:
style = style + "height:" + attrs[(SVGNS,"height")] + ";"
if (SVGNS,"x") in attrs:
style = style + "left:" + attrs[(SVGNS,"x")] + ";"
if (SVGNS,"y") in attrs:
style = style + "top:" + attrs[(SVGNS,"y")] + ";"
if self.generate_css:
self.opentag(htmltag, {'class': name, 'style': style})
else:
self.opentag(htmltag)
def e_custom_shape(self, tag, attrs):
""" End the <draw:frame>
"""
self.closetag('div')
def s_draw_frame(self, tag, attrs):
""" A <draw:frame> is made into a <div> in HTML which is then styled
"""
anchor_type = attrs.get((TEXTNS,'anchor-type'),'notfound')
htmltag = 'div'
name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
if name == 'G-':
name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
name = name.replace(".","_")
if anchor_type == "paragraph":
style = 'position:relative;'
elif anchor_type == 'char':
style = "position:relative;"
elif anchor_type == 'as-char':
htmltag = 'div'
style = ''
else:
style = "position:absolute;"
if (SVGNS,"width") in attrs:
style = style + "width:" + attrs[(SVGNS,"width")] + ";"
if (SVGNS,"height") in attrs:
style = style + "height:" + attrs[(SVGNS,"height")] + ";"
if (SVGNS,"x") in attrs:
style = style + "left:" + attrs[(SVGNS,"x")] + ";"
if (SVGNS,"y") in attrs:
style = style + "top:" + attrs[(SVGNS,"y")] + ";"
if self.generate_css:
self.opentag(htmltag, {'class': name, 'style': style})
else:
self.opentag(htmltag)
def e_draw_frame(self, tag, attrs):
""" End the <draw:frame>
"""
self.closetag('div')
def s_draw_fill_image(self, tag, attrs):
name = attrs.get( (DRAWNS,'name'), "NoName")
imghref = attrs[(XLINKNS,"href")]
imghref = self.rewritelink(imghref)
self.cs.fillimages[name] = imghref
def rewritelink(self, imghref):
""" Intended to be overloaded if you don't store your pictures
in a Pictures subfolder
"""
return imghref
def s_draw_image(self, tag, attrs):
""" A <draw:image> becomes an <img/> element
"""
parent = self.tagstack.stackparent()
anchor_type = parent.get((TEXTNS,'anchor-type'))
imghref = attrs[(XLINKNS,"href")]
imghref = self.rewritelink(imghref)
htmlattrs = {'alt':"", 'src':imghref }
if self.generate_css:
if anchor_type != "char":
htmlattrs['style'] = "display: block;"
self.emptytag('img', htmlattrs)
def s_draw_object(self, tag, attrs):
""" A <draw:object> is embedded object in the document (e.g. spreadsheet in presentation).
"""
objhref = attrs[(XLINKNS,"href")]
# Remove leading "./": from "./Object 1" to "Object 1"
# objhref = objhref [2:]
# Not using os.path.join since it fails to find the file on Windows.
# objcontentpath = '/'.join([objhref, 'content.xml'])
for c in self.document.childnodes:
if c.folder == objhref:
self._walknode(c.topnode)
def s_draw_object_ole(self, tag, attrs):
""" A <draw:object-ole> is embedded OLE object in the document (e.g. MS Graph).
"""
class_id = attrs[(DRAWNS,"class-id")]
if class_id and class_id.lower() == "00020803-0000-0000-c000-000000000046": ## Microsoft Graph 97 Chart
tagattrs = { 'name':'object_ole_graph', 'class':'ole-graph' }
self.opentag('a', tagattrs)
            self.closetag('a')  # closetag() takes no attributes
def s_draw_page(self, tag, attrs):
""" A <draw:page> is a slide in a presentation. We use a <fieldset> element in HTML.
Therefore if you convert a ODP file, you get a series of <fieldset>s.
Override this for your own purpose.
"""
name = attrs.get( (DRAWNS,'name'), "NoName")
stylename = attrs.get( (DRAWNS,'style-name'), "")
stylename = stylename.replace(".","_")
masterpage = attrs.get( (DRAWNS,'master-page-name'),"")
masterpage = masterpage.replace(".","_")
if self.generate_css:
self.opentag('fieldset', {'class':"DP-%s MP-%s" % (stylename, masterpage) })
else:
self.opentag('fieldset')
self.opentag('legend')
self.writeout(escape(name))
self.closetag('legend')
def e_draw_page(self, tag, attrs):
self.closetag('fieldset')
def s_draw_textbox(self, tag, attrs):
style = ''
if (FONS,"min-height") in attrs:
style = style + "min-height:" + attrs[(FONS,"min-height")] + ";"
self.opentag('div')
# self.opentag('div', {'style': style})
def e_draw_textbox(self, tag, attrs):
""" End the <draw:text-box>
"""
self.closetag('div')
def html_body(self, tag, attrs):
self.writedata()
if self.generate_css and self.use_internal_css:
self.opentag('style', {'type':"text/css"}, True)
self.writeout('/*<![CDATA[*/\n')
self.generate_stylesheet()
self.writeout('/*]]>*/\n')
self.closetag('style')
self.purgedata()
self.closetag('head')
self.opentag('body', block=True)
default_styles = """
img { width: 100%; height: 100%; }
* { padding: 0; margin: 0; background-color:white; }
body { margin: 0 1em; }
ol, ul { padding-left: 2em; }
"""
def generate_stylesheet(self):
for name in self.stylestack:
styles = self.styledict.get(name)
# Preload with the family's default style
if '__style-family' in styles and styles['__style-family'] in self.styledict:
familystyle = self.styledict[styles['__style-family']].copy()
del styles['__style-family']
for style, val in list(styles.items()):
familystyle[style] = val
styles = familystyle
# Resolve the remaining parent styles
while '__parent-style-name' in styles and styles['__parent-style-name'] in self.styledict:
parentstyle = self.styledict[styles['__parent-style-name']].copy()
del styles['__parent-style-name']
for style, val in list(styles.items()):
parentstyle[style] = val
styles = parentstyle
self.styledict[name] = styles
# Write the styles to HTML
self.writeout(self.default_styles)
for name in self.stylestack:
styles = self.styledict.get(name)
css2 = self.cs.convert_styles(styles)
self.writeout("%s {\n" % name)
for style, val in list(css2.items()):
self.writeout("\t%s: %s;\n" % (style, val) )
self.writeout("}\n")
def generate_footnotes(self):
if self.currentnote == 0:
return
if self.generate_css:
self.opentag('ol', {'style':'border-top: 1px solid black'}, True)
else:
self.opentag('ol')
for key in range(1,self.currentnote+1):
note = self.notedict[key]
# for key,note in self.notedict.items():
self.opentag('li', { 'id':"footnote-%d" % key })
# self.opentag('sup')
# self.writeout(escape(note['citation']))
# self.closetag('sup', False)
self.writeout(note['body'])
self.closetag('li')
self.closetag('ol')
def s_office_automatic_styles(self, tag, attrs):
if self.xmlfile == 'styles.xml':
self.autoprefix = "A"
else:
self.autoprefix = ""
def s_office_document_content(self, tag, attrs):
""" First tag in the content.xml file"""
self.writeout('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" ')
self.writeout('"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
self.opentag('html', {'xmlns':"http://www.w3.org/1999/xhtml"}, True)
self.opentag('head', block=True)
self.emptytag('meta', { 'http-equiv':"Content-Type", 'content':"text/html;charset=UTF-8"})
for metaline in self.metatags:
self.writeout(metaline)
self.writeout('<title>%s</title>\n' % escape(self.title))
def e_office_document_content(self, tag, attrs):
""" Last tag """
self.closetag('html')
def s_office_master_styles(self, tag, attrs):
""" """
def s_office_presentation(self, tag, attrs):
""" For some odd reason, OpenOffice Impress doesn't define a default-style
for the 'paragraph'. We therefore force a standard when we see
it is a presentation
"""
self.styledict['p'] = {(FONS,'font-size'): "24pt" }
self.styledict['presentation'] = {(FONS,'font-size'): "24pt" }
self.html_body(tag, attrs)
def e_office_presentation(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_spreadsheet(self, tag, attrs):
self.html_body(tag, attrs)
def e_office_spreadsheet(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_styles(self, tag, attrs):
self.autoprefix = ""
def s_office_text(self, tag, attrs):
""" OpenDocument text """
self.styledict['frame'] = { (STYLENS,'wrap'): 'parallel'}
self.html_body(tag, attrs)
def e_office_text(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_style_handle_properties(self, tag, attrs):
""" Copy all attributes to a struct.
We will later convert them to CSS2
"""
for key,attr in list(attrs.items()):
self.styledict[self.currentstyle][key] = attr
familymap = {'frame':'frame', 'paragraph':'p', 'presentation':'presentation',
'text':'span','section':'div',
'table':'table','table-cell':'td','table-column':'col',
'table-row':'tr','graphic':'graphic' }
def s_style_default_style(self, tag, attrs):
""" A default style is like a style on an HTML tag
"""
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
self.currentstyle = htmlfamily
# self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def e_style_default_style(self, tag, attrs):
self.currentstyle = None
def s_style_font_face(self, tag, attrs):
""" It is possible that the HTML browser doesn't know how to
            show a particular font. Luckily ODF provides generic fallbacks.
Unfortunately they are not the same as CSS2.
CSS2: serif, sans-serif, cursive, fantasy, monospace
ODF: roman, swiss, modern, decorative, script, system
"""
name = attrs[(STYLENS,"name")]
family = attrs[(SVGNS,"font-family")]
generic = attrs.get( (STYLENS,'font-family-generic'),"" )
self.cs.save_font(name, family, generic)
def s_style_footer(self, tag, attrs):
self.opentag('div', { 'id':"footer" })
self.purgedata()
def e_style_footer(self, tag, attrs):
self.writedata()
self.closetag('div')
self.purgedata()
def s_style_footer_style(self, tag, attrs):
self.currentstyle = "@print #footer"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_header(self, tag, attrs):
self.opentag('div', { 'id':"header" })
self.purgedata()
def e_style_header(self, tag, attrs):
self.writedata()
self.closetag('div')
self.purgedata()
def s_style_header_style(self, tag, attrs):
self.currentstyle = "@print #header"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_default_page_layout(self, tag, attrs):
""" Collect the formatting for the default page layout style.
"""
self.currentstyle = "@page"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_page_layout(self, tag, attrs):
""" Collect the formatting for the page layout style.
This won't work in CSS 2.1, as page identifiers are not allowed.
It is legal in CSS3, but the rest of the application doesn't specify when to use what page layout
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".PL-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def e_style_page_layout(self, tag, attrs):
""" End this style
"""
self.currentstyle = None
def s_style_master_page(self, tag, attrs):
""" Collect the formatting for the page layout style.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".MP-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {('','position'):'relative'}
# Then load the pagelayout style if we find it
pagelayout = attrs.get( (STYLENS,'page-layout-name'), None)
if pagelayout:
pagelayout = ".PL-" + pagelayout
if pagelayout in self.styledict:
styles = self.styledict[pagelayout]
for style, val in list(styles.items()):
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = pagelayout
self.s_ignorexml(tag, attrs)
# Short prefixes for class selectors
_familyshort = {'drawing-page':'DP', 'paragraph':'P', 'presentation':'PR',
'text':'S', 'section':'D',
'table':'T', 'table-cell':'TD', 'table-column':'TC',
'table-row':'TR', 'graphic':'G' }
def s_style_style(self, tag, attrs):
""" Collect the formatting for the style.
Styles have scope. The same name can be used for both paragraph and
            character styles. Since CSS has no scope we use a prefix. (Not elegant.)
In ODF a style can have a parent, these parents can be chained.
We may not have encountered the parent yet, but if we have, we resolve it.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
sfamily = self._familyshort.get(family,'X')
name = "%s%s-%s" % (self.autoprefix, sfamily, name)
parent = attrs.get( (STYLENS,'parent-style-name') )
self.currentstyle = special_styles.get(name,"."+name)
self.stylestack.append(self.currentstyle)
if self.currentstyle not in self.styledict:
self.styledict[self.currentstyle] = {}
self.styledict[self.currentstyle]['__style-family'] = htmlfamily
# Then load the parent style if we find it
if parent:
parent = "%s-%s" % (sfamily, parent)
parent = special_styles.get(parent, "."+parent)
if parent in self.styledict:
styles = self.styledict[parent]
for style, val in list(styles.items()):
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = parent
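    # Example (content.xml, where autoprefix is typically ""): a paragraph
    # style named "Heading_20_1" becomes "P-Heading_20_1", which
    # special_styles maps to the bare "h1" selector; an unlisted style
    # "Foo" becomes the class selector ".P-Foo".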
def e_style_style(self, tag, attrs):
""" End this style
"""
self.currentstyle = None
def s_table_table(self, tag, attrs):
""" Start a table
"""
c = attrs.get( (TABLENS,'style-name'), None)
if c and self.generate_css:
c = c.replace(".","_")
self.opentag('table',{ 'class': "T-%s" % c })
else:
self.opentag('table')
self.purgedata()
def e_table_table(self, tag, attrs):
""" End a table
"""
self.writedata()
self.closetag('table')
self.purgedata()
def s_table_table_cell(self, tag, attrs):
""" Start a table cell """
#FIXME: number-columns-repeated § 8.1.3
#repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
rowspan = attrs.get( (TABLENS,'number-rows-spanned') )
if rowspan:
htmlattrs['rowspan'] = rowspan
colspan = attrs.get( (TABLENS,'number-columns-spanned') )
if colspan:
htmlattrs['colspan'] = colspan
c = attrs.get( (TABLENS,'style-name') )
if c:
htmlattrs['class'] = 'TD-%s' % c.replace(".","_")
self.opentag('td', htmlattrs)
self.purgedata()
def e_table_table_cell(self, tag, attrs):
""" End a table cell """
self.writedata()
self.closetag('td')
self.purgedata()
def s_table_table_column(self, tag, attrs):
""" Start a table column """
c = attrs.get( (TABLENS,'style-name'), None)
repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
if c:
htmlattrs['class'] = "TC-%s" % c.replace(".","_")
for x in range(repeated):
self.emptytag('col', htmlattrs)
self.purgedata()
def s_table_table_row(self, tag, attrs):
""" Start a table row """
#FIXME: table:number-rows-repeated
c = attrs.get( (TABLENS,'style-name'), None)
htmlattrs = {}
if c:
htmlattrs['class'] = "TR-%s" % c.replace(".","_")
self.opentag('tr', htmlattrs)
self.purgedata()
def e_table_table_row(self, tag, attrs):
""" End a table row """
self.writedata()
self.closetag('tr')
self.purgedata()
def s_text_a(self, tag, attrs):
""" Anchors start """
self.writedata()
href = attrs[(XLINKNS,"href")].split("|")[0]
if href[0] == "#":
href = "#" + self.get_anchor(href[1:])
self.opentag('a', {'href':href})
self.purgedata()
def e_text_a(self, tag, attrs):
""" End an anchor or bookmark reference """
self.writedata()
self.closetag('a', False)
self.purgedata()
def s_text_bookmark(self, tag, attrs):
""" Bookmark definition """
name = attrs[(TEXTNS,'name')]
html_id = self.get_anchor(name)
self.writedata()
self.opentag('span', {'id':html_id})
self.closetag('span', False)
self.purgedata()
def s_text_bookmark_ref(self, tag, attrs):
""" Bookmark reference """
name = attrs[(TEXTNS,'ref-name')]
html_id = "#" + self.get_anchor(name)
self.writedata()
self.opentag('a', {'href':html_id})
self.purgedata()
def s_text_h(self, tag, attrs):
""" Headings start """
level = int(attrs[(TEXTNS,'outline-level')])
if level > 6: level = 6 # Heading levels go only to 6 in XHTML
if level < 1: level = 1
self.headinglevels[level] = self.headinglevels[level] + 1
name = self.classname(attrs)
for x in range(level + 1,10):
self.headinglevels[x] = 0
special = special_styles.get("P-"+name)
if special or not self.generate_css:
self.opentag('h%s' % level)
else:
self.opentag('h%s' % level, {'class':"P-%s" % name })
self.purgedata()
def e_text_h(self, tag, attrs):
""" Headings end
Side-effect: If there is no title in the metadata, then it is taken
from the first heading of any level.
"""
self.writedata()
level = int(attrs[(TEXTNS,'outline-level')])
if level > 6: level = 6 # Heading levels go only to 6 in XHTML
if level < 1: level = 1
lev = self.headinglevels[1:level+1]
outline = '.'.join(map(str,lev) )
heading = ''.join(self.data)
if self.title == '': self.title = heading
anchor = self.get_anchor("%s.%s" % ( outline, heading))
self.opentag('a', {'id': anchor} )
self.closetag('a', False)
self.closetag('h%s' % level)
self.purgedata()
def s_text_line_break(self, tag, attrs):
""" Force a line break (<br/>) """
self.writedata()
self.emptytag('br')
self.purgedata()
def s_text_list(self, tag, attrs):
""" Start a list (<ul> or <ol>)
To know which level we're at, we have to count the number
of <text:list> elements on the tagstack.
"""
name = attrs.get( (TEXTNS,'style-name') )
level = self.tagstack.count_tags(tag) + 1
if name:
name = name.replace(".","_")
else:
# FIXME: If a list is contained in a table cell or text box,
# the list level must return to 1, even though the table or
# textbox itself may be nested within another list.
name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
list_class = "%s_%d" % (name, level)
if self.generate_css:
self.opentag('%s' % self.listtypes.get(list_class,'ul'), {'class': list_class })
else:
self.opentag('%s' % self.listtypes.get(list_class,'ul'))
self.purgedata()
def e_text_list(self, tag, attrs):
""" End a list """
self.writedata()
name = attrs.get( (TEXTNS,'style-name') )
level = self.tagstack.count_tags(tag) + 1
if name:
name = name.replace(".","_")
else:
# FIXME: If a list is contained in a table cell or text box,
# the list level must return to 1, even though the table or
# textbox itself may be nested within another list.
name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
list_class = "%s_%d" % (name, level)
self.closetag(self.listtypes.get(list_class,'ul'))
self.purgedata()
def s_text_list_item(self, tag, attrs):
""" Start list item """
self.opentag('li')
self.purgedata()
def e_text_list_item(self, tag, attrs):
""" End list item """
self.writedata()
self.closetag('li')
self.purgedata()
def s_text_list_level_style_bullet(self, tag, attrs):
""" CSS doesn't have the ability to set the glyph
to a particular character, so we just go through
the available glyphs
"""
name = self.tagstack.rfindattr( (STYLENS,'name') )
level = attrs[(TEXTNS,'level')]
self.prevstyle = self.currentstyle
list_class = "%s_%s" % (name, level)
self.listtypes[list_class] = 'ul'
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
level = int(level)
listtype = ("square", "disc", "circle")[level % 3]
self.styledict[self.currentstyle][('','list-style-type')] = listtype
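    # Example: level 1 -> "disc", level 2 -> "circle", level 3 -> "square",
    # after which the (level % 3) cycle repeats.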
def e_text_list_level_style_bullet(self, tag, attrs):
self.currentstyle = self.prevstyle
del self.prevstyle
def s_text_list_level_style_number(self, tag, attrs):
name = self.tagstack.stackparent()[(STYLENS,'name')]
level = attrs[(TEXTNS,'level')]
        num_format = attrs.get( (STYLENS,'num-format'),"1")
list_class = "%s_%s" % (name, level)
self.prevstyle = self.currentstyle
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.listtypes[list_class] = 'ol'
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
if num_format == "1": listtype = "decimal"
elif num_format == "I": listtype = "upper-roman"
elif num_format == "i": listtype = "lower-roman"
elif num_format == "A": listtype = "upper-alpha"
elif num_format == "a": listtype = "lower-alpha"
else: listtype = "decimal"
self.styledict[self.currentstyle][('','list-style-type')] = listtype
def e_text_list_level_style_number(self, tag, attrs):
self.currentstyle = self.prevstyle
del self.prevstyle
def s_text_note(self, tag, attrs):
self.writedata()
self.purgedata()
self.currentnote = self.currentnote + 1
self.notedict[self.currentnote] = {}
self.notebody = []
def e_text_note(self, tag, attrs):
pass
def collectnote(self,s):
if s != '':
self.notebody.append(s)
def s_text_note_body(self, tag, attrs):
self._orgwfunc = self._wfunc
self._wfunc = self.collectnote
def e_text_note_body(self, tag, attrs):
self._wfunc = self._orgwfunc
self.notedict[self.currentnote]['body'] = ''.join(self.notebody)
self.notebody = ''
del self._orgwfunc
def e_text_note_citation(self, tag, attrs):
mark = ''.join(self.data)
self.notedict[self.currentnote]['citation'] = mark
self.opentag('a',{ 'href': "#footnote-%s" % self.currentnote })
self.opentag('sup')
# self.writeout( escape(mark) )
# Since HTML only knows about endnotes, there is too much risk that the
# marker is reused in the source. Therefore we force numeric markers
self.writeout(str(self.currentnote))
self.closetag('sup')
self.closetag('a')
def s_text_p(self, tag, attrs):
""" Paragraph
"""
htmlattrs = {}
specialtag = "p"
c = attrs.get( (TEXTNS,'style-name'), None)
if c:
c = c.replace(".","_")
specialtag = special_styles.get("P-"+c)
if specialtag is None:
specialtag = 'p'
if self.generate_css:
htmlattrs['class'] = "P-%s" % c
self.opentag(specialtag, htmlattrs)
self.purgedata()
def e_text_p(self, tag, attrs):
""" End Paragraph
"""
specialtag = "p"
c = attrs.get( (TEXTNS,'style-name'), None)
if c:
c = c.replace(".","_")
specialtag = special_styles.get("P-"+c)
if specialtag is None:
specialtag = 'p'
self.writedata()
self.closetag(specialtag)
self.purgedata()
def s_text_s(self, tag, attrs):
""" Generate a number of spaces. ODF has an element; HTML uses
We use   so we can send the output through an XML parser if we desire to
"""
c = attrs.get( (TEXTNS,'c'),"1")
for x in range(int(c)):
self.writeout(' ')
def s_text_span(self, tag, attrs):
""" The <text:span> element matches the <span> element in HTML. It is
            typically used to apply properties to the text.
"""
self.writedata()
c = attrs.get( (TEXTNS,'style-name'), None)
htmlattrs = {}
if c:
c = c.replace(".","_")
special = special_styles.get("S-"+c)
if special is None and self.generate_css:
htmlattrs['class'] = "S-%s" % c
self.opentag('span', htmlattrs)
self.purgedata()
def e_text_span(self, tag, attrs):
""" End the <text:span> """
self.writedata()
self.closetag('span', False)
self.purgedata()
def s_text_tab(self, tag, attrs):
""" Move to the next tabstop. We ignore this in HTML
"""
self.writedata()
self.writeout(' ')
self.purgedata()
def s_text_x_source(self, tag, attrs):
""" Various indexes and tables of contents. We ignore those.
"""
self.writedata()
self.purgedata()
self.s_ignorexml(tag, attrs)
def e_text_x_source(self, tag, attrs):
""" Various indexes and tables of contents. We ignore those.
"""
self.writedata()
self.purgedata()
#-----------------------------------------------------------------------------
#
# Reading the file
#
#-----------------------------------------------------------------------------
def load(self, odffile):
""" Loads a document into the parser and parses it.
The argument can either be a filename or a document in memory.
"""
self.lines = []
self._wfunc = self._wlines
if isinstance(odffile, str):
self.document = load(odffile)
else:
self.document = odffile
self._walknode(self.document.topnode)
def _walknode(self, node):
if node.nodeType == Node.ELEMENT_NODE:
self.startElementNS(node.qname, node.tagName, node.attributes)
for c in node.childNodes:
self._walknode(c)
self.endElementNS(node.qname, node.tagName)
if node.nodeType == Node.TEXT_NODE or node.nodeType == Node.CDATA_SECTION_NODE:
self.characters(str(node))
def odf2xhtml(self, odffile):
""" Load a file and return the XHTML
"""
self.load(odffile)
return self.xhtml()
def _wlines(self,s):
if s != '': self.lines.append(s)
def xhtml(self):
""" Returns the xhtml
"""
return ''.join(self.lines)
def _writecss(self, s):
if s != '': self._csslines.append(s)
def _writenothing(self, s):
pass
def css(self):
""" Returns the CSS content """
self._csslines = []
self._wfunc = self._writecss
self.generate_stylesheet()
res = ''.join(self._csslines)
self._wfunc = self._wlines
del self._csslines
return res
def save(self, outputfile, addsuffix=False):
""" Save the HTML under the filename.
If the filename is '-' then save to stdout
We have the last style filename in self.stylefilename
"""
        if outputfile == '-':
            outputfp = sys.stdout.buffer  # binary stdout; we write encoded bytes
        else:
            if addsuffix:
                outputfile = outputfile + ".html"
            outputfp = open(outputfile, "wb")  # file() was Python 2 only
        outputfp.write(self.xhtml().encode('us-ascii','xmlcharrefreplace'))
        outputfp.close()
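# Minimal end-to-end sketch (assumes an ODF file "example.odt" on disk;
# the filename is illustrative):
#   converter = ODF2XHTML()
#   xhtml = converter.odf2xhtml("example.odt")   # load() + xhtml() in one call
#   converter.save("example", addsuffix=True)    # writes example.html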
class ODF2XHTMLembedded(ODF2XHTML):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
def __init__(self, lines, generate_css=True, embedable=False):
self._resetobject()
self.lines = lines
# Tags
self.generate_css = generate_css
self.elements = {
# (DCNS, 'title'): (self.s_processcont, self.e_dc_title),
# (DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
# (DCNS, 'creator'): (self.s_processcont, self.e_dc_metatag),
# (DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
# (DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
(DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
(DRAWNS, 'image'): (self.s_draw_image, None),
(DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
(DRAWNS, "layer-set"):(self.s_ignorexml, None),
(DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
(DRAWNS, 'object'): (self.s_draw_object, None),
(DRAWNS, 'object-ole'): (self.s_draw_object_ole, None),
(DRAWNS, 'text-box'): (self.s_draw_textbox, self.e_draw_textbox),
# (METANS, 'creation-date'):(self.s_processcont, self.e_dc_metatag),
# (METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
# (METANS, 'initial-creator'): (self.s_processcont, self.e_dc_metatag),
# (METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
(NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
(NUMBERNS, "currency-style"):(self.s_ignorexml, None),
(NUMBERNS, "date-style"):(self.s_ignorexml, None),
(NUMBERNS, "number-style"):(self.s_ignorexml, None),
(NUMBERNS, "text-style"):(self.s_ignorexml, None),
# (OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
# (OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "forms"):(self.s_ignorexml, None),
# (OFFICENS, "master-styles"):(self.s_office_master_styles, None),
(OFFICENS, "meta"):(self.s_ignorecont, None),
# (OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
# (OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
# (OFFICENS, "styles"):(self.s_office_styles, None),
# (OFFICENS, "text"):(self.s_office_text, self.e_office_text),
(OFFICENS, "scripts"):(self.s_ignorexml, None),
(PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
## (STYLENS, "default-page-layout"):(self.s_style_default_page_layout, self.e_style_page_layout),
# (STYLENS, "default-page-layout"):(self.s_ignorexml, None),
# (STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
# (STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "font-face"):(self.s_style_font_face, None),
## (STYLENS, "footer"):(self.s_style_footer, self.e_style_footer),
## (STYLENS, "footer-style"):(self.s_style_footer_style, None),
# (STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "handout-master"):(self.s_ignorexml, None),
## (STYLENS, "header"):(self.s_style_header, self.e_style_header),
## (STYLENS, "header-footer-properties"):(self.s_style_handle_properties, None),
## (STYLENS, "header-style"):(self.s_style_header_style, None),
# (STYLENS, "master-page"):(self.s_style_master_page, None),
# (STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
## (STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
# (STYLENS, "page-layout"):(self.s_ignorexml, None),
# (STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "style"):(self.s_style_style, self.e_style_style),
# (STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "table-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "text-properties"):(self.s_style_handle_properties, None),
(SVGNS, 'desc'): (self.s_ignorexml, None),
(TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
(TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
(TABLENS, 'table-column'): (self.s_table_table_column, None),
(TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
(TABLENS, 'table'): (self.s_table_table, self.e_table_table),
(TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
(TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
(TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
(TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'line-break'):(self.s_text_line_break, None),
(TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
(TEXTNS, "list"):(self.s_text_list, self.e_text_list),
(TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
(TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
(TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
(TEXTNS, "list-style"):(None, None),
(TEXTNS, "note"):(self.s_text_note, None),
(TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
(TEXTNS, "note-citation"):(None, self.e_text_note_citation),
(TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
(TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
(TEXTNS, 's'): (self.s_text_s, None),
(TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
(TEXTNS, 'tab'): (self.s_text_tab, None),
(TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "page-number"):(None, None),
}
| apache-2.0 |
omakk/servo | tests/wpt/css-tests/tools/py/py/_builtin.py | 259 | 6521 | import sys
try:
reversed = reversed
except NameError:
def reversed(sequence):
"""reversed(sequence) -> reverse iterator over values of the sequence
Return a reverse iterator
"""
if hasattr(sequence, '__reversed__'):
return sequence.__reversed__()
if not hasattr(sequence, '__getitem__'):
raise TypeError("argument to reversed() must be a sequence")
return reversed_iterator(sequence)
class reversed_iterator(object):
def __init__(self, seq):
self.seq = seq
self.remaining = len(seq)
def __iter__(self):
return self
def next(self):
i = self.remaining
if i > 0:
i -= 1
item = self.seq[i]
self.remaining = i
return item
raise StopIteration
def __length_hint__(self):
return self.remaining
try:
any = any
except NameError:
def any(iterable):
for x in iterable:
if x:
return True
return False
try:
all = all
except NameError:
def all(iterable):
for x in iterable:
if not x:
return False
return True
try:
sorted = sorted
except NameError:
builtin_cmp = cmp # need to use cmp as keyword arg
def sorted(iterable, cmp=None, key=None, reverse=0):
use_cmp = None
if key is not None:
if cmp is None:
def use_cmp(x, y):
return builtin_cmp(x[0], y[0])
else:
def use_cmp(x, y):
return cmp(x[0], y[0])
l = [(key(element), element) for element in iterable]
else:
if cmp is not None:
use_cmp = cmp
l = list(iterable)
if use_cmp is not None:
l.sort(use_cmp)
else:
l.sort()
if reverse:
l.reverse()
if key is not None:
return [element for (_, element) in l]
return l
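# Example (assumed): this backport mimics the builtin, e.g.
# sorted('bca') -> ['a', 'b', 'c'] and
# sorted([3, 1, 2], key=lambda x: -x) -> [3, 2, 1],
# using the decorate-sort-undecorate idiom above when a key is given.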
try:
set, frozenset = set, frozenset
except NameError:
from sets import set, frozenset
# pass through
enumerate = enumerate
try:
BaseException = BaseException
except NameError:
BaseException = Exception
try:
GeneratorExit = GeneratorExit
except NameError:
class GeneratorExit(Exception):
""" This exception is never raised, it is there to make it possible to
write code compatible with CPython 2.5 even in lower CPython
versions."""
pass
GeneratorExit.__module__ = 'exceptions'
_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
try:
callable = callable
except NameError:
def callable(obj):
return hasattr(obj, "__call__")
if sys.version_info >= (3, 0):
exec ("print_ = print ; exec_=exec")
import builtins
# some backward compatibility helpers
_basestring = str
def _totext(obj, encoding=None, errors=None):
if isinstance(obj, bytes):
if errors is None:
obj = obj.decode(encoding)
else:
obj = obj.decode(encoding, errors)
elif not isinstance(obj, str):
obj = str(obj)
return obj
def _isbytes(x):
return isinstance(x, bytes)
def _istext(x):
return isinstance(x, str)
text = str
bytes = bytes
def _getimself(function):
return getattr(function, '__self__', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
return getattr(function, "__code__", None)
def execfile(fn, globs=None, locs=None):
if globs is None:
back = sys._getframe(1)
globs = back.f_globals
locs = back.f_locals
del back
elif locs is None:
locs = globs
fp = open(fn, "r")
try:
source = fp.read()
finally:
fp.close()
co = compile(source, fn, "exec", dont_inherit=True)
exec_(co, globs, locs)
else:
import __builtin__ as builtins
_totext = unicode
_basestring = basestring
text = unicode
bytes = str
execfile = execfile
callable = callable
def _isbytes(x):
return isinstance(x, str)
def _istext(x):
return isinstance(x, unicode)
def _getimself(function):
return getattr(function, 'im_self', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
try:
return getattr(function, "__code__")
except AttributeError:
return getattr(function, "func_code", None)
def print_(*args, **kwargs):
""" minimal backport of py3k print statement. """
sep = ' '
if 'sep' in kwargs:
sep = kwargs.pop('sep')
end = '\n'
if 'end' in kwargs:
end = kwargs.pop('end')
file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
if kwargs:
args = ", ".join([str(x) for x in kwargs])
raise TypeError("invalid keyword arguments: %s" % args)
at_start = True
for x in args:
if not at_start:
file.write(sep)
file.write(str(x))
at_start = False
file.write(end)
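# Example (assumed): print_('x', 2, sep='=', end='!\n') writes "x=2!\n"
# to sys.stdout, mirroring the py3k print() keyword arguments.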
def exec_(obj, globals=None, locals=None):
""" minimal backport of py3k exec statement. """
__tracebackhide__ = True
if globals is None:
frame = sys._getframe(1)
globals = frame.f_globals
if locals is None:
locals = frame.f_locals
elif locals is None:
locals = globals
exec2(obj, globals, locals)
if sys.version_info >= (3, 0):
def _reraise(cls, val, tb):
__tracebackhide__ = True
assert hasattr(val, '__traceback__')
raise cls.with_traceback(val, tb)
else:
exec ("""
def _reraise(cls, val, tb):
__tracebackhide__ = True
raise cls, val, tb
def exec2(obj, globals, locals):
__tracebackhide__ = True
exec obj in globals, locals
""")
def _tryimport(*names):
""" return the first successfully imported module. """
assert names
for name in names:
try:
__import__(name)
except ImportError:
excinfo = sys.exc_info()
else:
return sys.modules[name]
_reraise(*excinfo)
| mpl-2.0 |
jacobsenanaizabel/shoop | shoop/core/models/_base.py | 6 | 1266 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import parler.models
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from shoop.utils import text
class ShoopModel(models.Model):
"""
Shoop Model.
"""
identifier_attr = 'identifier'
def __repr__(self):
if hasattr(self, self.identifier_attr):
identifier = getattr(self, self.identifier_attr) or ''
identifier_suf = '-{}'.format(text.force_ascii(identifier))
else:
identifier_suf = ''
return '<{}:{}{}>'.format(type(self).__name__, self.pk, identifier_suf)
class Meta:
abstract = True
@python_2_unicode_compatible
class TranslatableShoopModel(ShoopModel, parler.models.TranslatableModel):
name_attr = 'name'
def __str__(self):
name = self.safe_translation_getter(self.name_attr, any_language=True)
if name is None:
return '{}:{}'.format(type(self).__name__, self.pk)
return name
class Meta:
abstract = True
| agpl-3.0 |
mozilla-services/campaign_manager | campaign/auth/browserid_auth.py | 2 | 1261 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import urllib2
import browserid
class BrowserIDAuth(object):
_raw_assertion = None
def __init__(self,
**kw):
self.bid = browserid
def create_user(self, assertion=None, **kw):
self._raw_assertion = assertion
def get_user_id(self,
request,
audience=None,
assertion=None):
session = request.session
if (session.get('uid')):
return session.get('uid')
if audience is None:
audience = request.host
if assertion is None and 'assertion' in request.params:
assertion = \
urllib2.unquote(request.params.get('assertion')).strip()
if assertion is None:
assertion = self._raw_assertion
if assertion is None:
return None
try:
data = self.bid.verify(assertion, audience)
return data['email']
except Exception, e:
logging.info("Bad assertion [%s]" % repr(e))
return None
| mpl-2.0 |
aonotas/chainer | chainer/links/model/classifier.py | 1 | 4579 | from chainer.functions.evaluation import accuracy
from chainer.functions.loss import softmax_cross_entropy
from chainer import link
from chainer import reporter
class Classifier(link.Chain):
"""A simple classifier model.
This is an example of a chain that wraps another chain. It computes the
loss and accuracy based on a given input/label pair.
Args:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
label_key (int or str): Key to specify label variable from arguments.
When it is ``int``, a variable in positional arguments is used.
And when it is ``str``, a variable in keyword arguments is used.
Attributes:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
y (~chainer.Variable): Prediction for the last minibatch.
loss (~chainer.Variable): Loss value for the last minibatch.
accuracy (~chainer.Variable): Accuracy for the last minibatch.
compute_accuracy (bool): If ``True``, compute accuracy on the forward
computation. The default value is ``True``.
.. note::
This link uses :func:`chainer.softmax_cross_entropy` with
default arguments as a loss function (specified by ``lossfun``),
if users do not explicitly change it. In particular, the loss function
does not support double backpropagation.
If you need second or higher order differentiation, you need to turn
it on with ``enable_double_backprop=True``:
>>> import chainer.functions as F
>>> import chainer.links as L
>>>
>>> def lossfun(x, t):
... return F.softmax_cross_entropy(
... x, t, enable_double_backprop=True)
>>>
>>> predictor = L.Linear(10)
>>> model = L.Classifier(predictor, lossfun=lossfun)
"""
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy,
label_key=-1):
if not (isinstance(label_key, (int, str))):
raise TypeError('label_key must be int or str, but is %s' %
type(label_key))
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
self.label_key = label_key
with self.init_scope():
self.predictor = predictor
def __call__(self, *args, **kwargs):
"""Computes the loss value for an input and label pair.
It also computes accuracy and stores it to the attribute.
Args:
args (list of ~chainer.Variable): Input minibatch.
kwargs (dict of ~chainer.Variable): Input minibatch.
When ``label_key`` is ``int``, the corresponding element in ``args``
is treated as ground truth labels. And when it is ``str``, the
element in ``kwargs`` is used.
All the elements of ``args`` and ``kwargs`` except the ground truth
labels are features.
It feeds features to the predictor and compares the result
with ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = 'Label key %d is out of bounds' % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[:self.label_key] + args[self.label_key + 1:]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*args, **kwargs)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
| mit |
voc/voctomix | vocto/composites.py | 1 | 12816 | #!/usr/bin/env python3
# for debug logging
import logging
# use Frame
from vocto.frame import Frame, X, Y, L, T, R, B
# for cloning objects
import copy
# for parsing configuration items
import re
log = logging.getLogger('Composites')
class Composites:
""" a namespace for composite related methods
"""
def configure(cfg, size, add_swap=True):
""" read INI like configuration from <cfg> and return all the defined
composites. <size> is the overall frame size which all proportional
(floating point) coordinates are related to.
"""
# prepare resulting composites dictionary
composites = dict()
# walk through composites configuration
for c_name, c_val in cfg:
if '.' not in c_name:
raise RuntimeError("syntax error in composite config '{}' "
"(must be: 'name.attribute')"
.format(c_name))
# split name into name and attribute
name, attr = c_name.lower().rsplit('.', 1)
if name not in composites:
# add new composite
composites[name] = Composite(len(composites), name)
try:
# set attribute
composites[name].config(attr, c_val, size)
except RuntimeError as err:
raise RuntimeError(
"syntax error in composite config value at '{}':\n{}"
.format(name, err))
add_mirrored_composites(composites)
if add_swap:
# add any useful swapped targets
add_swapped_targets(composites)
return composites
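# Example configuration items as consumed by configure() (assumed layout;
# the names and coordinates here are illustrative, not from the original):
# fullscreen.a = *
# fullscreen.b = 0.75/0.0 0.25x0.25
# would yield one Composite named 'fullscreen' whose frame A covers the
# whole canvas and whose frame B is a quarter-size overlay at the top right.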
def targets(composites):
""" return a list of all composites that are not intermediate
"""
result = []
for c_name, c in composites.items():
if not c.inter:
result.append(c)
return sorted(result, key=lambda c: c.order)
def intermediates(composites):
""" return a list of all composites that are intermediate
"""
result = []
for c_name, c in composites.items():
if c.inter:
result.append(c)
return sorted(result, key=lambda c: c.order)
class Composite:
def __init__(self, order, name, a=Frame(True), b=Frame(True)):
assert type(order) is int or order is None
assert type(name) is str or not name
self.name = name
self.frame = [copy.deepcopy(a), copy.deepcopy(b)]
self.default = [None, None]
self.inter = False
self.noswap = False
self.mirror = False
self.order = order
def str_title():
return "Key A%s\tB%s Name" % (Frame.str_title(), Frame.str_title())
def __str__(self):
def hidden( x, hidden ):
return str(x).replace(' ','_') if hidden else str(x)
return "%s A%s\tB%s %s" % (" * " if self.A().key else " ",
hidden(self.A(), self.A().invisible() or self.covered()),
hidden(self.B(), self.B().invisible()),
self.name)
def equals(self, other, treat_covered_as_invisible, swapped=False):
""" compare two composites if they are looking the same
(e.g. a rectangle with size 0x0=looks the same as one with alpha=0
and so it is treated as equal here)
"""
if not swapped:
if not (self.A() == other.A() or (treat_covered_as_invisible and self.covered() and other.covered())):
return False
elif not (self.B() == other.B() or (self.B().invisible() and other.B().invisible())):
return False
else:
if not (self.A() == other.B() or (treat_covered_as_invisible and self.covered() and other.B().invisible())):
return False
elif not (self.B() == other.A() or (self.B().invisible() and other.covered())):
return False
return True
def A(self):
return self.frame[0]
def B(self):
return self.frame[1]
def Az(self, zorder):
frame = copy.deepcopy(self.frame[0])
frame.zorder = zorder
return frame
def Bz(self, zorder):
frame = copy.deepcopy(self.frame[1])
frame.zorder = zorder
return frame
def swapped(self):
""" swap A and B source items
"""
if self.noswap:
return self
else:
# deep copy everything
s = copy.deepcopy(self)
# then swap frames
s.frame = self.frame[::-1]
s.name = swap_name(self.name)
return s
def mirrored(self):
""" mirror A and B source items
"""
# deep copy everything
s = copy.copy(self)
# then mirror frames
s.frame = [f.mirrored() for f in self.frame]
s.name = mirror_name(self.name)
return s
def key(self):
for f in self.frame:
if f.key:
return True
return False
def config(self, attr, value, size):
""" set value <value> from INI attribute <attr>.
<size> is the input channel size
"""
if attr == 'a':
self.frame[0].rect = str2rect(value, size)
elif attr == 'b':
self.frame[1].rect = str2rect(value, size)
elif attr == 'crop-a':
self.frame[0].crop = str2crop(value, size)
elif attr == 'crop-b':
self.frame[1].crop = str2crop(value, size)
elif attr == 'default-a':
self.default[0] = value
elif attr == 'default-b':
self.default[1] = value
elif attr == 'alpha-a':
self.frame[0].alpha = str2alpha(value)
elif attr == 'alpha-b':
self.frame[1].alpha = str2alpha(value)
elif attr == 'inter':
self.inter = value
elif attr == 'noswap':
self.noswap = value
elif attr == 'mirror':
self.mirror = value
self.frame[0].original_size = size
self.frame[1].original_size = size
def covered(self):
""" check if below (A) is invisible or covered by above (B)
(considers shape with cropping and transparency)
"""
below, above = self.frame
if below.invisible():
return True
if above.invisible():
return False
bc = below.cropped()
ac = above.cropped()
# 'above' hides 'below' only if it is fully opaque and its cropped
# rectangle completely encloses the cropped rectangle of 'below'
return (above.alpha == 255 and
bc[L] >= ac[L] and
bc[T] >= ac[T] and
bc[R] <= ac[R] and
bc[B] <= ac[B])
def single(self):
""" check if above (B) is invisible
"""
below, above = self.frame
return above.invisible()
def both(self):
return not (self.single() or self.covered())
def add_swapped_targets(composites):
result = dict()
for c_name, c in composites.items():
if not (c.inter or c.noswap):
inc = True
for v_name, v in composites.items():
if v.equals(c.swapped(), True) and not v.inter:
inc = False
break
if inc:
log.debug("Adding auto-swapped target %s from %s" %
(swap_name(c_name), c_name))
r = c.swapped()
r.order = len(composites) + len(result)
result[swap_name(c_name)] = r
return composites.update(result)
def add_mirrored_composites(composites):
result = dict()
for c_name, c in composites.items():
if c.mirror:
r = c.mirrored()
r.order = len(composites) + len(result)
result[mirror_name(c_name)] = r
return composites.update(result)
def swap_name(name): return name[1:] if name[0] == '^' else "^" + name
def mirror_name(name): return name[1:] if name[0] == '|' else "|" + name
def absolute(str, max):
if str == '*':
assert max
# return maximum value
return int(max)
elif '.' in str:
assert max
# return absolute (Pixel) value in proportion to max
return int(float(str) * max)
else:
# return absolute (Pixel) value
return int(str)
def str2rect(str, size):
""" read rectangle pair from string '*', 'X/Y WxH', 'X/Y', 'WxH', 'X/Y WH', 'X/Y WH' or 'XY WH'
"""
# check for '*'
if str == "*":
# return overall position and size
return [0, 0, size[X], size[Y]]
# check for 'X/Y'
r = re.match(r'^\s*([-.\d]+)\s*/\s*([-.\d]+)\s*$', str)
if r:
# return X,Y and overall size
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
size[X],
size[Y]]
# check for 'WxH'
r = re.match(r'^\s*([.\d]+)\s*x\s*([.\d]+)\s*$', str)
if r:
# return overall pos and W,H
return [0,
0,
absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y])]
# check for 'X/Y WxH'
r = re.match(
r'^\s*([-.\d]+)\s*/\s*([-.\d]+)\s+([.\d]+)\s*x\s*([.\d]+)\s*$', str)
if r:
# return X,Y,X+W,Y+H
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(3), size[X]),
absolute(r.group(2), size[Y]) + absolute(r.group(4), size[Y])]
# check for 'XY WxH'
r = re.match(r'^\s*(-?\d+.\d+)\s+([.\d]+)\s*x\s*([.\d]+)\s*$', str)
if r:
# return XY,XY,XY+W,XY+H
return [absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(2), size[X]),
absolute(r.group(1), size[Y]) + absolute(r.group(3), size[Y])]
# check for 'X/Y WH'
r = re.match(r'^\s*([-.\d]+)\s*/\s*([-.\d]+)\s+(\d+.\d+)\s*$', str)
if r:
# return X,Y,X+WH,Y+WH
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(3), size[X]),
absolute(r.group(2), size[Y]) + absolute(r.group(3), size[Y])]
# check for 'XY WH'
r = re.match(r'^\s*(-?\d+.\d+)\s+(\d+.\d+)\s*$', str)
if r:
# return XY,XY,XY+WH,XY+WH
return [absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y]),
absolute(r.group(1), size[X]) + absolute(r.group(2), size[X]),
absolute(r.group(1), size[Y]) + absolute(r.group(2), size[Y])]
# didn't get it
raise RuntimeError("syntax error in rectangle value '{}' "
"(must be either '*', 'X/Y WxH', 'X/Y', 'WxH', 'X/Y WH', 'X/Y WH' or 'XY WH' where X, Y, W, H may be int or float and XY, WH must be float)".format(str))
def str2crop(str, size):
""" read crop values pair from string '*' or 'L/T/R/B'
"""
# check for '*'
if str == "*":
# return zero borders
return [0, 0, 0, 0]
# check for L/T/R/B
r = re.match(
r'^\s*([.\d]+)\s*/\s*([.\d]+)\s*/\s*([.\d]+)\s*/\s*([.\d]+)\s*$', str)
if r:
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(3), size[X]),
absolute(r.group(4), size[Y])]
# check for LR/TB
r = re.match(
r'^\s*([.\d]+)\s*/\s*([.\d]+)\s*$', str)
if r:
return [absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y]),
absolute(r.group(1), size[X]),
absolute(r.group(2), size[Y])]
# check for LTRB
r = re.match(
r'^\s*([.\d]+)\s*$', str)
if r:
return [absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y]),
absolute(r.group(1), size[X]),
absolute(r.group(1), size[Y])]
# didn't get it
raise RuntimeError("syntax error in crop value '{}' "
"(must be either '*', 'L/T/R/B', 'LR/TB', 'LTRB' where L, T, R, B, LR/TB and LTRB must be int or float')".format(str))
def str2alpha(str):
""" read alpha values from string as float between 0.0 and 1.0 or as int between 0 an 255
"""
# check for floating point value
r = re.match(
r'^\s*([.\d]+)\s*$', str)
if r:
# return absolute proportional to 255
return absolute(r.group(1), 255)
# didn't get it
raise RuntimeError("syntax error in alpha value '{}' "
"(must be float or int)".format(str))
| mit |
mahmoud/wapiti | wapiti/operations/utils.py | 1 | 12249 | # -*- coding: utf-8 -*-
import sys
from heapq import heappush, heappop
import itertools
from functools import total_ordering
def is_scalar(obj):
return not hasattr(obj, '__iter__') or isinstance(obj, basestring)
def prefixed(arg, prefix=None):
if prefix and not arg.startswith(prefix):
arg = prefix + arg
return arg
@total_ordering
class MaxInt(long):
"""
A quite-large integer type that tries to be like float('inf')
(Infinity), but can be used for slicing and other integer
operations. float('inf') is generally more correct, except that
mixing a float and integer in arithmetic operations will result in
a float, which will raise an error on slicing.
"""
def __new__(cls, *a, **kw):
return super(MaxInt, cls).__new__(cls, sys.maxint + 1)
def __init__(self, name='MAX'):
self._name = str(name)
def __repr__(self):
return self._name
def __str__(self):
return repr(self)
# TODO: better math
for func in ('__add__', '__sub__', '__mul__', '__floordiv__', '__div__',
'__mod__', '__divmod__', '__pow__', '__lshift__',
'__rshift__'):
locals()[func] = lambda self, other: self
def __gt__(self, other):
return not self == other
def __eq__(self, other):
return isinstance(other, MaxInt)
def __int__(self):
return self
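# Example (assumed): m = MaxInt() behaves like float('inf') but stays an
# integer type, so range(5)[:m] == [0, 1, 2, 3, 4] works, while slicing
# with float('inf') would raise a TypeError.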
class OperationExample(object):
"""
Sort of like a partial, but specialer.
# other types of tests?
"""
def __init__(self,
param=None,
limit=None,
op_type=None,
**kw):
self.op_type = op_type
self.param = param
self.limit = limit
self.doc = kw.pop('doc', '')
self.test = kw.pop('test', None)
# test defaults to limit_equal_or_depleted in test_ops.py
if kw:
raise TypeError('got unexpected keyword arguments: %r' % kw)
@property
def op_name(self):
if self.op_type is None:
return None
return self.op_type.__name__
@property
def disp_name(self):
if not self.op_type:
return '(unbound OperationExample)'
tmpl = '%(type)s(%(param)r, limit=%(limit)s)'
if self.op_type.input_field is None:
tmpl = '%(type)s(limit=%(limit)s)'
return tmpl % {'type': self.op_type.__name__,
'param': self.param,
'limit': self.limit}
def bind_op_type(self, op_type):
if self.op_type is None:
self.op_type = op_type
if self.limit is None:
try:
pql = op_type.per_query_limit
except AttributeError:
pql = op_type.subop_chain[0].per_query_limit
self.limit = pql.get_limit()
return
def make_op(self, mag=None):
if not self.op_type:
raise TypeError('no Operation type assigned')
mag = int(mag or 1)
limit = self.limit * mag
if self.op_type.input_field is None:
return self.op_type(limit=limit)
return self.op_type(self.param, limit=limit)
def __repr__(self):
cn = self.__class__.__name__
kwargs = ['param', 'limit', 'test', 'doc']
kw_parts = ['op_type=%s' % self.op_name]
kw_parts.extend(['%s=%r' % (a, getattr(self, a))
for a in kwargs if getattr(self, a)])
kwarg_str = ', '.join(kw_parts)
return '%s(%s)' % (cn, kwarg_str)
__str__ = __repr__
"""
WrapperType and Wrapper (defined below) are a pair of what are technically
metaclasses, but really just a very overwrought way of enabling
customized versions of types floating around in some
locations. Because Wapiti is a DSL, but also just a bunch of Python,
we have to deal with the fact that if you modify a type/class, it will
be modified everywhere that references it.
TL;DR: This overblown thing lets Operations use something like
Prioritized(GetCategory, key='total_count'), which sets a priority for
better queueing, without modifying the GetCategory Operation
itself. (Different operations will want to prioritize different
things.)
(There is almost certainly a better way, but this was a bit of
fun. Ever made an object that is an instance and a subclass of
itself?)
"""
def make_type_wrapper(name, init_args=None):
init_args = init_args or []
args, defaults = [], {}
for ia in init_args:
try:
arg, _default = ia
defaults[arg] = _default
except ValueError:
arg = ia
if not isinstance(arg, basestring):
raise TypeError('expected string arg name, not %r' % arg)
args.append(arg)
attrs = {'_args': args, '_defaults': defaults}
return WrapperType(str(name), (Wrapper,), attrs)
class WrapperType(type):
@property
def _repr_args(self):
ret = []
for a in self._args:
try:
ret.append((a, self._defaults[a]))
except KeyError:
ret.append(a)
return ret
def __repr__(cls):
name, cname = cls.__name__, cls.__class__.__name__
if cls._repr_args:
return '%s(%r, %r)' % (cname, name, cls._repr_args)
else:
return '%s(%r)' % (cname, name)
class Wrapper(object):
__metaclass__ = WrapperType
_args, _defaults = [], {}
def __init__(self, to_wrap, *args, **kwargs):
wrapped_dict = {}
if isinstance(to_wrap, Wrapper):
wrapped_dict = dict(to_wrap._wrapped_dict)
to_wrap = to_wrap._wrapped
self.__dict__['_wrapped'] = to_wrap
self.__dict__['_wrapped_dict'] = wrapped_dict
cn = self.__name__
for arg_i, arg_name in enumerate(self._args):
try:
val = args[arg_i]
if arg_name in kwargs:
raise TypeError('%s got multiple values for arg %r'
% (cn, arg_name))
except IndexError:
try:
val = kwargs.pop(arg_name)
except KeyError:
try:
val = self._defaults[arg_name]
except KeyError:
raise TypeError('%s expected required arg %r'
% (cn, arg_name))
setattr(self, arg_name, val)
return
def __repr__(self):
kv = ', '.join(['%s=%r' % (k, v) for k, v
in self._wrapped_dict.items()])
tmpl = "<wrapped %r (%s)>"
return tmpl % (self._wrapped, kv)
def __getattr__(self, name):
return getattr(self._wrapped, name)
def __setattr__(self, name, val):
super(Wrapper, self).__setattr__(name, val)
self._wrapped_dict[name] = val
def __delattr__(self, name):
super(Wrapper, self).__delattr__(name)
self._wrapped_dict.pop(name, None)
def __call__(self, *a, **kw):
return self._wrapped(*a, **kw)
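# Minimal usage sketch (assumed; GetCategory stands in for any Operation type):
# Prioritized = make_type_wrapper('Prioritized', [('key', None)])
# prioritized_get = Prioritized(GetCategory, key='total_count')
# Calling prioritized_get(...) still constructs a plain GetCategory, but the
# wrapper carries its own 'key' attribute without mutating the shared type.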
REMOVED = '<removed-task>'
class PriorityQueue(object):
"""
Real quick type based on the heapq docs.
"""
def __init__(self):
self._pq = []
self._entry_map = {}
self.counter = itertools.count()
def add(self, task, priority=None):
# larger numbers = higher priority
priority = -int(priority or 0)
if task in self._entry_map:
self.remove(task)
count = next(self.counter)
entry = [priority, count, task]
self._entry_map[task] = entry
heappush(self._pq, entry)
def remove(self, task):
entry = self._entry_map.pop(task)
entry[-1] = REMOVED
def _cull(self):
while self._pq:
priority, count, task = self._pq[0]
if task is REMOVED:
heappop(self._pq)
continue
return
raise IndexError('empty priority queue')
def peek(self, default=REMOVED):
try:
self._cull()
_, _, task = self._pq[0]
except IndexError:
if default is not REMOVED:
return default
raise IndexError('peek on empty queue')
return task
def pop(self, default=REMOVED):
try:
self._cull()
_, _, task = heappop(self._pq)
del self._entry_map[task]
except IndexError:
if default is not REMOVED:
return default
raise IndexError('pop on empty queue')
return task
def __len__(self):
return len(self._entry_map)
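# Usage sketch (assumed): larger numbers pop first, ties pop in FIFO order.
# pq = PriorityQueue()
# pq.add('low', 1)
# pq.add('high', 10)
# pq.pop() # -> 'high'
# pq.peek() # -> 'low'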
def chunked_iter(src, size, **kw):
"""
Generates 'size'-sized chunks from 'src' iterable. Unless
the optional 'fill' keyword argument is provided, iterables
not evenly divisible by 'size' will have a final chunk that is
smaller than 'size'.
Note that fill=None will in fact use None as the fill value.
>>> list(chunked_iter(range(10), 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(chunked_iter(range(10), 3, fill=None))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
"""
size = int(size)
if size <= 0:
raise ValueError('expected a positive integer chunk size')
do_fill = True
try:
fill_val = kw.pop('fill')
except KeyError:
do_fill = False
fill_val = None
if kw:
raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
if not src:
return
cur_chunk = []
i = 0
for item in src:
cur_chunk.append(item)
i += 1
if i % size == 0:
yield cur_chunk
cur_chunk = []
if cur_chunk:
if do_fill:
lc = len(cur_chunk)
cur_chunk[lc:] = [fill_val] * (size - lc)
yield cur_chunk
return
# From http://en.wikipedia.org/wiki/Wikipedia:Namespace
NAMESPACES = {
'Main': 0,
'Talk': 1,
'User': 2,
'User talk': 3,
'Wikipedia': 4,
'Wikipedia talk': 5,
'File': 6,
'File talk': 7,
'MediaWiki': 8,
'MediaWiki talk': 9,
'Template': 10,
'Template talk': 11,
'Help': 12,
'Help talk': 13,
'Category': 14,
'Category talk': 15,
'Portal': 100,
'Portal talk': 101,
'Book': 108,
'Book talk': 109,
'Special': -1,
'Media': -2}
def bucketize(src, keyfunc=None):
"""
Group values in 'src' iterable by value returned by 'keyfunc'.
keyfunc defaults to bool, which will group the values by
truthiness; at most there will be two keys, True and False, and
each key will have a list with at least one item.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
"""
try:
iter(src)  # 'is_iterable' was undefined in this module; probe iterability instead
except TypeError:
raise TypeError('expected an iterable')
if keyfunc is None:
keyfunc = bool
if not callable(keyfunc):
raise TypeError('expected callable key function')
ret = {}
for val in src:
key = keyfunc(val)
ret.setdefault(key, []).append(val)
return ret
def bucketize_bool(src, keyfunc=None):
"""
Like bucketize, but for added convenience returns a tuple of
(truthy_values, falsy_values).
>>> nonempty, empty = bucketize_bool(['', '', 'hi', '', 'bye'])
>>> nonempty
['hi', 'bye']
keyfunc defaults to bool, but can be carefully overridden to
use any function that returns either True or False.
>>> import string
>>> is_digit = lambda x: x in string.digits
>>> decimal_digits, hexletters = bucketize_bool(string.hexdigits, is_digit)
>>> ''.join(decimal_digits), ''.join(hexletters)
('0123456789', 'abcdefABCDEF')
"""
bucketized = bucketize(src, keyfunc)
return bucketized.get(True, []), bucketized.get(False, [])
def coerce_namespace(ns_arg):
ns_str = str(ns_arg).capitalize()
return NAMESPACES.get(ns_str, ns_str)
| bsd-3-clause |
vklap/demo-ddd-python | run.py | 1 | 1665 | from app import app, init_db
if __name__ == '__main__':
from app.domain.entry import Entry
from app.infrastructure.repositories import entries as entries_repo
init_db()
entry1 = Entry(title='Domain Driven Design', text='Tackling complexity in the Heart of Software. The software development community widely acknowledges that domain modeling is central to software design. Through domain models, software developers are able to express rich functionality and translate it into a software implementation that truly serves the needs of its users. But despite its obvious importance, there are few practical resources that explain how to incorporate effective domain modeling into the software development process.')
entry2 = Entry(title='Design Patterns: Elements of Reusable Object-Oriented Software', text="Design Patterns is a modern classic in the literature of object-oriented development, offering timeless and elegant solutions to common problems in software design. It describes patterns for managing object creation, composing objects into larger structures, and coordinating control flow between objects. The book provides numerous examples where using composition rather than inheritance can improve the reusability and flexibility of code. Note, though, that it's not a tutorial but a catalog that you can use to find an object-oriented design pattern that's appropriate for the needs of your particular application--a selection for virtuoso programmers who appreciate (or require) consistent, well-engineered object-oriented designs.")
entries_repo.add_entry(entry1)
entries_repo.add_entry(entry2)
app.run(debug=True) | mit |
shepdelacreme/ansible | lib/ansible/plugins/action/dellos9.py | 21 | 4061 | #
# (c) 2016 Red Hat Inc.
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.dellos9.dellos9 import dellos9_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(dellos9_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'dellos9'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
# make sure we are in the right cli context which should be
# enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| gpl-3.0 |
w1ll1am23/home-assistant | homeassistant/components/rest/__init__.py | 5 | 5796 | """The rest component."""
import asyncio
import logging
import httpx
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_HEADERS,
CONF_METHOD,
CONF_PARAMS,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_SCAN_INTERVAL,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_DIGEST_AUTHENTICATION,
SERVICE_RELOAD,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import discovery
from homeassistant.helpers.entity_component import (
DEFAULT_SCAN_INTERVAL,
EntityComponent,
)
from homeassistant.helpers.reload import async_reload_integration_platforms
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import COORDINATOR, DOMAIN, PLATFORM_IDX, REST, REST_DATA, REST_IDX
from .data import RestData
from .schema import CONFIG_SCHEMA # noqa: F401
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["binary_sensor", "notify", "sensor", "switch"]
COORDINATOR_AWARE_PLATFORMS = [SENSOR_DOMAIN, BINARY_SENSOR_DOMAIN]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the rest platforms."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
_async_setup_shared_data(hass)
async def reload_service_handler(service):
"""Remove all user-defined groups and load new ones from config."""
conf = await component.async_prepare_reload()
if conf is None:
return
await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)
_async_setup_shared_data(hass)
await _async_process_config(hass, conf)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
return await _async_process_config(hass, config)
@callback
def _async_setup_shared_data(hass: HomeAssistant):
"""Create shared data for platform config and rest coordinators."""
hass.data[DOMAIN] = {key: [] for key in [REST_DATA, *COORDINATOR_AWARE_PLATFORMS]}
async def _async_process_config(hass, config) -> bool:
"""Process rest configuration."""
if DOMAIN not in config:
return True
refresh_tasks = []
load_tasks = []
for rest_idx, conf in enumerate(config[DOMAIN]):
scan_interval = conf.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
resource_template = conf.get(CONF_RESOURCE_TEMPLATE)
rest = create_rest_data_from_config(hass, conf)
coordinator = _rest_coordinator(hass, rest, resource_template, scan_interval)
refresh_tasks.append(coordinator.async_refresh())
hass.data[DOMAIN][REST_DATA].append({REST: rest, COORDINATOR: coordinator})
for platform_domain in COORDINATOR_AWARE_PLATFORMS:
if platform_domain not in conf:
continue
for platform_conf in conf[platform_domain]:
hass.data[DOMAIN][platform_domain].append(platform_conf)
platform_idx = len(hass.data[DOMAIN][platform_domain]) - 1
load = discovery.async_load_platform(
hass,
platform_domain,
DOMAIN,
{REST_IDX: rest_idx, PLATFORM_IDX: platform_idx},
config,
)
load_tasks.append(load)
if refresh_tasks:
await asyncio.gather(*refresh_tasks)
if load_tasks:
await asyncio.gather(*load_tasks)
return True
async def async_get_config_and_coordinator(hass, platform_domain, discovery_info):
"""Get the config and coordinator for the platform from discovery."""
shared_data = hass.data[DOMAIN][REST_DATA][discovery_info[REST_IDX]]
conf = hass.data[DOMAIN][platform_domain][discovery_info[PLATFORM_IDX]]
coordinator = shared_data[COORDINATOR]
rest = shared_data[REST]
if rest.data is None:
await coordinator.async_request_refresh()
return conf, coordinator, rest
def _rest_coordinator(hass, rest, resource_template, update_interval):
"""Wrap a DataUpdateCoordinator around the rest object."""
if resource_template:
async def _async_refresh_with_resource_template():
rest.set_url(resource_template.async_render(parse_result=False))
await rest.async_update()
update_method = _async_refresh_with_resource_template
else:
update_method = rest.async_update
return DataUpdateCoordinator(
hass,
_LOGGER,
name="rest data",
update_method=update_method,
update_interval=update_interval,
)
def create_rest_data_from_config(hass, config):
"""Create RestData from config."""
resource = config.get(CONF_RESOURCE)
resource_template = config.get(CONF_RESOURCE_TEMPLATE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
params = config.get(CONF_PARAMS)
timeout = config.get(CONF_TIMEOUT)
if resource_template is not None:
resource_template.hass = hass
resource = resource_template.async_render(parse_result=False)
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
else:
auth = None
return RestData(
hass, method, resource, auth, headers, params, payload, verify_ssl, timeout
)
| apache-2.0 |
monuszko/django-anothercrm | anothercrm/models.py | 1 | 5607 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Person(models.Model):
SEX_CHOICES = (
('M', 'Male'),
('F', 'Female'),
)
#TODO: validators for name, mobile...
firstname = models.CharField(max_length=30)
lastname = models.CharField(max_length=30)
sex = models.CharField(max_length=1, choices=SEX_CHOICES)
email = models.EmailField(
max_length=200, verbose_name=_('Email address'), blank=True)
mobile = models.CharField(
max_length=20, verbose_name=_('Mobile Phone Number'), blank=True)
address = models.CharField(max_length=100, verbose_name=_('Address'),
help_text=_('24 Badger Rd., etc.'), blank=True)
zipcode = models.CharField(max_length=10, verbose_name=_('Postal code'),
help_text=_("For example, '80-209' in Poland"), blank=True)
city = models.CharField(max_length=100, verbose_name=_('City'), blank=True)
state = models.CharField(
max_length=100, verbose_name=_('State'), blank=True)
country = models.CharField(
max_length=2, verbose_name=_('Country'), blank=True)
creation_date = models.DateTimeField(
verbose_name=_('Creation Date'), auto_now_add=True)
modification_date = models.DateTimeField(
verbose_name=_('Modification Date'), auto_now=True)
def get_absolute_url(self):
from django.core.urlresolvers import reverse
from django.utils.text import slugify
fname = slugify(self.firstname)
lname = slugify(self.lastname)
kwargs = {
'firstname': fname,
'lastname': lname,
'pk': self.id,
}
return reverse('anothercrm:person', kwargs=kwargs)
def __unicode__(self):
return u'{0} {1}'.format(self.firstname, self.lastname)
def employee_count(self):
'''
Returns the number of relationships where the person
is employed at a company.
'''
return self.relationship_set.filter(relatype__category='E').count()
def client_count(self):
'''
Returns the number of relationships where the person
is a client of a company.
'''
return self.relationship_set.filter(relatype__category='C').count()
def company_names(self):
'''
Returns the names of companies the person is involved with.
'''
return ', '.join(self.relationship_set.all().values_list(
'company__name', flat=True))
def employee_relationships(self):
'''
Returns the relationships where the person
is employed at a company.
'''
return self.relationship_set.filter(relatype__category='E')
def client_relationships(self):
'''
Returns the relationships where the person
is a client of a company.
'''
return self.relationship_set.filter(relatype__category='C')
class Trade(models.Model):
name = models.CharField(max_length=100, unique=True,
help_text="the industry the company is in.")
def __unicode__(self):
return self.name
class Company(models.Model):
name = models.CharField(max_length=100)
mission = models.TextField(blank=True, default="To make money.")
trades = models.ManyToManyField(Trade, blank=True)
def __unicode__(self):
return self.name
def get_absolute_url(self):
#TODO: ask on IRC about these imports
from django.core.urlresolvers import reverse
from django.utils.text import slugify
slug = slugify(self.name)
return reverse(
'anothercrm:company', kwargs={'name': slug, 'pk': self.id})
def get_trades(self):
return ', '.join(tr.name for tr in self.trades.all())
get_trades.short_description='Trade(s)'
get_trades.admin_order_field='trades'
def employees_by_position(self):
'''
Returns Relationship objects with employees - not Persons.
'''
return self.relationship_set.filter(
relatype__category='E').order_by('relatype__name')
def clients_by_type(self):
'''
Returns Relationship objects with clients, agents etc. - not Persons.
'''
return self.relationship_set.filter(
relatype__category='C').order_by('relatype__name')
class Meta:
verbose_name_plural = _('companies')
class RelationshipType(models.Model):
CATEGORY_CHOICES = (
('E', 'Employee'),
('C', 'Client'),
)
category = models.CharField(max_length=1, choices=CATEGORY_CHOICES)
name = models.CharField(max_length=50, unique=True,
help_text=("For employees, this is position. For customers, it can"
" be 'regular customer', etc."))
notes = models.TextField(blank=True)
def __unicode__(self):
return u'{0} ({1})'.format(self.name, self.get_category_display())
class Relationship(models.Model):
relatype = models.ForeignKey(RelationshipType,
verbose_name=_('relationship type'))
company = models.ForeignKey(Company)
person = models.ForeignKey(Person)
def __unicode__(self):
return u'{0} {1} {2} {3}'.format(self.person.firstname,
self.person.lastname, self.relatype, self.company)
| agpl-3.0 |
edmorley/django | tests/serializers/models/data.py | 28 | 7612 | """
******** Models for test_data.py ***********
The following classes are for testing basic data marshalling, including
NULL values, where allowed.
The basic idea is to have a model for each Django data type.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from .base import BaseModel
class BinaryData(models.Model):
data = models.BinaryField(null=True)
class BooleanData(models.Model):
data = models.BooleanField(default=False)
class CharData(models.Model):
data = models.CharField(max_length=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class DecimalData(models.Model):
data = models.DecimalField(null=True, decimal_places=3, max_digits=5)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True)
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
class BigIntegerData(models.Model):
data = models.BigIntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class GenericIPAddressData(models.Model):
data = models.GenericIPAddressField(null=True)
class NullBooleanData(models.Model):
data = models.NullBooleanField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class Meta:
ordering = ('id',)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, models.SET_NULL, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor)
class O2OData(models.Model):
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, models.CASCADE, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', models.CASCADE, null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, models.SET_NULL, null=True, to_field='data')
class FKDataToO2O(models.Model):
data = models.ForeignKey(O2OData, models.SET_NULL, null=True)
class M2MIntermediateData(models.Model):
data = models.ManyToManyField(Anchor, through='Intermediate')
class Intermediate(models.Model):
left = models.ForeignKey(M2MIntermediateData, models.CASCADE)
right = models.ForeignKey(Anchor, models.CASCADE)
extra = models.CharField(max_length=30, blank=True, default="doesn't matter")
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True, default=False)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)
class DatePKData(models.Model):
data = models.DateField(primary_key=True)
class DateTimePKData(models.Model):
data = models.DateTimeField(primary_key=True)
class DecimalPKData(models.Model):
data = models.DecimalField(primary_key=True, decimal_places=3, max_digits=5)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
# class FilePKData(models.Model):
# data = models.FileField(primary_key=True)
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class GenericIPAddressPKData(models.Model):
data = models.GenericIPAddressField(primary_key=True)
# This is just a Boolean field with null=True, and we can't test a PK value of NULL.
# class NullBooleanPKData(models.Model):
# data = models.NullBooleanField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class UUIDData(models.Model):
data = models.UUIDField(primary_key=True)
class FKToUUID(models.Model):
data = models.ForeignKey(UUIDData, models.CASCADE)
# Tests for handling fields with pre_save functions, or
# models with save functions that modify data
class AutoNowDateTimeData(models.Model):
data = models.DateTimeField(null=True, auto_now=True)
class ModifyingSaveData(models.Model):
data = models.IntegerField(null=True)
def save(self, *args, **kwargs):
"""
A save method that modifies the data in the object.
A user-defined save() method isn't called when objects are deserialized
(#4459).
"""
self.data = 666
super().save(*args, **kwargs)
# Tests for serialization of models using inheritance.
# Regression for #7202, #7350
class AbstractBaseModel(models.Model):
parent_data = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel(AbstractBaseModel):
child_data = models.IntegerField()
class InheritBaseModel(BaseModel):
child_data = models.IntegerField()
class ExplicitInheritBaseModel(BaseModel):
parent = models.OneToOneField(BaseModel, models.CASCADE, parent_link=True)
child_data = models.IntegerField()
class LengthModel(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
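# --- Illustrative usage (added example, not part of the original module) ----
# A minimal sketch of what these regression models exist for: round-tripping
# an instance through django.core.serializers. Hedged: running this assumes a
# configured Django settings module with this test app installed.
if __name__ == '__main__':
    import uuid
    from django.core import serializers

    obj = UUIDData(data=uuid.uuid4())
    payload = serializers.serialize('json', [obj])
    restored = next(serializers.deserialize('json', payload)).object
    assert restored.data == obj.data  # the UUID primary key survives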
| bsd-3-clause |
jaeilepp/eggie | mne/viz/_3d.py | 1 | 24122 | """Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
from distutils.version import LooseVersion
import os
import inspect
import warnings
from itertools import cycle
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
from ..transforms import read_trans, _find_trans, apply_trans
from ..utils import get_subjects_dir, logger, _check_subject
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
        Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
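# --- Illustrative usage (added example, not part of the original module) ----
# A minimal sketch of calling plot_evoked_field() with objects prepared
# elsewhere; per the docstring above, ``surf_maps`` comes from
# make_field_map and ``evoked`` from an averaged recording.
def _example_plot_evoked_field(evoked, surf_maps):
    # Show the field map at 100 ms; time=None would instead use the average
    # peak latency across sensor types.
    return plot_evoked_field(evoked, surf_maps, time=0.1)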
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
        'coronal', 'axial' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt.show()
return fig
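# --- Illustrative usage (added example, not part of the original module) ----
# A hedged sketch of driving _plot_mri_contours() directly. The paths below
# are hypothetical FreeSurfer outputs; any T1 volume plus BEM surfaces in the
# documented order would do.
def _example_plot_mri_contours():
    surf_fnames = ['bem/inner_skull.surf', 'bem/outer_skull.surf',
                   'bem/outer_skin.surf']  # hypothetical paths
    return _plot_mri_contours('mri/T1.mgz', surf_fnames, orientation='axial',
                              slices=[64, 128, 192], show=True)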
def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
ch_type=None, source='bem'):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans_fname : str | 'auto'
The full path to the `*-trans.fif` file produced during
coregistration.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
to 'bem'. Note. For single layer bems it is recommended to use 'head'.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
if trans_fname == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans_fname = _find_trans(subject, subjects_dir)
trans = read_trans(trans_fname)
surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
if ch_type is None or ch_type == 'meg':
surfs.append(get_meg_helmet_surf(info, trans))
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, surf in enumerate(surfs):
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]
# Make a solid surface
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
if ch_type is None or ch_type == 'eeg':
eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
if l['eeg_loc'] is not None]
if len(eeg_locs) > 0:
eeg_loc = np.array(eeg_locs)
# Transform EEG electrodes to MRI coordinates
eeg_loc = apply_trans(trans['trans'], eeg_loc)
with warnings.catch_warnings(record=True): # traits
mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
color=(1.0, 0.0, 0.0), scale_factor=0.005)
else:
warnings.warn('EEG electrode locations not found. '
'Cannot plot EEG electrodes.')
mlab.view(90, 90)
return fig
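# --- Illustrative usage (added example, not part of the original module) ----
# A minimal sketch of plot_trans(); ``info`` would normally come from a raw
# or evoked file, and the trans file from coregistration. Subject and file
# names are hypothetical.
def _example_plot_trans(info):
    return plot_trans(info, trans_fname='sample-trans.fif', subject='sample',
                      subjects_dir='/path/to/subjects', source='head')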
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
"""
import surfer
from surfer import Brain, TimeViewer
if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
raise NotImplementedError('hemi type "%s" not supported with your '
'version of pysurfer. Please upgrade to '
'version 0.4 or higher.' % hemi)
try:
import mayavi
from mayavi import mlab
except ImportError:
from enthought import mayavi
from enthought.mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
raise RuntimeError('`figure` must be a list with the same '
'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
subject = _check_subject(stc.subject, subject, False)
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = inspect.getargspec(Brain.__init__)[0]
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
else:
logger.info('PySurfer does not support "views" argument, please '
'consider updating to a newer version (0.4 or later)')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertno[0])]
else:
data = stc.data[len(stc.vertno[0]):]
vertices = stc.vertno[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
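# --- Illustrative usage (added example, not part of the original module) ----
# A hedged sketch of plot_source_estimates(); ``stc`` would come from an
# inverse solution. The subject name, thresholds and view are placeholders.
def _example_plot_source_estimates(stc):
    return plot_source_estimates(stc, subject='sample', hemi='lh',
                                 fmin=3., fmid=6., fmax=9., views='lat',
                                 subjects_dir='/path/to/subjects')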
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=['cone', 'sphere'],
scale_factors=[1, 0.6],
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates it is
displayed with a sphere otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
fig_name :
Mayavi figure name.
fig_number :
Matplotlib figure number.
labels : ndarray or list of ndarrays
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
            stcs is a list, i.e. one label for each stc.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
try:
from mayavi import mlab
except ImportError:
from enthought.mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple))
and len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(), c=c,
linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
if show:
plt.show()
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
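# --- Illustrative usage (added example, not part of the original module) ----
# A minimal sketch of plot_sparse_source_estimates(); ``src`` is a source
# space (e.g. from mne.read_source_spaces) and ``stcs`` one or more sparse
# estimates, e.g. from a mixed-norm solver.
def _example_plot_sparse_source_estimates(src, stcs):
    return plot_sparse_source_estimates(src, stcs, fig_name='Sparse dipoles',
                                        opacity=0.1, high_resolution=False)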
| bsd-2-clause |
mihail911/nupic | examples/prediction/experiments/confidenceTest/2/description.py | 17 | 2037 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Tests the following set of sequences:
z-a-b-c: (1X)
a-b-c: (6X)
a-d-e: (2X)
a-f-g-a-h: (1X)
We want to ensure that when we see 'a', we predict 'b' with the highest
confidence, then 'd', then 'f' and 'h' with equally low confidence.
We expect the following prediction scores:
inputPredScore_at1 : 0.7
inputPredScore_at2 : 1.0
inputPredScore_at3 : 1.0
inputPredScore_at4 : 1.0
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
config = dict(
sensorVerbosity=0,
spVerbosity=0,
tpVerbosity=0,
ppVerbosity=2,
filenameTrain = 'confidence/confidence2.csv',
filenameTest = 'confidence/confidence2.csv',
iterationCountTrain=None,
iterationCountTest=None,
trainTPRepeats = 5,
trainTP=True,
)
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
weiawe/django | scripts/manage_translations.py | 277 | 7141 | #!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
# output how many strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command supports the --languages and --resources options to limit its
# operation to the specified language or resource. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from argparse import ArgumentParser
from subprocess import PIPE, Popen, call
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
    If the resources list is not None, keep only the directories whose names appear in it.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
# Collect all locale directories
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
# Filter by resources, if any
if resources is not None:
res_names = [d[0] for d in dirs]
dirs = [ld for ld in dirs if ld[0] in resources]
if len(resources) > len(dirs):
print("You have specified some unknown resources. "
"Available resource names are: %s" % (', '.join(res_names),))
exit(1)
return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django.core"
else:
return "django.contrib-%s" % name
def _check_diff(cat_name, base_path):
"""
Output the approximate number of changed/added strings in the en catalog.
"""
po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
p = Popen("git diff -U0 %s | egrep '^[-+]msgid' | wc -l" % po_path,
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
num_changes = int(output.strip())
print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
"""
Update the en/LC_MESSAGES/django.po (main and contrib) files with
new/updated translatable strings.
"""
if resources is not None:
print("`update_catalogs` will always process all resources.")
contrib_dirs = _get_locale_dirs(None, include_core=False)
os.chdir(os.path.join(os.getcwd(), 'django'))
print("Updating en catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'])
print("Updating en JS catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'], domain='djangojs')
# Output changed stats
_check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
for name, dir_ in contrib_dirs:
_check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
"""
Output language statistics of committed translation files for each
Django catalog.
If resources is provided, it should be a list of translation resource to
limit the output (e.g. ['core', 'gis']).
"""
locale_dirs = _get_locale_dirs(resources)
for name, dir_ in locale_dirs:
print("\nShowing translations stats for '%s':" % name)
langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
for lang in langs:
if languages and lang not in languages:
continue
# TODO: merge first with the latest en catalog
p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if p.returncode == 0:
# msgfmt output stats on stderr
print("%s: %s" % (lang, errors.strip()))
else:
print("Errors happened when checking %s translation for %s:\n%s" % (
lang, name, errors))
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex, wrap long lines, generate mo files.
"""
locale_dirs = _get_locale_dirs(resources)
errors = []
for name, dir_ in locale_dirs:
# Transifex pull
if languages is None:
call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
target_langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en'])
else:
for lang in languages:
call('tx pull -r %(res)s -f -l %(lang)s' % {
'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
target_langs = languages
# msgcat to wrap lines and msgfmt for compilation of .mo file
for lang in target_langs:
po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
if not os.path.exists(po_path):
print("No %(lang)s translation for resource %(name)s" % {
'lang': lang, 'name': name})
continue
call('msgcat --no-location -o %s %s' % (po_path, po_path), shell=True)
res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
if res != 0:
errors.append((name, lang))
if errors:
print("\nWARNING: Errors have occurred in following cases:")
for resource, lang in errors:
print("\tResource %s for language %s" % (resource, lang))
exit(1)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = ArgumentParser()
parser.add_argument('cmd', nargs=1)
parser.add_argument("-r", "--resources", action='append',
help="limit operation to the specified resources")
parser.add_argument("-l", "--languages", action='append',
help="limit operation to the specified languages")
options = parser.parse_args()
if options.cmd[0] in RUNABLE_SCRIPTS:
eval(options.cmd[0])(options.resources, options.languages)
else:
print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
| bsd-3-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/nltk/probability.py | 12 | 81647 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
# Liang Dong <ldong@clemson.edu> (additions)
# Geoffrey Sampson <sampson@cantab.net> (additions)
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
_NINF = float('-1e300')
import math
import random
import warnings
from operator import itemgetter
from itertools import imap, islice
from collections import defaultdict
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
# [SB] inherit from defaultdict?
# [SB] for NLTK 3.0, inherit from collections.Counter?
class FreqDist(dict):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist.inc(word.lower())
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
dict.__init__(self)
self._N = 0
self._reset_caches()
if samples:
self.update(samples)
def inc(self, sample, count=1):
"""
Increment this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any
:param count: The amount to increment the sample's count by.
:type count: int
:rtype: None
:raise NotImplementedError: If ``sample`` is not a
supported sample type.
"""
if count == 0: return
self[sample] = self.get(sample,0) + count
def __setitem__(self, sample, value):
"""
Set this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any hashable object
:param count: The new value for the sample's count
:type count: int
:rtype: None
:raise TypeError: If ``sample`` is not a supported sample type.
"""
self._N += (value - self.get(sample, 0))
dict.__setitem__(self, sample, value)
# Invalidate the caches
self._reset_caches()
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return self._N
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def samples(self):
"""
Return a list of all samples that have been recorded as
outcomes by this frequency distribution. Use ``fd[sample]``
to determine the count for each sample.
:rtype: list
"""
return self.keys()
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
"""
Return the number of samples with count r.
:type r: int
:param r: A sample count.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: int
"""
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
def _cache_Nr_values(self):
Nr = [0]
for sample in self:
c = self.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def _cumulative_frequencies(self, samples=None):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type sample: any
:rtype: list(float)
"""
cf = 0.0
if not samples:
samples = self.keys()
for sample in samples:
cf += self[sample]
yield cf
    # slightly odd nomenclature: FreqDist does counts and ProbDist does probs,
    # yet here freq() returns probabilities
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
        if self._N == 0:
return 0
return float(self[sample]) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if self._max_cache is None:
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
self._max_cache = max([(a,b) for (b,a) in self.items()])[1]
return self._max_cache
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
        :type cumulative: bool
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab). '
'See http://matplotlib.sourceforge.net/')
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution (cumulative),
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
        tabulated. If two integer parameters m, n are supplied, tabulate a
        subset of the samples, beginning with m and stopping at n-1.
        :param samples: The samples to tabulate (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print "%4s" % str(samples[i]),
print
for i in range(len(samples)):
print "%4d" % freqs[i],
print
def _sort_keys_by_value(self):
if not self._item_cache:
self._item_cache = sorted(dict.items(self), key=lambda x:(-x[1], x[0]))
def keys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(0), self._item_cache)
def values(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(1), self._item_cache)
def items(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: list(tuple)
"""
self._sort_keys_by_value()
return self._item_cache[:]
def __iter__(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def iterkeys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def itervalues(self):
"""
Return the values sorted in decreasing order.
:rtype: iter
"""
return iter(self.values())
def iteritems(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: iter of any
"""
self._sort_keys_by_value()
return iter(self._item_cache)
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def update(self, samples):
"""
Update the frequency distribution with the provided list of samples.
This is a faster way to add multiple samples to the distribution.
:param samples: The samples to add.
:type samples: list
"""
try:
sample_iter = samples.iteritems()
except:
sample_iter = imap(lambda x: (x,1), samples)
for sample, count in sample_iter:
self.inc(sample, count=count)
def pop(self, other):
self._N -= 1
self._reset_caches()
return dict.pop(self, other)
def popitem(self):
self._N -= 1
self._reset_caches()
return dict.popitem(self)
def clear(self):
self._N = 0
self._reset_caches()
dict.clear(self)
def _reset_caches(self):
self._Nr_cache = None
self._max_cache = None
self._item_cache = None
def __add__(self, other):
clone = self.copy()
clone.update(other)
return clone
def __le__(self, other):
if not isinstance(other, FreqDist): return False
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
def __lt__(self, other):
if not isinstance(other, FreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, FreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, FreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
items = ['%r: %r' % (s, self[s]) for s in self.keys()[:10]]
if len(self) > 10:
items.append('...')
return '<FreqDist: %s>' % ', '.join(items)
def __getitem__(self, sample):
return self.get(sample, 0)
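# --- Illustrative usage (added example, not part of the original module) ----
# A small sketch of the FreqDist API documented above: counting, freq() and
# Nr() on a toy string ('abracadabra' has a:5, b:2, r:2, c:1, d:1).
def _demo_freqdist():
    fdist = FreqDist('abracadabra')
    print(fdist['a'])       # 5 occurrences of 'a'
    print(fdist.N())        # 11 outcomes in total
    print(fdist.freq('a'))  # 5/11, roughly 0.4545
    print(fdist.Nr(2))      # 2 samples ('b' and 'r') occur exactly twice
    print(fdist.max())      # 'a' is the most frequent sample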
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return math.log(p, 2)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, 1-p))
return random.choice(list(self.samples()))
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
        distribution assigns zero probability to all values.
"""
if prob_dict is None:
self._prob_dict = {}
else:
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return 2**(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
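# --- Illustrative usage (added example, not part of the original module) ----
# A hedged sketch of DictionaryProbDist normalization: the raw weights below
# sum to 4.0, so normalize=True rescales them to sum to one.
def _demo_dictionary_probdist():
    pd = DictionaryProbDist({'x': 3.0, 'y': 1.0}, normalize=True)
    print(pd.prob('x'))  # 0.75
    print(pd.prob('y'))  # 0.25
    print(pd.prob('z'))  # 0 for samples outside the dictionary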
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is paramaterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
``c+gamma)/(N+B*gamma)``. This is equivalant to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
        :param gamma: A real number used to parameterize the
            estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
                             'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
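# --- Illustrative usage (added example, not part of the original module) ----
# A worked sketch of the Lidstone formula (c+gamma)/(N+B*gamma) with
# gamma=0.5 on the toy distribution from the FreqDist demo above: for 'a',
# c=5, N=11, B=5, so the estimate is 5.5/13.5, roughly 0.4074.
def _demo_lidstone():
    fdist = FreqDist('abracadabra')
    pd = LidstoneProbDist(fdist, 0.5)
    print(pd.prob('a'))  # (5 + 0.5) / (11 + 5 * 0.5)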
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
    *(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
    *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
    distribution. In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
    In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
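    A minimal usage sketch (hypothetical data; each ordered pair of
    distributions contributes one heldout estimate to the average):
    >>> from nltk.probability import FreqDist, CrossValidationProbDist
    >>> fd1 = FreqDist('the dog barked'.split())
    >>> fd2 = FreqDist('the cat meowed'.split())
    >>> cvpd = CrossValidationProbDist([fd1, fd2], 10)
    >>> p = cvpd.prob('the')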
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([fd.keys() for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
    - *p = T / (Z (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
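    As a worked sketch with hypothetical counts: for a ``freqdist`` with
    *T = 2* observed types and *N = 3* outcomes, and ``bins=10`` (so
    *Z = 8* unseen bins), each unseen event receives 2 / (8 (3 + 2)) = 0.05:
    >>> from nltk.probability import FreqDist, WittenBellProbDist
    >>> fd = FreqDist('the the dog'.split())
    >>> wb = WittenBellProbDist(fd, bins=10)
    >>> wb.prob('cat')
    0.05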
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
        - *p = T / (Z (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalising factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
if c == 0:
return self._P0
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good during their collaboration in
# World War II. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# The situation of frequency zero (unseen events) is quite common in the
# original Good-Turing estimation. Bill Gale and Geoffrey Sampson present
# a simple and effective approach, Simple Good-Turing. As a smoothing
# curve they simply use a power curve:
#
#     Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
#     relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use the unsmoothed estimate r while the
# difference between r and r* exceeds 1.96 standard deviations, and to
# switch to r* once it is less than or equal to that threshold:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion;
# some implementations use a coefficient of 1.65 for a 0.1
# significance criterion.
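#
# As an illustrative sketch (hypothetical counts, not from any corpus):
# with r = 5, Nr = 20 and Nr+1 = 10,
#
#     r* = (r + 1) * Nr+1 / Nr = 6 * 10 / 20 = 3.0
#     1.96 * sqrt((5 + 1)^2 * (10 / 20^2) * (1 + 10 / 20))
#         = 1.96 * sqrt(1.35) ~= 2.28
#
# so |r - r*| = 2 <= 2.28, and the smoothed estimate r* would be used.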
#
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count *c\**:
- *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
- *things with frequency zero in training* = N(1) for c == 0
    where *c* is the original count and *N(i)* is the number of event types
    observed with count *i*. We can think of the count of unseen events as
    the count of events with frequency one (see Jurafsky & Martin 2nd Edition, p101).
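    As a worked sketch with hypothetical counts: if 10 event types were
    seen once (*N(1) = 10*) and 5 were seen twice (*N(2) = 5*), then an
    event with count *c = 1* gets the smoothed count
    *c\* = (1 + 1) N(2) / N(1) = 2 * 5 / 10 = 1.0*.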
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
count = self._freqdist[sample]
        # an unseen sample (count zero) gets the total mass of
        # frequency-one samples, divided uniformly among the unseen bins
        if count == 0 and self._freqdist.N() != 0:
            p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
            if self._bins == self._freqdist.B():
                p0 = 0.0
            else:
                p0 = p0 / (1.0 * self._bins - self._freqdist.B())
            return p0
nc = self._freqdist.Nr(count)
ncn = self._freqdist.Nr(count + 1)
# avoid divide-by-zero errors for sparse datasets
if nc == 0 or self._freqdist.N() == 0:
return 0
return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
"""
:return: The probability mass transferred from the
seen samples to the unseen samples.
:rtype: float
"""
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Simple Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
class SimpleGoodTuringProbDist(ProbDistI):
"""
    SimpleGoodTuring ProbDist approximates the relationship between
    frequency and frequency of frequency by a straight line in log
    space, fit by linear regression.
    Details of the Simple Good-Turing algorithm can be found in:
    - "Good Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
    Given a set of pairs (xi, yi), where xi denotes the frequency and
    yi denotes the frequency of frequency, we want to minimize the
    squared deviations. E(x) and E(y) represent the means of xi and yi.
    - slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
    - intercept: a = E(y) - b.E(x)
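    A minimal usage sketch (the word counts are hypothetical; the fitted
    slope and intercept depend entirely on the data):
    >>> from nltk.probability import FreqDist, SimpleGoodTuringProbDist
    >>> fd = FreqDist('one two two three three three'.split())
    >>> sgt = SimpleGoodTuringProbDist(fd, bins=10)
    >>> p_unseen = sgt.prob('four')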
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist``.B() + 1
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'Bins parameter must not be less than freqdist.B() + 1'
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr(self):
"""
        Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
r, nr = [], []
b, i = 0, 0
while b != self._freqdist.B():
nr_i = self._freqdist.Nr(i)
if nr_i > 0:
b += nr_i
r.append(i)
nr.append(nr_i)
i += 1
return (r, nr)
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
        # For higher sample frequencies the data points become horizontal
        # along the line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
if j > 0:
i = r[j-1]
else:
i = 0
if j != len(r) - 1:
k = r[j+1]
else:
k = 2 * r[j] - i
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
if x_var != 0:
self._slope = xy_cov / x_var
else:
self._slope = 0.0
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
        if prob_cov:
            self._renormal = (1 - self._prob_measure(0)) / prob_cov
        else:
            # guard: an empty distribution would otherwise leave
            # _renormal unset and break prob()
            self._renormal = 1.0
def smoothedNr(self, r):
"""
        Return the smoothed estimate of the number of samples with count r.
        :param r: The frequency (count) value.
        :type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
        if count == 0 and self._freqdist.N() == 0:
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
    def check(self):
        prob_sum = 0.0
        r, nr = self._r_Nr()
        for r_, nr_ in zip(r, nr):
            prob_sum += nr_ * self._prob_measure(r_) * self._renormal
        # add the mass reserved for unseen events
        prob_sum += self._prob_measure(0)
        print "Probability Sum:", prob_sum
        #assert prob_sum == 1.0, "probability sum should be one!"
def discount(self):
"""
        Return the total mass of probability transferred from the
        seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
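    A minimal usage sketch (requires NumPy; the samples and the updated
    probability are hypothetical):
    >>> from nltk.probability import UniformProbDist, MutableProbDist
    >>> pd = UniformProbDist(['a', 'b', 'c'])
    >>> mpd = MutableProbDist(pd, ['a', 'b', 'c'])
    >>> mpd.update('a', 0.5, log=False)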
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
try:
import numpy
except ImportError:
print "Error: Please install numpy; for instructions see http://www.nltk.org/"
exit()
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return 2**(self._data[i])
else:
return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return self._data[i]
else:
return math.log(self._data[i], 2)
else:
return float('-inf')
def update(self, sample, prob, log=True):
"""
        Update the probability for the given sample. This may cause the
        object to stop being a valid probability distribution - the user
        must ensure that they update the sample probabilities such that
        all samples have probabilities between 0 and 1 and that all
        probabilities sum to one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: is the probability already logged
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
if log: self._data[i] = prob
else: self._data[i] = math.log(prob, 2)
else:
if log: self._data[i] = 2**(prob)
else: self._data[i] = prob
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
    return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
               for s in actual_pdist.samples())
def entropy(pdist):
probs = [pdist.prob(s) for s in pdist.samples()]
return -sum([p * math.log(p,2) for p in probs])
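# For example, a fair two-way distribution carries exactly one bit of
# entropy: for MLEProbDist(FreqDist('ab')) each sample has probability
# 0.5, and -2 * (0.5 * log2(0.5)) = 1.0.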
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition].inc(word)
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 6 outcomes>
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond].inc(sample)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return sorted(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in self.itervalues())
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
import pylab
except ImportError:
            raise ValueError('The plot function requires the matplotlib '
                             'package (aka pylab). '
                             'See http://matplotlib.sourceforge.net/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = str(condition)
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len(str(c)) for c in conditions)
print ' ' * condition_size,
for s in samples:
print "%4s" % str(s),
print
for c in conditions:
print "%*s" % (condition_size, str(c)),
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print "%4d" % f,
print
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
class ConditionalProbDistI(defaultdict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return self.keys()
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
    code constructs a ``ConditionalProbDist``, where the probability
    distribution for each condition is an ``ELEProbDist`` with 10 bins
    (here ``cfdist`` is assumed to be an existing ``ConditionalFreqDist``
    of word/tag frequencies):
    >>> from nltk.probability import ConditionalProbDist, ELEProbDist
    >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
    >>> print cpdist['run'].max()
    NN
    >>> print cpdist['run'].prob('NN')
    0.0813
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
# self._probdist_factory = probdist_factory
# self._cfdist = cfdist
# self._factory_args = factory_args
# self._factory_kw_args = factory_kw_args
factory = lambda: probdist_factory(FreqDist(),
*factory_args, **factory_kw_args)
defaultdict.__init__(self, factory)
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
defaultdict.__init__(self, DictionaryProbDist)
self.update(probdist_dict)
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
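    For example, since 0.25 + 0.25 = 0.5:
    >>> from nltk.probability import add_logs
    >>> from math import log
    >>> round(2 ** add_logs(log(0.25, 2), log(0.25, 2)), 4)
    0.5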
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
def sum_logs(logs):
if len(logs) == 0:
        # Use an approximation to negative infinity. What this does
        # depends on your system's float implementation.
return _NINF
else:
return reduce(add_logs, logs[1:], logs[0])
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
def set_logprob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
    distributions. This demonstration creates three frequency
    distributions by sampling a random process whose samples are
    numbered 1 to ``numsamples``; each frequency distribution records
    ``numoutcomes`` outcomes. These three frequency distributions are
    then used to build several probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
    :param numsamples: The number of samples to use in each demo
        frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
GoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print FORMATSTR % tuple(`pdist`[1:9] for pdist in pdists[:-1])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
if len(`str(fdist1)`) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
print 'Generating:'
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print '%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55])
print
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
gt = GoodTuringProbDist(fd)
sgt = SimpleGoodTuringProbDist(fd)
katz = SimpleGoodTuringProbDist(fd, 7)
print '%18s %8s %12s %14s %12s' \
% ("word", "freqency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" )
for key in fd:
print '%18s %8d %12e %14e %12e' \
% (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| agpl-3.0 |
xuxiao19910803/edx | cms/djangoapps/contentstore/views/tests/test_container_page.py | 158 | 9232 | """
Unit tests for the container page.
"""
import re
import datetime
from pytz import UTC
from mock import patch, Mock
from django.http import Http404
from django.test.client import RequestFactory
from django.utils import http
import contentstore.views.component as views
from contentstore.views.tests.utils import StudioPageTestCase
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory
class ContainerPageTestCase(StudioPageTestCase):
"""
Unit tests for the container page.
"""
container_view = 'container_preview'
reorderable_child_view = 'reorderable_container_child_preview'
def setUp(self):
super(ContainerPageTestCase, self).setUp()
self.vertical = self._create_item(self.sequential.location, 'vertical', 'Unit')
self.html = self._create_item(self.vertical.location, "html", "HTML")
self.child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
self.child_vertical = self._create_item(self.child_container.location, 'vertical', 'Child Vertical')
self.video = self._create_item(self.child_vertical.location, "video", "My Video")
self.store = modulestore()
past = datetime.datetime(1970, 1, 1, tzinfo=UTC)
future = datetime.datetime.now(UTC) + datetime.timedelta(days=1)
self.released_private_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Released Private Unit',
start=past)
self.unreleased_private_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Unreleased Private Unit',
start=future)
self.released_public_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Released Public Unit',
start=past)
self.unreleased_public_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Unreleased Public Unit',
start=future)
self.store.publish(self.unreleased_public_vertical.location, self.user.id)
self.store.publish(self.released_public_vertical.location, self.user.id)
def test_container_html(self):
self._test_html_content(
self.child_container,
expected_section_tag=(
'<section class="wrapper-xblock level-page is-hidden studio-xblock-wrapper" '
'data-locator="{0}" data-course-key="{0.course_key}">'.format(self.child_container.location)
),
expected_breadcrumbs=(
r'<a href="/course/{course}{section_parameters}" class="{classes}">\s*Week 1\s*</a>\s*'
r'<a href="/course/{course}{subsection_parameters}" class="{classes}">\s*Lesson 1\s*</a>\s*'
r'<a href="/container/{unit}" class="{classes}">\s*Unit\s*</a>'
).format(
course=re.escape(unicode(self.course.id)),
unit=re.escape(unicode(self.vertical.location)),
classes='navigation-item navigation-link navigation-parent',
section_parameters=re.escape(u'?show={}'.format(http.urlquote(self.chapter.location))),
subsection_parameters=re.escape(u'?show={}'.format(http.urlquote(self.sequential.location))),
),
)
def test_container_on_container_html(self):
"""
Create the scenario of an xblock with children (non-vertical) on the container page.
This should create a container page that is a child of another container page.
"""
draft_container = self._create_item(self.child_container.location, "wrapper", "Wrapper")
self._create_item(draft_container.location, "html", "Child HTML")
def test_container_html(xblock):
self._test_html_content(
xblock,
expected_section_tag=(
'<section class="wrapper-xblock level-page is-hidden studio-xblock-wrapper" '
'data-locator="{0}" data-course-key="{0.course_key}">'.format(draft_container.location)
),
expected_breadcrumbs=(
r'<a href="/course/{course}{section_parameters}" class="{classes}">\s*Week 1\s*</a>\s*'
r'<a href="/course/{course}{subsection_parameters}" class="{classes}">\s*Lesson 1\s*</a>\s*'
r'<a href="/container/{unit}" class="{classes}">\s*Unit\s*</a>\s*'
r'<a href="/container/{split_test}" class="{classes}">\s*Split Test\s*</a>'
).format(
course=re.escape(unicode(self.course.id)),
unit=re.escape(unicode(self.vertical.location)),
split_test=re.escape(unicode(self.child_container.location)),
classes='navigation-item navigation-link navigation-parent',
section_parameters=re.escape(u'?show={}'.format(http.urlquote(self.chapter.location))),
subsection_parameters=re.escape(u'?show={}'.format(http.urlquote(self.sequential.location))),
),
)
# Test the draft version of the container
test_container_html(draft_container)
# Now publish the unit and validate again
self.store.publish(self.vertical.location, self.user.id)
draft_container = self.store.get_item(draft_container.location)
test_container_html(draft_container)
def _test_html_content(self, xblock, expected_section_tag, expected_breadcrumbs):
"""
Get the HTML for a container page and verify the section tag is correct
and the breadcrumbs trail is correct.
"""
html = self.get_page_html(xblock)
self.assertIn(expected_section_tag, html)
self.assertRegexpMatches(html, expected_breadcrumbs)
def test_public_container_preview_html(self):
"""
Verify that a public xblock's container preview returns the expected HTML.
"""
published_unit = self.store.publish(self.vertical.location, self.user.id)
published_child_container = self.store.get_item(self.child_container.location)
published_child_vertical = self.store.get_item(self.child_vertical.location)
self.validate_preview_html(published_unit, self.container_view)
self.validate_preview_html(published_child_container, self.container_view)
self.validate_preview_html(published_child_vertical, self.reorderable_child_view)
def test_draft_container_preview_html(self):
"""
Verify that a draft xblock's container preview returns the expected HTML.
"""
self.validate_preview_html(self.vertical, self.container_view)
self.validate_preview_html(self.child_container, self.container_view)
self.validate_preview_html(self.child_vertical, self.reorderable_child_view)
def _create_item(self, parent_location, category, display_name, **kwargs):
"""
creates an item in the module store, without publishing it.
"""
return ItemFactory.create(
parent_location=parent_location,
category=category,
display_name=display_name,
publish_item=False,
user_id=self.user.id,
**kwargs
)
def test_public_child_container_preview_html(self):
"""
Verify that a public container rendered as a child of the container page returns the expected HTML.
"""
empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
published_empty_child_container = self.store.publish(empty_child_container.location, self.user.id)
self.validate_preview_html(published_empty_child_container, self.reorderable_child_view, can_add=False)
def test_draft_child_container_preview_html(self):
"""
Verify that a draft container rendered as a child of the container page returns the expected HTML.
"""
empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
self.validate_preview_html(empty_child_container, self.reorderable_child_view, can_add=False)
@patch('contentstore.views.component.render_to_response', Mock(return_value=Mock(status_code=200, content='')))
def test_container_page_with_valid_and_invalid_usage_key_string(self):
"""
Check that invalid 'usage_key_string' raises Http404.
"""
request = RequestFactory().get('foo')
request.user = self.user
# Check for invalid 'usage_key_strings'
self.assertRaises(
Http404, views.container_handler,
request,
usage_key_string='i4x://InvalidOrg/InvalidCourse/vertical/static/InvalidContent',
)
# Check 200 response if 'usage_key_string' is correct
response = views.container_handler(
request=request,
usage_key_string=unicode(self.vertical.location)
)
self.assertEqual(response.status_code, 200)
| agpl-3.0 |
Endika/OpenUpgrade | openerp/report/render/rml2html/__init__.py | 381 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from rml2html import parseString
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jamespcole/home-assistant | homeassistant/components/arlo/alarm_control_panel.py | 1 | 4381 | """Support for Arlo Alarm Control Panels."""
import logging
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
PLATFORM_SCHEMA, AlarmControlPanel)
from homeassistant.const import (
ATTR_ATTRIBUTION, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ATTRIBUTION, DATA_ARLO, SIGNAL_UPDATE_ARLO
_LOGGER = logging.getLogger(__name__)
ARMED = 'armed'
CONF_HOME_MODE_NAME = 'home_mode_name'
CONF_AWAY_MODE_NAME = 'away_mode_name'
CONF_NIGHT_MODE_NAME = 'night_mode_name'
DEPENDENCIES = ['arlo']
DISARMED = 'disarmed'
ICON = 'mdi:security'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOME_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_AWAY_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_NIGHT_MODE_NAME, default=ARMED): cv.string,
})
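# A minimal configuration sketch (the mode names are illustrative; they
# must match custom modes defined in the Arlo app):
#
# alarm_control_panel:
#   - platform: arlo
#     home_mode_name: home
#     away_mode_name: armed
#     night_mode_name: night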
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arlo Alarm Control Panels."""
arlo = hass.data[DATA_ARLO]
if not arlo.base_stations:
return
home_mode_name = config.get(CONF_HOME_MODE_NAME)
away_mode_name = config.get(CONF_AWAY_MODE_NAME)
night_mode_name = config.get(CONF_NIGHT_MODE_NAME)
base_stations = []
for base_station in arlo.base_stations:
base_stations.append(ArloBaseStation(base_station, home_mode_name,
away_mode_name, night_mode_name))
add_entities(base_stations, True)
class ArloBaseStation(AlarmControlPanel):
"""Representation of an Arlo Alarm Control Panel."""
def __init__(self, data, home_mode_name, away_mode_name, night_mode_name):
"""Initialize the alarm control panel."""
self._base_station = data
self._home_mode_name = home_mode_name
self._away_mode_name = away_mode_name
self._night_mode_name = night_mode_name
self._state = None
@property
def icon(self):
"""Return icon."""
return ICON
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ARLO, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating Arlo Alarm Control Panel %s", self.name)
mode = self._base_station.mode
if mode:
self._state = self._get_state_from_mode(mode)
else:
self._state = None
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._base_station.mode = DISARMED
async def async_alarm_arm_away(self, code=None):
"""Send arm away command. Uses custom mode."""
self._base_station.mode = self._away_mode_name
async def async_alarm_arm_home(self, code=None):
"""Send arm home command. Uses custom mode."""
self._base_station.mode = self._home_mode_name
async def async_alarm_arm_night(self, code=None):
"""Send arm night command. Uses custom mode."""
self._base_station.mode = self._night_mode_name
@property
def name(self):
"""Return the name of the base station."""
return self._base_station.name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
'device_id': self._base_station.device_id
}
def _get_state_from_mode(self, mode):
"""Convert Arlo mode to Home Assistant state."""
if mode == ARMED:
return STATE_ALARM_ARMED_AWAY
if mode == DISARMED:
return STATE_ALARM_DISARMED
if mode == self._home_mode_name:
return STATE_ALARM_ARMED_HOME
if mode == self._away_mode_name:
return STATE_ALARM_ARMED_AWAY
if mode == self._night_mode_name:
return STATE_ALARM_ARMED_NIGHT
return mode
| apache-2.0 |
google/physical-web | web-service/handlers.py | 1 | 3651 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from urllib import unquote_plus
import helpers
import json
import logging
import models
import webapp2
################################################################################
class Index(webapp2.RequestHandler):
def get(self):
self.response.out.write('')
def head(self):
pass
################################################################################
class GoUrl(webapp2.RequestHandler):
def get(self):
return self._redirect()
def head(self):
return self._redirect()
def _redirect(self):
url = self.request.get('url')
url = url.encode('ascii', 'ignore')
self.redirect(url)
################################################################################
class RefreshUrl(webapp2.RequestHandler):
def post(self):
url = self.request.get('url')
helpers.RefreshUrl(url)
################################################################################
class FaviconUrl(webapp2.RequestHandler):
def get(self):
url = unquote_plus(self.request.get('url'))
response = helpers.FaviconUrl(url)
if response:
self.response.headers['Content-Type'] = response.headers['Content-Type']
self.response.write(response.content)
else:
            self.error(404)
################################################################################
class ResolveScan(webapp2.RequestHandler):
def post(self):
input_data = self.request.body
try:
input_object = json.loads(input_data) # TODO: Data is not sanitised.
objects = input_object.get('objects', [])
secure_only = bool(input_object.get('secureOnly', helpers.DEFAULT_SECURE_ONLY))
except:
objects = []
secure_only = helpers.DEFAULT_SECURE_ONLY
output = helpers.BuildResponse(objects, secure_only)
self.response.headers['Content-Type'] = 'application/json'
        json_data = json.dumps(output)
self.response.write(json_data)
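# A request body sketch for ResolveScan (the URL is illustrative; the
# fields mirror the parsing in post() above):
#
#   {"objects": [{"url": "https://example.com"}], "secureOnly": true}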
################################################################################
class DemoMetadata(webapp2.RequestHandler):
def get(self):
objects = [
{'url': 'http://www.caltrain.com/schedules/realtime/stations/mountainviewstation-mobile.html'},
{'url': 'http://benfry.com/distellamap/'},
{'url': 'http://en.wikipedia.org/wiki/Le_D%C3%A9jeuner_sur_l%E2%80%99herbe'},
{'url': 'http://sfmoma.org'}
]
output = helpers.BuildResponse(objects)
self.response.headers['Content-Type'] = 'application/json'
        json_data = json.dumps(output)
self.response.write(json_data)
def head(self):
pass
################################################################################
app = webapp2.WSGIApplication([
('/', Index),
('/resolve-scan', ResolveScan),
('/refresh-url', RefreshUrl),
('/favicon', FaviconUrl),
('/go', GoUrl),
('/demo', DemoMetadata)
], debug=True)
| apache-2.0 |
andrewmoses/ssquiz | flask/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py | 34 | 55274 | # sql/sqltypes.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
from .elements import quoted_name, type_coerce, _defer_name
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
from .. import event
from ..util import pickle
import decimal
if util.jython:
import array
class _DateAffinity(object):
"""Mixin date/time specific expression adaptations.
    Rules are implemented within Date, Time, Interval, DateTime, Numeric,
    Integer. Based on
    http://www.postgresql.org/docs/current/static/functions-datetime.html.
"""
@property
def _expression_adaptations(self):
raise NotImplementedError()
class Comparator(TypeEngine.Comparator):
_blank_dict = util.immutabledict()
def _adapt_expression(self, op, other_comparator):
othertype = other_comparator.type._type_affinity
return (
op, to_instance(
self.type._expression_adaptations.
get(op, self._blank_dict).
get(othertype, NULLTYPE))
)
comparator_factory = Comparator
class Concatenable(object):
"""A mixin that marks a type as supporting 'concatenation',
typically strings."""
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if (op is operators.add and
isinstance(
other_comparator,
(Concatenable.Comparator, NullType.Comparator)
)):
return operators.concat_op, self.expr.type
else:
return op, self.expr.type
comparator_factory = Comparator
class String(Concatenable, TypeEngine):
"""The base for all string and character types.
In SQL, corresponds to VARCHAR. Can also take Python unicode objects
and encode to the database's encoding in bind params (and the reverse for
result sets.)
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases.
"""
__visit_name__ = 'string'
def __init__(self, length=None, collation=None,
convert_unicode=False,
unicode_error=None,
_warn_on_bytestring=False
):
"""
Create a string-holding type.
:param length: optional, a length for the column for use in
DDL and CAST expressions. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
``length`` for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
with no length is included. Whether the value is
interpreted as bytes or characters is database specific.
:param collation: Optional, a column-level collation for
use in DDL and CAST expressions. Renders using the
COLLATE keyword supported by SQLite, MySQL, and Postgresql.
E.g.::
>>> from sqlalchemy import cast, select, String
>>> print select([cast('some string', String(collation='utf8'))])
SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
.. versionadded:: 0.8 Added support for COLLATE to all
string types.
:param convert_unicode: When set to ``True``, the
:class:`.String` type will assume that
input is to be passed as Python ``unicode`` objects,
and results returned as Python ``unicode`` objects.
If the DBAPI in use does not support Python unicode
(which is fewer and fewer these days), SQLAlchemy
will encode/decode the value, using the
value of the ``encoding`` parameter passed to
:func:`.create_engine` as the encoding.
When using a DBAPI that natively supports Python
unicode objects, this flag generally does not
need to be set. For columns that are explicitly
intended to store non-ASCII data, the :class:`.Unicode`
or :class:`.UnicodeText`
types should be used regardless, which feature
the same behavior of ``convert_unicode`` but
also indicate an underlying column type that
directly supports unicode, such as ``NVARCHAR``.
For the extremely rare case that Python ``unicode``
is to be encoded/decoded by SQLAlchemy on a backend
that does natively support Python ``unicode``,
the value ``force`` can be passed here which will
cause SQLAlchemy's encode/decode services to be
used unconditionally.
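          E.g., a minimal sketch of that rare ``force`` configuration (the
          column name here is illustrative only)::
              from sqlalchemy import Column, String
              Column('legacy_text', String(convert_unicode='force'))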
:param unicode_error: Optional, a method to use to handle Unicode
conversion errors. Behaves like the ``errors`` keyword argument to
the standard library's ``string.decode()`` functions. This flag
requires that ``convert_unicode`` is set to ``force`` - otherwise,
SQLAlchemy is not guaranteed to handle the task of unicode
conversion. Note that this flag adds significant performance
overhead to row-fetching operations for backends that already
return unicode objects natively (which most DBAPIs do). This
flag should only be used as a last resort for reading
strings from a column with varied or corrupted encodings.
"""
if unicode_error is not None and convert_unicode != 'force':
raise exc.ArgumentError("convert_unicode must be 'force' "
"when unicode_error is set.")
self.length = length
self.collation = collation
self.convert_unicode = convert_unicode
self.unicode_error = unicode_error
self._warn_on_bytestring = _warn_on_bytestring
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
return "'%s'" % value
return process
def bind_processor(self, dialect):
if self.convert_unicode or dialect.convert_unicode:
if dialect.supports_unicode_binds and \
self.convert_unicode != 'force':
if self._warn_on_bytestring:
def process(value):
if isinstance(value, util.binary_type):
util.warn_limited(
"Unicode type received non-unicode "
"bind param value %r.",
(util.ellipses_string(value),))
return value
return process
else:
return None
else:
encoder = codecs.getencoder(dialect.encoding)
warn_on_bytestring = self._warn_on_bytestring
def process(value):
if isinstance(value, util.text_type):
return encoder(value, self.unicode_error)[0]
elif warn_on_bytestring and value is not None:
util.warn_limited(
"Unicode type received non-unicode bind "
"param value %r.",
(util.ellipses_string(value),))
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
wants_unicode = self.convert_unicode or dialect.convert_unicode
needs_convert = wants_unicode and \
(dialect.returns_unicode_strings is not True or
self.convert_unicode in ('force', 'force_nocheck'))
needs_isinstance = (
needs_convert and
dialect.returns_unicode_strings and
self.convert_unicode != 'force_nocheck'
)
if needs_convert:
to_unicode = processors.to_unicode_processor_factory(
dialect.encoding, self.unicode_error)
if needs_isinstance:
return processors.to_conditional_unicode_processor_factory(
dialect.encoding, self.unicode_error)
else:
return processors.to_unicode_processor_factory(
dialect.encoding, self.unicode_error)
else:
return None
@property
def python_type(self):
if self.convert_unicode:
return util.text_type
else:
return str
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class Text(String):
"""A variably sized string type.
In SQL, usually corresponds to CLOB or TEXT. Can also take Python
unicode objects and encode to the database's encoding in bind
params (and the reverse for result sets.) In general, TEXT objects
do not have a length; while some databases will accept a length
argument here, it will be rejected by others.
"""
__visit_name__ = 'text'
class Unicode(String):
"""A variable length Unicode string type.
The :class:`.Unicode` type is a :class:`.String` subclass
that assumes input and output as Python ``unicode`` data,
and in that regard is equivalent to the usage of the
``convert_unicode`` flag with the :class:`.String` type.
However, unlike plain :class:`.String`, it also implies an
    underlying column type that explicitly supports non-ASCII
data, such as ``NVARCHAR`` on Oracle and SQL Server.
This can impact the output of ``CREATE TABLE`` statements
and ``CAST`` functions at the dialect level, and can
also affect the handling of bound parameters in some
specific DBAPI scenarios.
The encoding used by the :class:`.Unicode` type is usually
determined by the DBAPI itself; most modern DBAPIs
feature support for Python ``unicode`` objects as bound
values and result set values, and the encoding should
be configured as detailed in the notes for the target
DBAPI in the :ref:`dialect_toplevel` section.
For those DBAPIs which do not support, or are not configured
to accommodate Python ``unicode`` objects
directly, SQLAlchemy does the encoding and decoding
outside of the DBAPI. The encoding in this scenario
is determined by the ``encoding`` flag passed to
:func:`.create_engine`.
When using the :class:`.Unicode` type, it is only appropriate
to pass Python ``unicode`` objects, and not plain ``str``.
If a plain ``str`` is passed under Python 2, a warning
is emitted. If you notice your application emitting these warnings but
you're not sure of the source of them, the Python
``warnings`` filter, documented at
http://docs.python.org/library/warnings.html,
can be used to turn these warnings into exceptions
which will illustrate a stack trace::
import warnings
warnings.simplefilter('error')
For an application that wishes to pass plain bytestrings
and Python ``unicode`` objects to the ``Unicode`` type
equally, the bytestrings must first be decoded into
unicode. The recipe at :ref:`coerce_to_unicode` illustrates
how this is done.
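    A minimal sketch of such a decode step (the encoding and variable
    name shown are assumptions)::
        if isinstance(value, bytes):
            value = value.decode('utf-8')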
See also:
:class:`.UnicodeText` - unlengthed textual counterpart
to :class:`.Unicode`.
"""
__visit_name__ = 'unicode'
def __init__(self, length=None, **kwargs):
"""
Create a :class:`.Unicode` object.
Parameters are the same as that of :class:`.String`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
"""An unbounded-length Unicode string type.
See :class:`.Unicode` for details on the unicode
behavior of this object.
    Like :class:`.Unicode`, usage of the :class:`.UnicodeText` type implies a
unicode-capable type being used on the backend, such as
``NCLOB``, ``NTEXT``.
"""
__visit_name__ = 'unicode_text'
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting Text type.
Parameters are the same as that of :class:`.Text`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
"""A type for ``int`` integers."""
__visit_name__ = 'integer'
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@util.memoized_property
def _expression_adaptations(self):
# TODO: need a dictionary object that will
# handle operators generically here, this is incomplete
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
},
operators.div: {
Integer: self.__class__,
Numeric: Numeric,
},
operators.truediv: {
Integer: self.__class__,
Numeric: Numeric,
},
operators.sub: {
Integer: self.__class__,
Numeric: Numeric,
},
}
class SmallInteger(Integer):
"""A type for smaller ``int`` integers.
Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = 'small_integer'
class BigInteger(Integer):
"""A type for bigger ``int`` integers.
Typically generates a ``BIGINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
"""A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``.
This type returns Python ``decimal.Decimal`` objects by default, unless
the :paramref:`.Numeric.asdecimal` flag is set to False, in which case
they are coerced to Python ``float`` objects.
.. note::
The :class:`.Numeric` type is designed to receive data from a database
type that is explicitly known to be a decimal type
(e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point
type (e.g. ``FLOAT``, ``REAL``, others).
        If the database column on the server is in fact a floating-point
type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
.. note::
The Python ``decimal.Decimal`` class is generally slow
performing; cPython 3.3 has now switched to use the `cdecimal
<http://pypi.python.org/pypi/cdecimal/>`_ library natively. For
older Python versions, the ``cdecimal`` library can be patched
into any application where it will replace the ``decimal``
library fully, however this needs to be applied globally and
before any other modules have been imported, as follows::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
Note that the ``cdecimal`` and ``decimal`` libraries are **not
compatible with each other**, so patching ``cdecimal`` at the
global level is the only way it can be used effectively with
various DBAPIs that hardcode to import the ``decimal`` library.
"""
__visit_name__ = 'numeric'
_default_decimal_return_scale = 10
def __init__(self, precision=None, scale=None,
decimal_return_scale=None, asdecimal=True):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
        :param asdecimal: default True. Whether values
          should be returned as Python ``Decimal`` objects, or
          as floats. Different DBAPIs return one or the other based on
          datatype - the Numeric type will ensure that return values
          are consistent across DBAPIs.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
          Specifying this value will override that length. Types which
do include an explicit ".scale" value, such as the base
:class:`.Numeric` as well as the MySQL float types, will use the
value of ".scale" as the default for decimal_return_scale, if not
otherwise specified.
.. versionadded:: 0.9.0
When using the ``Numeric`` type, care should be taken to ensure
        that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
"""
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
util.warn('Dialect %s+%s does *not* support Decimal '
'objects natively, and SQLAlchemy must '
'convert from floating point - rounding '
'errors and other issues may occur. Please '
'consider storing Decimal numbers as strings '
'or integers on this platform for lossless '
'storage.' % (dialect.name, dialect.driver))
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
self.scale if self.scale is not None
else self._default_decimal_return_scale)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Integer: self.__class__,
},
operators.div: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.add: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.sub: {
Numeric: self.__class__,
Integer: self.__class__,
}
}
class Float(Numeric):
"""Type representing floating point types, such as ``FLOAT`` or ``REAL``.
This type returns Python ``float`` objects by default, unless the
:paramref:`.Float.asdecimal` flag is set to True, in which case they
are coerced to ``decimal.Decimal`` objects.
.. note::
The :class:`.Float` type is designed to receive data from a database
type that is explicitly known to be a floating point type
(e.g. ``FLOAT``, ``REAL``, others)
and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others).
If the database column on the server is in fact a Numeric
type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
"""
__visit_name__ = 'float'
scale = None
def __init__(self, precision=None, asdecimal=False,
decimal_return_scale=None, **kwargs):
"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
          Specifying this value will override that length. Note that the
MySQL float types, which do include "scale", will use "scale"
as the default for decimal_return_scale, if not otherwise specified.
.. versionadded:: 0.9.0
:param \**kwargs: deprecated. Additional arguments here are ignored
by the default :class:`.Float` type. For database specific
floats that support additional arguments, see that dialect's
documentation for details, such as
:class:`sqlalchemy.dialects.mysql.FLOAT`.
"""
self.precision = precision
self.asdecimal = asdecimal
self.decimal_return_scale = decimal_return_scale
if kwargs:
util.warn_deprecated("Additional keyword arguments "
"passed to Float ignored.")
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(
decimal.Decimal,
self._effective_decimal_return_scale)
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
},
operators.div: {
Numeric: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
},
operators.add: {
Numeric: self.__class__,
},
operators.sub: {
Numeric: self.__class__,
}
}
class DateTime(_DateAffinity, TypeEngine):
"""A type for ``datetime.datetime()`` objects.
Date and time types return objects from the Python ``datetime``
module. Most DBAPIs have built in support for the datetime
module, with the noted exception of SQLite. In the case of
SQLite, date and time types are stored as strings which are then
converted back to datetime objects when rows are returned.
"""
__visit_name__ = 'datetime'
def __init__(self, timezone=False):
"""Construct a new :class:`.DateTime`.
:param timezone: boolean. If True, and supported by the
backend, will produce 'TIMESTAMP WITH TIMEZONE'. For backends
that don't support timezone aware timestamps, has no
effect.
"""
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.datetime
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Interval: self.__class__,
},
operators.sub: {
Interval: self.__class__,
DateTime: Interval,
},
}
class Date(_DateAffinity, TypeEngine):
"""A type for ``datetime.date()`` objects."""
__visit_name__ = 'date'
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.date
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Integer: self.__class__,
Interval: DateTime,
Time: DateTime,
},
operators.sub: {
# date - integer = date
Integer: self.__class__,
# date - date = integer.
Date: Integer,
Interval: DateTime,
# date - datetime = interval,
# this one is not in the PG docs
# but works
DateTime: Interval,
},
}
class Time(_DateAffinity, TypeEngine):
"""A type for ``datetime.time()`` objects."""
__visit_name__ = 'time'
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.time
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: DateTime,
Interval: self.__class__
},
operators.sub: {
Time: Interval,
Interval: self.__class__,
},
}
class _Binary(TypeEngine):
"""Define base behavior for binary types."""
def __init__(self, length=None):
self.length = length
def literal_processor(self, dialect):
def process(value):
value = value.decode(dialect.encoding).replace("'", "''")
return "'%s'" % value
return process
@property
def python_type(self):
return util.binary_type
# Python 3 - sqlite3 doesn't need the `Binary` conversion
# here, though pg8000 does to indicate "bytea"
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
return None
return process
# Python 3 has native bytes() type
# both sqlite3 and pg8000 seem to return it,
# psycopg2 as of 2.5 returns 'memoryview'
if util.py2k:
def result_processor(self, dialect, coltype):
if util.jython:
def process(value):
if value is not None:
if isinstance(value, array.array):
return value.tostring()
return str(value)
else:
return None
else:
process = processors.to_str
return process
else:
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
value = bytes(value)
return value
return process
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
if isinstance(value, util.string_types):
return self
else:
return super(_Binary, self).coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
class LargeBinary(_Binary):
"""A type for large binary byte data.
The Binary type generates BLOB or BYTEA when tables are created,
and also converts incoming values using the ``Binary`` callable
provided by each DB-API.
"""
__visit_name__ = 'large_binary'
def __init__(self, length=None):
"""
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
DDL statements, for those BLOB types that accept a length
(i.e. MySQL). It does *not* produce a *lengthed* BINARY/VARBINARY
type - use the BINARY/VARBINARY types specifically for those.
May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
*length* for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued.
"""
_Binary.__init__(self, length=length)
class Binary(LargeBinary):
"""Deprecated. Renamed to LargeBinary."""
def __init__(self, *arg, **kw):
util.warn_deprecated('The Binary type has been renamed to '
'LargeBinary.')
LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
"""Mark a type as possibly requiring schema-level DDL for usage.
Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
    as well as types that are complemented by table or schema level
constraints, triggers, and other rules.
:class:`.SchemaType` classes can also be targets for the
:meth:`.DDLEvents.before_parent_attach` and
:meth:`.DDLEvents.after_parent_attach` events, where the events fire off
surrounding the association of the type object with a parent
:class:`.Column`.
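    E.g., a sketch of listening for one of these events (the type
    instance and listener body here are illustrative only)::
        from sqlalchemy import event
        @event.listens_for(some_enum, "before_parent_attach")
        def _on_attach(target, parent):
            pass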
.. seealso::
:class:`.Enum`
:class:`.Boolean`
"""
def __init__(self, name=None, schema=None, metadata=None,
inherit_schema=False, quote=None, _create_events=True):
if name is not None:
self.name = quoted_name(name, quote)
else:
self.name = None
self.schema = schema
self.metadata = metadata
self.inherit_schema = inherit_schema
self._create_events = _create_events
if _create_events and self.metadata:
event.listen(
self.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create)
)
event.listen(
self.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop)
)
def _set_parent(self, column):
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _set_table(self, column, table):
if self.inherit_schema:
self.schema = table.schema
if not self._create_events:
return
event.listen(
table,
"before_create",
util.portable_instancemethod(
self._on_table_create)
)
event.listen(
table,
"after_drop",
util.portable_instancemethod(self._on_table_drop)
)
if self.metadata is None:
# TODO: what's the difference between self.metadata
# and table.metadata here ?
event.listen(
table.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create)
)
event.listen(
table.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop)
)
def copy(self, **kw):
return self.adapt(self.__class__, _create_events=True)
def adapt(self, impltype, **kw):
schema = kw.pop('schema', self.schema)
metadata = kw.pop('metadata', self.metadata)
_create_events = kw.pop('_create_events', False)
return impltype(name=self.name,
schema=schema,
inherit_schema=self.inherit_schema,
metadata=metadata,
_create_events=_create_events,
**kw)
@property
def bind(self):
return self.metadata and self.metadata.bind or None
def create(self, bind=None, checkfirst=False):
"""Issue CREATE ddl for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.create(bind=bind, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue DROP ddl for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.drop(bind=bind, checkfirst=checkfirst)
def _on_table_create(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_create(target, bind, **kw)
def _on_table_drop(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_drop(target, bind, **kw)
def _on_metadata_create(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_create(target, bind, **kw)
def _on_metadata_drop(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_drop(target, bind, **kw)
class Enum(String, SchemaType):
"""Generic Enum Type.
The Enum type provides a set of possible string values which the
column is constrained towards.
By default, uses the backend's native ENUM type if available,
else uses VARCHAR + a CHECK constraint.
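    E.g., a minimal usage sketch (the table, column and type names here
    are illustrative only)::
        from sqlalchemy import Table, Column, MetaData, Enum
        metadata = MetaData()
        documents = Table('document', metadata,
            Column('status', Enum('draft', 'published', name='status_enum'))
        )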
.. seealso::
:class:`~.postgresql.ENUM` - PostgreSQL-specific type,
which has additional functionality.
"""
__visit_name__ = 'enum'
def __init__(self, *enums, **kw):
"""Construct an enum.
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
:param \*enums: string or unicode enumeration labels. If unicode
labels are present, the `convert_unicode` flag is auto-enabled.
:param convert_unicode: Enable unicode-aware bind parameter and
result-set processing for this Enum's data. This is set
automatically based on the presence of unicode label strings.
:param metadata: Associate this type directly with a ``MetaData``
object. For types that exist on the target database as an
independent schema construct (Postgresql), this type will be
created and dropped within ``create_all()`` and ``drop_all()``
operations. If the type is not associated with any ``MetaData``
object, it will associate itself with each ``Table`` in which it is
used, and will be created when any of those individual tables are
created, after a check is performed for its existence. The type is
only dropped when ``drop_all()`` is called for that ``Table``
object's metadata, however.
:param name: The name of this type. This is required for Postgresql
and any future supported database which requires an explicitly
named type, or an explicitly named constraint in order to generate
the type and/or a table that uses it.
:param native_enum: Use the database's native ENUM type when
available. Defaults to True. When False, uses VARCHAR + check
constraint for all backends.
:param schema: Schema name of this type. For types that exist on the
target database as an independent schema construct (Postgresql),
this parameter specifies the named schema in which the type is
present.
.. note::
The ``schema`` of the :class:`.Enum` type does not
by default make use of the ``schema`` established on the
owning :class:`.Table`. If this behavior is desired,
set the ``inherit_schema`` flag to ``True``.
:param quote: Set explicit quoting preferences for the type's name.
:param inherit_schema: When ``True``, the "schema" from the owning
:class:`.Table` will be copied to the "schema" attribute of this
:class:`.Enum`, replacing whatever value was passed for the
``schema`` attribute. This also takes effect when using the
:meth:`.Table.tometadata` operation.
.. versionadded:: 0.8
"""
self.enums = enums
self.native_enum = kw.pop('native_enum', True)
convert_unicode = kw.pop('convert_unicode', None)
if convert_unicode is None:
for e in enums:
if isinstance(e, util.text_type):
convert_unicode = True
break
else:
convert_unicode = False
if self.enums:
length = max(len(x) for x in self.enums)
else:
length = 0
String.__init__(self,
length=length,
convert_unicode=convert_unicode,
)
SchemaType.__init__(self, **kw)
def __repr__(self):
return util.generic_repr(self,
additional_kw=[('native_enum', True)],
to_inspect=[Enum, SchemaType],
)
def _should_create_constraint(self, compiler):
return not self.native_enum or \
not compiler.dialect.supports_native_enum
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
if self.native_enum:
SchemaType._set_table(self, column, table)
e = schema.CheckConstraint(
type_coerce(column, self).in_(self.enums),
name=_defer_name(self.name),
_create_rule=util.portable_instancemethod(
self._should_create_constraint),
_type_bound=True
)
assert e.table is table
def adapt(self, impltype, **kw):
schema = kw.pop('schema', self.schema)
metadata = kw.pop('metadata', self.metadata)
_create_events = kw.pop('_create_events', False)
if issubclass(impltype, Enum):
return impltype(name=self.name,
schema=schema,
metadata=metadata,
convert_unicode=self.convert_unicode,
native_enum=self.native_enum,
inherit_schema=self.inherit_schema,
_create_events=_create_events,
*self.enums,
**kw)
else:
# TODO: why would we be here?
return super(Enum, self).adapt(impltype, **kw)
class PickleType(TypeDecorator):
"""Holds Python objects, which are serialized using pickle.
PickleType builds upon the Binary type to apply Python's
``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
the way out, allowing any pickleable Python object to be stored as
a serialized binary field.
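    E.g., a minimal usage sketch (the table and column names are
    illustrative only)::
        from sqlalchemy import Table, Column, Integer, MetaData, PickleType
        metadata = MetaData()
        sessions = Table('session_store', metadata,
            Column('id', Integer, primary_key=True),
            Column('payload', PickleType())
        )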
To allow ORM change events to propagate for elements associated
with :class:`.PickleType`, see :ref:`mutable_toplevel`.
"""
impl = LargeBinary
def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
pickler=None, comparator=None):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
        :param pickler: defaults to the ``cPickle`` module, or ``pickle``
          if cPickle is not available. May be any object with
          pickle-compatible ``dumps`` and ``loads`` methods.
:param comparator: a 2-arg callable predicate used
to compare values of this type. If left as ``None``,
the Python "equals" operator is used to compare values.
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.comparator = comparator
super(PickleType, self).__init__()
def __reduce__(self):
return PickleType, (self.protocol,
None,
self.comparator)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
dumps = self.pickler.dumps
protocol = self.protocol
if impl_processor:
def process(value):
if value is not None:
value = dumps(value, protocol)
return impl_processor(value)
else:
def process(value):
if value is not None:
value = dumps(value, protocol)
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
loads = self.pickler.loads
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return loads(value)
else:
def process(value):
if value is None:
return None
return loads(value)
return process
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
else:
return x == y
class Boolean(TypeEngine, SchemaType):
"""A bool datatype.
Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on
the Python side deals in ``True`` or ``False``.
"""
__visit_name__ = 'boolean'
def __init__(
self, create_constraint=True, name=None, _create_events=True):
"""Construct a Boolean.
:param create_constraint: defaults to True. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
"""
self.create_constraint = create_constraint
self.name = name
self._create_events = _create_events
def _should_create_constraint(self, compiler):
return not compiler.dialect.supports_native_boolean
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
if not self.create_constraint:
return
e = schema.CheckConstraint(
type_coerce(column, self).in_([0, 1]),
name=_defer_name(self.name),
_create_rule=util.portable_instancemethod(
self._should_create_constraint),
_type_bound=True
)
assert e.table is table
@property
def python_type(self):
return bool
def literal_processor(self, dialect):
if dialect.supports_native_boolean:
def process(value):
return "true" if value else "false"
else:
def process(value):
return str(1 if value else 0)
return process
def bind_processor(self, dialect):
if dialect.supports_native_boolean:
return None
else:
return processors.boolean_to_int
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL, the native ``INTERVAL`` type is used; for others, the
value is stored as a date which is relative to the "epoch"
(Jan. 1, 1970).
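    E.g., a minimal usage sketch (the column name is illustrative only)::
        from datetime import timedelta
        from sqlalchemy import Column, Interval
        duration = Column('duration', Interval())
        # a bound value such as timedelta(hours=2) is stored natively on
        # PostgreSQL, or as epoch + 2 hours on backends without INTERVAL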
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
"""
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
def __init__(self, native=True,
second_precision=None,
day_precision=None):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently Postgresql, Oracle).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and Postgresql
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
"""
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
def adapt(self, cls, **kw):
if self.native and hasattr(cls, '_adapt_from_generic_interval'):
return cls._adapt_from_generic_interval(self, **kw)
else:
return self.__class__(
native=self.native,
second_precision=self.second_precision,
day_precision=self.day_precision,
**kw)
@property
def python_type(self):
return dt.timedelta
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
def process(value):
if value is not None:
value = epoch + value
return impl_processor(value)
else:
def process(value):
if value is not None:
value = epoch + value
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return value - epoch
else:
def process(value):
if value is None:
return None
return value - epoch
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: DateTime,
Interval: self.__class__,
DateTime: DateTime,
Time: Time,
},
operators.sub: {
Interval: self.__class__
},
operators.mul: {
Numeric: self.__class__
},
operators.truediv: {
Numeric: self.__class__
},
operators.div: {
Numeric: self.__class__
}
}
@property
def _type_affinity(self):
return Interval
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
return self.impl.coerce_compared_value(op, value)
class REAL(Float):
"""The SQL REAL type."""
__visit_name__ = 'REAL'
class FLOAT(Float):
"""The SQL FLOAT type."""
__visit_name__ = 'FLOAT'
class NUMERIC(Numeric):
"""The SQL NUMERIC type."""
__visit_name__ = 'NUMERIC'
class DECIMAL(Numeric):
"""The SQL DECIMAL type."""
__visit_name__ = 'DECIMAL'
class INTEGER(Integer):
"""The SQL INT or INTEGER type."""
__visit_name__ = 'INTEGER'
INT = INTEGER
class SMALLINT(SmallInteger):
"""The SQL SMALLINT type."""
__visit_name__ = 'SMALLINT'
class BIGINT(BigInteger):
"""The SQL BIGINT type."""
__visit_name__ = 'BIGINT'
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type."""
__visit_name__ = 'TIMESTAMP'
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
class DATETIME(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = 'DATETIME'
class DATE(Date):
"""The SQL DATE type."""
__visit_name__ = 'DATE'
class TIME(Time):
"""The SQL TIME type."""
__visit_name__ = 'TIME'
class TEXT(Text):
"""The SQL TEXT type."""
__visit_name__ = 'TEXT'
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
"""
__visit_name__ = 'CLOB'
class VARCHAR(String):
"""The SQL VARCHAR type."""
__visit_name__ = 'VARCHAR'
class NVARCHAR(Unicode):
"""The SQL NVARCHAR type."""
__visit_name__ = 'NVARCHAR'
class CHAR(String):
"""The SQL CHAR type."""
__visit_name__ = 'CHAR'
class NCHAR(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = 'NCHAR'
class BLOB(LargeBinary):
"""The SQL BLOB type."""
__visit_name__ = 'BLOB'
class BINARY(_Binary):
"""The SQL BINARY type."""
__visit_name__ = 'BINARY'
class VARBINARY(_Binary):
"""The SQL VARBINARY type."""
__visit_name__ = 'VARBINARY'
class BOOLEAN(Boolean):
"""The SQL BOOLEAN type."""
__visit_name__ = 'BOOLEAN'
class NullType(TypeEngine):
"""An unknown type.
:class:`.NullType` is used as a default type for those cases where
a type cannot be determined, including:
* During table reflection, when the type of a column is not recognized
by the :class:`.Dialect`
* When constructing SQL expressions using plain Python objects of
unknown types (e.g. ``somecolumn == my_special_object``)
* When a new :class:`.Column` is created, and the given type is passed
as ``None`` or is not passed at all.
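    E.g., a minimal sketch of the last case (the column name is
    illustrative only)::
        from sqlalchemy import Column
        c = Column('data')  # no type given; c.type is a NullType instance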
The :class:`.NullType` can be used within SQL expression invocation
without issue, it just has no behavior either at the expression
construction level or at the bind-parameter/result processing level.
:class:`.NullType` will result in a :exc:`.CompileError` if the compiler
is asked to render the type itself, such as if it is used in a
:func:`.cast` operation or within a schema creation operation such as that
invoked by :meth:`.MetaData.create_all` or the :class:`.CreateTable`
construct.
"""
__visit_name__ = 'null'
_isnull = True
def literal_processor(self, dialect):
def process(value):
return "NULL"
return process
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if isinstance(other_comparator, NullType.Comparator) or \
not operators.is_commutative(op):
return op, self.expr.type
else:
return other_comparator._adapt_expression(op, self)
comparator_factory = Comparator
class MatchType(Boolean):
"""Refers to the return type of the MATCH operator.
    As :meth:`.ColumnOperators.match` is probably the most open-ended
operator in generic SQLAlchemy Core, we can't assume the return type
at SQL evaluation time, as MySQL returns a floating point, not a boolean,
and other backends might do something different. So this type
acts as a placeholder, currently subclassing :class:`.Boolean`.
The type allows dialects to inject result-processing functionality
if needed, and on MySQL will return floating-point values.
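    E.g., the type of the expression below resolves to this placeholder
    (the table and column here are illustrative only)::
        from sqlalchemy import select
        stmt = select([documents.c.body.match('some terms')])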
.. versionadded:: 1.0.0
"""
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()
MATCHTYPE = MatchType()
_type_map = {
int: Integer(),
float: Numeric(),
bool: BOOLEANTYPE,
decimal.Decimal: Numeric(),
dt.date: Date(),
dt.datetime: DateTime(),
dt.time: Time(),
dt.timedelta: Interval(),
util.NoneType: NULLTYPE
}
if util.py3k:
_type_map[bytes] = LargeBinary()
_type_map[str] = Unicode()
else:
_type_map[unicode] = Unicode()
_type_map[str] = String()
# back-assign to type_api
from . import type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api.MATCHTYPE = MATCHTYPE
type_api._type_map = _type_map
TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE
| bsd-3-clause |
redhat-openstack/django | tests/m2m_through/tests.py | 117 | 12908 | from __future__ import absolute_import
from datetime import datetime
from operator import attrgetter
from django.test import TestCase
from .models import (Person, Group, Membership, CustomMembership,
PersonSelfRefM2M, Friendship)
class M2mThroughTests(TestCase):
def setUp(self):
self.bob = Person.objects.create(name='Bob')
self.jim = Person.objects.create(name='Jim')
self.jane = Person.objects.create(name='Jane')
self.rock = Group.objects.create(name='Rock')
self.roll = Group.objects.create(name='Roll')
def test_m2m_through(self):
# We start out by making sure that the Group 'rock' has no members.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# To make Jim a member of Group Rock, simply create a Membership object.
m1 = Membership.objects.create(person=self.jim, group=self.rock)
# We can do the same for Jane and Rock.
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Let's check to make sure that it worked. Jane and Jim should be members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(), [
'Jane',
'Jim'
],
attrgetter("name")
)
# Now we can add a bunch more Membership objects to test with.
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
# We can get Jim's Group membership as with any ForeignKey.
self.assertQuerysetEqual(
self.jim.group_set.all(), [
'Rock',
'Roll'
],
attrgetter("name")
)
# Querying the intermediary model works like normal.
self.assertEqual(
repr(Membership.objects.get(person=self.jane, group=self.rock)),
'<Membership: Jane is a member of Rock>'
)
# It's not only get that works. Filter works like normal as well.
self.assertQuerysetEqual(
Membership.objects.filter(person=self.jim), [
'<Membership: Jim is a member of Rock>',
'<Membership: Jim is a member of Roll>'
]
)
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_forward_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.rock.members.add(self.bob))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.rock.members.create(name='Anne'))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.rock.members.remove(self.jim))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Here we back up the list of all members of Rock.
backup = list(self.rock.members.all())
# ...and we verify that it has worked.
self.assertEqual(
[p.name for p in backup],
['Jane', 'Jim']
)
# The clear function should still work.
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.rock, "members", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.rock.members.all(),[
'Jane',
'Jim'
],
attrgetter("name")
)
def test_reverse_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.bob.group_set.add(self.rock))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.bob.group_set.create(name="funk"))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.jim.group_set.remove(self.rock))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jim, group=self.roll)
# Here we back up the list of all of Jim's groups.
backup = list(self.jim.group_set.all())
self.assertEqual(
[g.name for g in backup],
['Rock', 'Roll']
)
# The clear function should still work.
self.jim.group_set.clear()
# Now Jim will be in no groups.
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.jim, "group_set", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.jim.group_set.all(),[
'Rock',
'Roll'
],
attrgetter("name")
)
def test_custom_tests(self):
# Let's see if we can query through our second relationship.
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
# We can query in the opposite direction as well.
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If we get the number of people in Rock, it should be both Bob and Jim.
self.assertQuerysetEqual(
self.rock.custom_members.all(),[
'Bob',
'Jim'
],
attrgetter("name")
)
# Bob should only be in one custom group.
self.assertQuerysetEqual(
self.bob.custom.all(),[
'Rock'
],
attrgetter("name")
)
# Let's make sure our new descriptors don't conflict with the FK related_name.
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),[
'<CustomMembership: Bob is a member of Rock>'
]
)
def test_self_referential_tests(self):
# Let's first create a person who has no friends.
tony = PersonSelfRefM2M.objects.create(name="Tony")
self.assertQuerysetEqual(
tony.friends.all(),
[]
)
chris = PersonSelfRefM2M.objects.create(name="Chris")
f = Friendship.objects.create(first=tony, second=chris, date_friended=datetime.now())
# Tony should now show that Chris is his friend.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
# But we haven't established that Chris is Tony's Friend.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
f2 = Friendship.objects.create(first=chris, second=tony, date_friended=datetime.now())
# Having added Chris as a friend, let's make sure that his friend set reflects
# that addition.
self.assertQuerysetEqual(
chris.friends.all(),[
'Tony'
],
attrgetter("name")
)
# Chris gets mad and wants to get rid of all of his friends.
chris.friends.clear()
# Now he should not have any more friends.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
# Since this isn't a symmetrical relation, Tony's friend link still exists.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
def test_query_tests(self):
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
m2.invite_reason = "She was just awesome."
m2.date_joined = datetime(2006, 1, 1)
m2.save()
m3.date_joined = datetime(2004, 1, 1)
m3.save()
m5.date_joined = datetime(2004, 1, 1)
m5.save()
# We can query for the related model by using its attribute name (members, in
# this case).
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),[
'Roll'
],
attrgetter("name")
)
# To query through the intermediary model, we specify its model name.
# In this case, membership.
self.assertQuerysetEqual(
Group.objects.filter(membership__invite_reason="She was just awesome."),[
'Rock'
],
attrgetter("name")
)
# If we want to query in the reverse direction by the related model, use its
# model name (group, in this case).
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),[
'Jane',
'Jim'
],
attrgetter("name")
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If the m2m field has specified a related_name, using that will work.
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),[
'Bob',
'Jim'
],
attrgetter("name")
)
# To query through the intermediary model in the reverse direction, we again
# specify its model name (membership, in this case).
self.assertQuerysetEqual(
Person.objects.filter(membership__invite_reason="She was just awesome."),[
'Jane'
],
attrgetter("name")
)
# Let's see all of the groups that Jane joined after 1 Jan 2005:
self.assertQuerysetEqual(
Group.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__person=self.jane),[
'Rock'
],
attrgetter("name")
)
# Queries also work in the reverse direction: Now let's see all of the people
# that have joined Rock since 1 Jan 2005:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__group=self.rock),[
'Jane',
'Jim'
],
attrgetter("name")
)
# Conceivably, queries through membership could return correct, but non-unique
# querysets. To demonstrate this, we query for all people who have joined a
# group after 2004:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)),[
'Jane',
'Jim',
'Jim'
],
attrgetter("name")
)
# Jim showed up twice, because he joined two groups ('Rock', and 'Roll'):
self.assertEqual(
[(m.person.name, m.group.name) for m in Membership.objects.filter(date_joined__gt=datetime(2004, 1, 1))],
[('Jane', 'Rock'), ('Jim', 'Rock'), ('Jim', 'Roll')]
)
# QuerySet's distinct() method can correct this problem.
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)).distinct(),[
'Jane',
'Jim'
],
attrgetter("name")
)
| bsd-3-clause |
3EleVen/kernel_common | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
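	# autodict() auto-vivifies missing keys; the first increment for a
	# new syscall id raises TypeError, which seeds the count at 1 below.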
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Ichag/odoo | addons/account_anglo_saxon/invoice.py | 50 | 13086 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C)
# 2004-2010 Tiny SPRL (<http://tiny.be>).
# 2009-2010 Veritos (http://veritos.nl).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_round as round
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_columns = {
'move_id': fields.many2one('stock.move', string="Move line", help="If the invoice was generated from a stock.picking, reference to the related move line."),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
if inv.type in ('out_invoice','out_refund'):
for i_line in inv.invoice_line:
res.extend(self._anglo_saxon_sale_move_lines(cr, uid, i_line, res, context=context))
elif inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line:
res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
fiscal_pool = self.pool.get('account.fiscal.position')
res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id, context)
if not product:
return res
if type in ('in_invoice','in_refund'):
product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id
if not oa:
oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id
if oa:
fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False
a = fiscal_pool.map_account(cr, uid, fpos, oa)
res['value'].update({'account_id':a})
return res
def _get_price(self, cr, uid, inv, company_currency, i_line, price_unit):
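        # Convert the line amount from the company currency into the invoice
        # currency when they differ, then round to the 'Account' precision.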
cur_obj = self.pool.get('res.currency')
decimal_precision = self.pool.get('decimal.precision')
if inv.currency_id.id != company_currency:
price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice})
else:
price = price_unit * i_line.quantity
return round(price, decimal_precision.precision_get(cr, uid, 'Account'))
def _anglo_saxon_sale_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for sales invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
fiscal_pool = self.pool.get('account.fiscal.position')
fpos = inv.fiscal_position or False
company_currency = inv.company_id.currency_id.id
if i_line.product_id.type != 'service' and i_line.product_id.valuation == 'real_time':
# debit account dacc will be the output account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
# in both cases the credit account cacc will be the expense account
# first check the product, if empty check the category
cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
if not cacc:
cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
if dacc and cacc:
price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price
return [
{
'type':'src',
'name': i_line.name[:64],
'price_unit':price_unit,
'quantity':i_line.quantity,
'price':self._get_price(cr, uid, inv, company_currency, i_line, price_unit),
'account_id':dacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
},
{
'type':'src',
'name': i_line.name[:64],
'price_unit':price_unit,
'quantity':i_line.quantity,
'price': -1 * self._get_price(cr, uid, inv, company_currency, i_line, price_unit),
'account_id':fiscal_pool.map_account(cr, uid, fpos, cacc),
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
},
]
return []
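# Worked example (figures assumed): for a real-time valued stockable product
# with a valuation price_unit of 7.0 and quantity 2, the pair above adds two
# offsetting lines: 14.0 on the stock output account (dacc) and -14.0 on the
# fiscal-position-mapped expense account (cacc), so the COGS pair nets to
# zero inside the customer invoice move.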
def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for purchase invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
company_currency = inv.company_id.currency_id.id
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
decimal_precision = self.pool.get('decimal.precision')
account_prec = decimal_precision.precision_get(cr, uid, 'Account')
# calculate and record the possible price difference between the invoice price and the product valuation price
for line in res:
if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
# for average/FIFO/LIFO costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if inv.currency_id.id != company_currency:
valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
# line amount with the discount applied and taxes excluded
price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'],
i_line.price_unit * (1-(i_line.discount or 0.0)/100.0), line['quantity'])['total']
price_line = round(valuation_price_unit * line['quantity'], account_prec)
price_diff = round(price_unit - price_line, account_prec)
line.update({'price': price_line})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': round(price_diff / line['quantity'], account_prec),
'quantity': line['quantity'],
'price': price_diff,
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
return diff_res
return []
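# Worked example (figures assumed): a line invoiced at 12.0/unit for 5 units
# against a valuation price of 10.0/unit gives price_unit (tax excluded) of
# 60.0 and price_line of 50.0; the stock input line is rewritten to 50.0 and
# the 10.0 difference is posted on the price difference account `acc`.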
class account_invoice(osv.osv):
_inherit = "account.invoice"
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,
description, journal_id, context=context)
if invoice.type == 'in_invoice':
fiscal_position = self.pool.get('account.fiscal.position')
for _, _, line_dict in invoice_data['invoice_line']:
if line_dict.get('product_id'):
product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)
counterpart_acct_id = product.property_stock_account_output and \
product.property_stock_account_output.id
if not counterpart_acct_id:
counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \
product.categ_id.property_stock_account_output_categ.id
if counterpart_acct_id:
fpos = invoice.fiscal_position or False
line_dict['account_id'] = fiscal_position.map_account(cr, uid,
fpos,
counterpart_acct_id)
return invoice_data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
scripnichenko/glance | glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py | 19 | 1157 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
images = sqlalchemy.Table('images', meta, autoload=True)
virtual_size = sqlalchemy.Column('virtual_size',
sqlalchemy.BigInteger)
images.create_column(virtual_size)
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
images = sqlalchemy.Table('images', meta, autoload=True)
images.columns['virtual_size'].drop()
| apache-2.0 |
shlyakpavel/s720-w832-KK-kernel | kernel/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
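# autodict() auto-vivifies nested mappings on first access, so e.g.
# d = autodict(); d['ev']['field']['values'][1] = 'X' needs no intermediate
# initialisation -- exactly the pattern define_flag_value() uses below.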
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
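# Example (illustrative): trace_flag_str(0x09) would yield
# "IRQS_OFF | HARDIRQ" (assuming ascending key iteration), while
# trace_flag_str(0) yields "NONE".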
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-3.0 |
bukepo/openthread | tests/toranj/test-008-permit-join.py | 9 | 3421 | #!/usr/bin/env python3
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: check wpantund `permit-join` functionality and timeout
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
node = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
node.form("permit-join-test")
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'false')
node.permit_join()
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'true')
node.permit_join('0')
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'false')
node.permit_join(port='1234')
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'true')
node.permit_join('0')
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'false')
# check the timeout
node.permit_join('1')
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'true')
time.sleep(1.5)
verify(node.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'false')
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| bsd-3-clause |
allevin/PyGithub | github/Membership.py | 5 | 5901 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Cameron White <cawhite@pdx.edu> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 poulp <mathieu.nerv@gmail.com> #
# Copyright 2014 Tomas Radej <tradej@redhat.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 E. Dunham <github@edunham.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Balázs Rostás <rostas.balazs@gmail.com> #
# Copyright 2017 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 bryanhuntesl <31992054+bryanhuntesl@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2018 itsbruce <it.is.bruce@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class Membership(github.GithubObject.CompletableGithubObject):
"""
This class represents a user's membership in an organization. The reference can be found at http://developer.github.com/v3/orgs/
"""
def __repr__(self):
return self.get__repr__({"url": self._url.value})
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def role(self):
"""
:type: string
"""
self._completeIfNotSet(self._role)
return self._role.value
@property
def organization_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._organization_url)
return self._organization_url.value
@property
def organization(self):
"""
:type: :class:`github.Organization.Organization`
"""
self._completeIfNotSet(self._organization)
return self._organization.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
def _initAttributes(self):
self._url = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._role = github.GithubObject.NotSet
self._organization_url = github.GithubObject.NotSet
self._organization = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "role" in attributes: # pragma no branch
self._role = self._makeStringAttribute(attributes["role"])
if "organization_url" in attributes: # pragma no branch
self._organization_url = self._makeStringAttribute(
attributes["organization_url"]
)
if "organization" in attributes: # pragma no branch
self._organization = self._makeClassAttribute(
github.Organization.Organization, attributes["organization"]
)
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["user"]
)
| lgpl-3.0 |
sacharya/nova | nova/virt/baremetal/base.py | 1 | 2612 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import baremetal_states
class NodeDriver(object):
def __init__(self, virtapi):
self.virtapi = virtapi
def cache_images(self, context, node, instance, **kwargs):
raise NotImplementedError()
def destroy_images(self, context, node, instance):
raise NotImplementedError()
def activate_bootloader(self, context, node, instance, **kwargs):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
raise NotImplementedError()
def activate_node(self, context, node, instance):
"""For operations after power on."""
raise NotImplementedError()
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
raise NotImplementedError()
def get_console_output(self, node, instance):
raise NotImplementedError()
def dhcp_options_for_instance(self, instance):
"""Optional override to return the DHCP options to use for instance.
If no DHCP options are needed, this should not be overridden or None
should be returned.
"""
return None
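# A PXE-style driver might override this to return a list of DHCP option
# dicts, for example (illustrative, not part of this base class):
#   return [{'opt_name': 'bootfile-name', 'opt_value': '/pxelinux.0'}]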
class PowerManager(object):
def __init__(self, **kwargs):
self.state = baremetal_states.DELETED
pass
def activate_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def reboot_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def deactivate_node(self):
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
"""Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
def start_console(self):
pass
def stop_console(self):
pass
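# A minimal concrete power driver would flip baremetal_states in the hooks,
# e.g. (sketch; the _ipmitool helper is hypothetical):
#
# class IPMIPowerManager(PowerManager):
#     def activate_node(self):
#         self._ipmitool('power', 'on')
#         self.state = baremetal_states.ACTIVE
#         return self.state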
| apache-2.0 |
TheMutley/openpilot | pyextra/werkzeug/debug/tbtools.py | 104 | 18451 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types, \
to_native, to_unicode
from werkzeug.filesystem import get_filesystem_encoding
_coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(br'^(.*?)$', re.MULTILINE)
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = b'\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not accidentally trigger a request to /favicon.ico which might
change the application state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&cmd=resource&f=console.png">
<script src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source">%(lines)s</div>
</div>
'''
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret, evalex_trusted=True):
return CONSOLE_HTML % {
'evalex': 'true',
'evalex_trusted': evalex_trusted and 'true' or 'false',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
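# Typical usage (sketch): capture the active exception inside an except
# block and log it:
#
#   try:
#       application(environ, start_response)
#   except Exception:
#       traceback = get_current_traceback(skip=1)
#       traceback.log(sys.stderr)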
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
# frames are appended unconditionally here and filtered later according
# to the magic variables defined by paste.exceptions.collector
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
# if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = ''.join(buf).strip()
return rv.decode('utf-8', 'replace') if PY2 else rv
exception = property(exception)
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u'\n'
if PY2:
tb = tb.encode('utf-8', 'replace')
logfile.write(tb)
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps({
'description': 'Werkzeug Internal Server Error',
'public': False,
'files': {
'traceback.txt': {
'content': self.plaintext
}
}
}).encode('utf-8')
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen('https://api.github.com/gists', data=data)
resp = json.loads(rv.read().decode('utf-8'))
rv.close()
return {
'url': resp['html_url'],
'id': resp['id']
}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = u'Syntax Error'
else:
title = u'Traceback <em>(most recent call last)</em>:'
for frame in self.frames:
frames.append(u'<li%s>%s' % (
frame.info and u' title="%s"' % escape(frame.info) or u'',
frame.render()
))
if self.is_syntax_error:
description_wrapper = u'<pre class=syntaxerror>%s</pre>'
else:
description_wrapper = u'<blockquote>%s</blockquote>'
return SUMMARY_HTML % {
'classes': u' '.join(classes),
'title': title and u'<h3>%s</h3>' % title or u'',
'frames': u'\n'.join(frames),
'description': description_wrapper % escape(self.exception)
}
def render_full(self, evalex=False, secret=None,
evalex_trusted=True):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
'evalex': evalex and 'true' or 'false',
'evalex_trusted': evalex_trusted and 'true' or 'false',
'console': 'false',
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': self.render_summary(include_title=False),
'plaintext': escape(self.plaintext),
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'secret': secret
}
def generate_plaintext_traceback(self):
"""Like the plaintext attribute but returns a generator"""
yield u'Traceback (most recent call last):'
for frame in self.frames:
yield u' File "%s", line %s, in %s' % (
frame.filename,
frame.lineno,
frame.function_name
)
yield u' ' + frame.current_line.strip()
yield self.exception
def plaintext(self):
return u'\n'.join(self.generate_plaintext_traceback())
plaintext = cached_property(plaintext)
id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = to_unicode(fn, get_filesystem_encoding())
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = text_type(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'lines': self.render_line_context(),
}
def render_line_context(self):
before, current, after = self.get_context_lines()
rv = []
def render_line(line, cls):
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
rv.append(
'<pre class="line %s"><span class="ws">%s</span>%s</pre>' % (
cls, ' ' * prefix, escape(stripped_line) or ' '))
for line in before:
render_line(line, 'before')
render_line(current, 'current')
for line in after:
render_line(line, 'after')
return '\n'.join(rv)
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, unicode): # noqa
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
return eval(code, self.globals, self.locals)
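# Example (illustrative): if the frame's locals contain x=1, then
# frame.eval('x + 1', mode='eval') returns 2; the default 'single' mode
# instead behaves like the interactive prompt, printing expression results
# via sys.displayhook.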
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we swallow the exception so that we don't cause trouble
# if the loader is broken.
pass
if source is None:
try:
f = open(to_native(self.filename, get_filesystem_encoding()),
mode='rb')
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
charset = to_native(charset)
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
def get_context_lines(self, context=5):
before = self.sourcelines[self.lineno - context - 1:self.lineno - 1]
past = self.sourcelines[self.lineno:self.lineno + context]
return (
before,
self.current_line,
past,
)
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
| mit |
stefan-jonasson/home-assistant | homeassistant/components/climate/flexit.py | 7 | 4748 | """
Platform for Flexit AC units with CI66 Modbus adapter.
Example configuration:
climate:
- platform: flexit
name: Main AC
slave: 21
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/climate.flexit/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_SLAVE, TEMP_CELSIUS,
ATTR_TEMPERATURE, DEVICE_DEFAULT_NAME)
from homeassistant.components.climate import (ClimateDevice, PLATFORM_SCHEMA)
import homeassistant.components.modbus as modbus
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyflexit==0.3']
DEPENDENCIES = ['modbus']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SLAVE): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string
})
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Flexit Platform."""
modbus_slave = config.get(CONF_SLAVE, None)
name = config.get(CONF_NAME, None)
add_devices([Flexit(modbus_slave, name)], True)
class Flexit(ClimateDevice):
"""Representation of a Flexit AC unit."""
def __init__(self, modbus_slave, name):
"""Initialize the unit."""
from pyflexit import pyflexit
self._name = name
self._slave = modbus_slave
self._target_temperature = None
self._current_temperature = None
self._current_fan_mode = None
self._current_operation = None
self._fan_list = ['Off', 'Low', 'Medium', 'High']
self._filter_hours = None
self._filter_alarm = None
self._heat_recovery = None
self._heater_enabled = False
self._heating = None
self._cooling = None
self._alarm = False
self.unit = pyflexit.pyflexit(modbus.HUB, modbus_slave)
def update(self):
"""Update unit attributes."""
if not self.unit.update():
_LOGGER.warning("Modbus read failed")
self._target_temperature = self.unit.get_target_temp
self._current_temperature = self.unit.get_temp
self._current_fan_mode =\
self._fan_list[self.unit.get_fan_speed]
self._filter_hours = self.unit.get_filter_hours
# Mechanical heat recovery, 0-100%
self._heat_recovery = self.unit.get_heat_recovery
# Heater active 0-100%
self._heating = self.unit.get_heating
# Cooling active 0-100%
self._cooling = self.unit.get_cooling
# Filter alarm 0/1
self._filter_alarm = self.unit.get_filter_alarm
# Heater enabled or not. Does not mean it's necessarily heating
self._heater_enabled = self.unit.get_heater_enabled
# Current operation mode
self._current_operation = self.unit.get_operation
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
'filter_hours': self._filter_hours,
'filter_alarm': self._filter_alarm,
'heat_recovery': self._heat_recovery,
'heating': self._heating,
'heater_enabled': self._heater_enabled,
'cooling': self._cooling
}
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_list(self):
"""Return the list of available fan modes."""
return self._fan_list
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
self.unit.set_temp(self._target_temperature)
def set_fan_mode(self, fan):
"""Set new fan mode."""
self.unit.set_fan_speed(self._fan_list.index(fan))
| mit |
ales-erjavec/scipy | scipy/weave/converters.py | 92 | 2703 | from __future__ import absolute_import, print_function
from . import common_info
from . import c_spec
#----------------------------------------------------------------------------
# The "standard" conversion classes
#----------------------------------------------------------------------------
default = [c_spec.int_converter(),
c_spec.float_converter(),
c_spec.complex_converter(),
c_spec.unicode_converter(),
c_spec.string_converter(),
c_spec.list_converter(),
c_spec.dict_converter(),
c_spec.tuple_converter(),
c_spec.file_converter(),
c_spec.instance_converter(),]
#----------------------------------------------------------------------------
# add numpy array converters to the default
# converter list.
#----------------------------------------------------------------------------
try:
from . import standard_array_spec
default.append(standard_array_spec.array_converter())
except ImportError:
pass
#----------------------------------------------------------------------------
# add numpy scalar converters to the default
# converter list.
#----------------------------------------------------------------------------
try:
from . import numpy_scalar_spec
default.append(numpy_scalar_spec.numpy_complex_scalar_converter())
except ImportError:
pass
#----------------------------------------------------------------------------
# Add VTK support
#----------------------------------------------------------------------------
try:
from . import vtk_spec
default.insert(0,vtk_spec.vtk_converter())
except ImportError:
pass
#----------------------------------------------------------------------------
# Add "sentinal" catchall converter
#
# if everything else fails, this one is the last hope (it always works)
#----------------------------------------------------------------------------
default.append(c_spec.catchall_converter())
standard_info = [common_info.basic_module_info()]
standard_info += [x.generate_build_info() for x in default]
#----------------------------------------------------------------------------
# Blitz conversion classes
#
# same as default, but will convert numpy arrays to blitz C++ classes
#----------------------------------------------------------------------------
try:
from . import blitz_spec
blitz = [blitz_spec.array_converter()] + default
#-----------------------------------
# Add "sentinal" catchall converter
#
# if everything else fails, this one
# is the last hope (it always works)
#-----------------------------------
blitz.append(c_spec.catchall_converter())
except:
pass
| bsd-3-clause |
40423226/2017springcd_bg1 | blog/theme/glow/primitive.py | 161 | 4838 | from javascript import JSConstructor, JSObject
from .vector import vec
class primitive:
def __init__(self, prim, **kwargs):
for _key in kwargs.keys():
if isinstance(kwargs[_key], vec):
kwargs[_key]=kwargs[_key]._vec
self._prim=prim(kwargs)
def rotate(self, **kwargs):
if 'axis' in kwargs:
#for now lets assume axis is a vector
kwargs['axis']=kwargs['axis']._vec
self._prim.rotate(kwargs)
@property
def pos(self):
_v=vec()
_v._set_vec(self._prim.pos)
return _v
@pos.setter
def pos(self, value):
if isinstance(value, vec):
self._prim.pos=value._vec
else:
print("Error! pos must be a vector")
@property
def color(self):
_v=vec()
_v._set_vec(self._prim.color)
return _v
@color.setter
def color(self, value):
if isinstance(value, vec):
self._prim.color=value._vec
else:
print("Error! color must be a vec")
@property
def axis(self):
_v=vec()
_v._set_vec(self._prim.axis)
return _v
@axis.setter
def axis(self, value):
if isinstance(value, vec):
self._prim.axis=value._vec
else:
print("Error! axis must be a vec")
@property
def size(self):
return self._prim.size
@size.setter
def size(self, value):
self._prim.size=value
@property
def up(self):
_v=vec()
_v._set_vec(self._prim.up)
return _v
@up.setter
def up(self, value):
if isinstance(value, vec):
self._prim.up=value._vec
else:
print("Error! up must be a vec")
@property
def opacity(self):
return self._prim.opacity
@opacity.setter
def opacity(self, value):
self._prim.opacity=value
@property
def shininess(self):
return self._prim.shininess
@shininess.setter
def shininess(self, value):
self._prim.shininess=value
@property
def emissive(self):
return self._prim.emissive
@emissive.setter
def emissive(self, value):
self._prim.emissive=value
@property
def texture(self):
return self._prim.texture
@texture.setter
def texture(self, **kwargs):
self._prim.texture=kwargs
@property
def visible(self):
return self._prim.visible
@visible.setter
def visible(self, flag):
assert isinstance(flag, bool)
self._prim.visible = flag
class arrow(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.arrow), **kwargs)
class box(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.box), **kwargs)
class cone(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.cone), **kwargs)
class curve(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.curve), **kwargs)
def push(self, v):
if isinstance(v, vec):
self._prim.push(v._vec)
elif isinstance(v, dict):
for _key in v.keys():
if isinstance(_key, vec):
v[_key]=v[_key]._vec
self._prim.push(v)
def append(self, v):
self.push(v)
class cylinder(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.cylinder), **kwargs)
class helix(cylinder):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.helix), **kwargs)
class pyramid(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.pyramid), **kwargs)
#class ring(curve):
class sphere(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.sphere), **kwargs)
#triangle
#class triangle:
# def __init__(self, **kwargs):
# self._tri = JSConstructor(glowscript.triangle)(kwargs)
#vertex
#class vertex:
# def __init__(self, **kwargs):
# self._ver = JSConstructor(glowscript.vertex)(kwargs)
#quad
#compound
#class compound(box):
# def __init__(self, **kwargs):
# box.__init__(self, kwargs)
# I'm not sure if the declarations below are correct. Will fix later.
class distant_light:
def __init__(self, **kwargs):
self._dl=JSConstructor(glowscript.distant_light)(kwargs)
class local_light:
def __init__(self, **kwargs):
self._ll=JSConstructor(glowscript.local_light)(kwargs)
class draw:
def __init__(self, **kwargs):
self._draw=JSConstructor(glowscript.draw)(kwargs)
class label:
def __init__(self, **kwargs):
self._label=JSConstructor(glowscript.label)(kwargs)
def attach_trail(object, **kwargs):
if isinstance(object, primitive):
JSObject(glowscript.attach_trail)(object._prim, kwargs)
else:
JSObject(glowscript.attach_trail)(object, kwargs)
| agpl-3.0 |
robbiet480/home-assistant | homeassistant/components/bme680/sensor.py | 6 | 13404 | """Support for BME680 Sensor over SMBus."""
import logging
import threading
from time import monotonic, sleep
import bme680 # pylint: disable=import-error
from smbus import SMBus # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_NAME,
TEMP_FAHRENHEIT,
UNIT_PERCENTAGE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util.temperature import celsius_to_fahrenheit
_LOGGER = logging.getLogger(__name__)
CONF_I2C_ADDRESS = "i2c_address"
CONF_I2C_BUS = "i2c_bus"
CONF_OVERSAMPLING_TEMP = "oversampling_temperature"
CONF_OVERSAMPLING_PRES = "oversampling_pressure"
CONF_OVERSAMPLING_HUM = "oversampling_humidity"
CONF_FILTER_SIZE = "filter_size"
CONF_GAS_HEATER_TEMP = "gas_heater_temperature"
CONF_GAS_HEATER_DURATION = "gas_heater_duration"
CONF_AQ_BURN_IN_TIME = "aq_burn_in_time"
CONF_AQ_HUM_BASELINE = "aq_humidity_baseline"
CONF_AQ_HUM_WEIGHTING = "aq_humidity_bias"
CONF_TEMP_OFFSET = "temp_offset"
DEFAULT_NAME = "BME680 Sensor"
DEFAULT_I2C_ADDRESS = 0x77
DEFAULT_I2C_BUS = 1
DEFAULT_OVERSAMPLING_TEMP = 8 # Temperature oversampling x 8
DEFAULT_OVERSAMPLING_PRES = 4 # Pressure oversampling x 4
DEFAULT_OVERSAMPLING_HUM = 2 # Humidity oversampling x 2
DEFAULT_FILTER_SIZE = 3 # IIR Filter Size
DEFAULT_GAS_HEATER_TEMP = 320 # Temperature in celsius 200 - 400
DEFAULT_GAS_HEATER_DURATION = 150 # Heater duration in ms 1 - 4032
DEFAULT_AQ_BURN_IN_TIME = 300 # 300 second burn in time for AQ gas measurement
DEFAULT_AQ_HUM_BASELINE = 40 # 40%, an optimal indoor humidity.
DEFAULT_AQ_HUM_WEIGHTING = 25 # 25% Weighting of humidity to gas in AQ score
DEFAULT_TEMP_OFFSET = 0 # No calibration out of the box.
SENSOR_TEMP = "temperature"
SENSOR_HUMID = "humidity"
SENSOR_PRESS = "pressure"
SENSOR_GAS = "gas"
SENSOR_AQ = "airquality"
SENSOR_TYPES = {
SENSOR_TEMP: ["Temperature", None],
SENSOR_HUMID: ["Humidity", UNIT_PERCENTAGE],
SENSOR_PRESS: ["Pressure", "mb"],
SENSOR_GAS: ["Gas Resistance", "Ohms"],
SENSOR_AQ: ["Air Quality", UNIT_PERCENTAGE],
}
DEFAULT_MONITORED = [SENSOR_TEMP, SENSOR_HUMID, SENSOR_PRESS, SENSOR_AQ]
OVERSAMPLING_VALUES = {0, 1, 2, 4, 8, 16}
FILTER_VALUES = {0, 1, 3, 7, 15, 31, 63, 127}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): cv.positive_int,
vol.Optional(CONF_MONITORED_CONDITIONS, default=DEFAULT_MONITORED): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): cv.positive_int,
vol.Optional(
CONF_OVERSAMPLING_TEMP, default=DEFAULT_OVERSAMPLING_TEMP
): vol.All(vol.Coerce(int), vol.In(OVERSAMPLING_VALUES)),
vol.Optional(
CONF_OVERSAMPLING_PRES, default=DEFAULT_OVERSAMPLING_PRES
): vol.All(vol.Coerce(int), vol.In(OVERSAMPLING_VALUES)),
vol.Optional(CONF_OVERSAMPLING_HUM, default=DEFAULT_OVERSAMPLING_HUM): vol.All(
vol.Coerce(int), vol.In(OVERSAMPLING_VALUES)
),
vol.Optional(CONF_FILTER_SIZE, default=DEFAULT_FILTER_SIZE): vol.All(
vol.Coerce(int), vol.In(FILTER_VALUES)
),
vol.Optional(CONF_GAS_HEATER_TEMP, default=DEFAULT_GAS_HEATER_TEMP): vol.All(
vol.Coerce(int), vol.Range(200, 400)
),
vol.Optional(
CONF_GAS_HEATER_DURATION, default=DEFAULT_GAS_HEATER_DURATION
): vol.All(vol.Coerce(int), vol.Range(1, 4032)),
vol.Optional(
CONF_AQ_BURN_IN_TIME, default=DEFAULT_AQ_BURN_IN_TIME
): cv.positive_int,
vol.Optional(CONF_AQ_HUM_BASELINE, default=DEFAULT_AQ_HUM_BASELINE): vol.All(
vol.Coerce(int), vol.Range(1, 100)
),
vol.Optional(CONF_AQ_HUM_WEIGHTING, default=DEFAULT_AQ_HUM_WEIGHTING): vol.All(
vol.Coerce(int), vol.Range(1, 100)
),
vol.Optional(CONF_TEMP_OFFSET, default=DEFAULT_TEMP_OFFSET): vol.All(
vol.Coerce(float), vol.Range(-100.0, 100.0)
),
}
)
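# An illustrative YAML configuration using the schema above (values are
# examples only):
#
# sensor:
#   - platform: bme680
#     i2c_address: 0x77
#     monitored_conditions:
#       - temperature
#       - humidity
#       - pressure
#       - airquality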
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the BME680 sensor."""
SENSOR_TYPES[SENSOR_TEMP][1] = hass.config.units.temperature_unit
name = config[CONF_NAME]
sensor_handler = await hass.async_add_job(_setup_bme680, config)
if sensor_handler is None:
return
dev = []
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(
BME680Sensor(sensor_handler, variable, SENSOR_TYPES[variable][1], name)
)
async_add_entities(dev)
return
def _setup_bme680(config):
"""Set up and configure the BME680 sensor."""
sensor_handler = None
sensor = None
try:
# pylint: disable=no-member
i2c_address = config[CONF_I2C_ADDRESS]
bus = SMBus(config[CONF_I2C_BUS])
sensor = bme680.BME680(i2c_address, bus)
# Configure Oversampling
os_lookup = {
0: bme680.OS_NONE,
1: bme680.OS_1X,
2: bme680.OS_2X,
4: bme680.OS_4X,
8: bme680.OS_8X,
16: bme680.OS_16X,
}
sensor.set_temperature_oversample(os_lookup[config[CONF_OVERSAMPLING_TEMP]])
sensor.set_temp_offset(config[CONF_TEMP_OFFSET])
sensor.set_humidity_oversample(os_lookup[config[CONF_OVERSAMPLING_HUM]])
sensor.set_pressure_oversample(os_lookup[config[CONF_OVERSAMPLING_PRES]])
# Configure IIR Filter
filter_lookup = {
0: bme680.FILTER_SIZE_0,
1: bme680.FILTER_SIZE_1,
3: bme680.FILTER_SIZE_3,
7: bme680.FILTER_SIZE_7,
15: bme680.FILTER_SIZE_15,
31: bme680.FILTER_SIZE_31,
63: bme680.FILTER_SIZE_63,
127: bme680.FILTER_SIZE_127,
}
sensor.set_filter(filter_lookup[config[CONF_FILTER_SIZE]])
# Configure the Gas Heater
if (
SENSOR_GAS in config[CONF_MONITORED_CONDITIONS]
or SENSOR_AQ in config[CONF_MONITORED_CONDITIONS]
):
sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
sensor.set_gas_heater_duration(config[CONF_GAS_HEATER_DURATION])
sensor.set_gas_heater_temperature(config[CONF_GAS_HEATER_TEMP])
sensor.select_gas_heater_profile(0)
else:
sensor.set_gas_status(bme680.DISABLE_GAS_MEAS)
except (RuntimeError, OSError):
_LOGGER.error("BME680 sensor not detected at 0x%02x", i2c_address)
return None
sensor_handler = BME680Handler(
sensor,
(
SENSOR_GAS in config[CONF_MONITORED_CONDITIONS]
or SENSOR_AQ in config[CONF_MONITORED_CONDITIONS]
),
config[CONF_AQ_BURN_IN_TIME],
config[CONF_AQ_HUM_BASELINE],
config[CONF_AQ_HUM_WEIGHTING],
)
sleep(0.5) # Wait for device to stabilize
if not sensor_handler.sensor_data.temperature:
_LOGGER.error("BME680 sensor failed to Initialize")
return None
return sensor_handler
class BME680Handler:
"""BME680 sensor working in i2C bus."""
class SensorData:
"""Sensor data representation."""
def __init__(self):
"""Initialize the sensor data object."""
self.temperature = None
self.humidity = None
self.pressure = None
self.gas_resistance = None
self.air_quality = None
def __init__(
self,
sensor,
gas_measurement=False,
burn_in_time=300,
hum_baseline=40,
hum_weighting=25,
):
"""Initialize the sensor handler."""
self.sensor_data = BME680Handler.SensorData()
self._sensor = sensor
self._gas_sensor_running = False
self._hum_baseline = hum_baseline
self._hum_weighting = hum_weighting
self._gas_baseline = None
if gas_measurement:
threading.Thread(
target=self._run_gas_sensor,
kwargs={"burn_in_time": burn_in_time},
name="BME680Handler_run_gas_sensor",
).start()
self.update(first_read=True)
def _run_gas_sensor(self, burn_in_time):
"""Calibrate the Air Quality Gas Baseline."""
if self._gas_sensor_running:
return
self._gas_sensor_running = True
# Pause to allow initial data read for device validation.
sleep(1)
start_time = monotonic()
curr_time = monotonic()
burn_in_data = []
_LOGGER.info(
"Beginning %d second gas sensor burn in for Air Quality", burn_in_time
)
while curr_time - start_time < burn_in_time:
curr_time = monotonic()
if self._sensor.get_sensor_data() and self._sensor.data.heat_stable:
gas_resistance = self._sensor.data.gas_resistance
burn_in_data.append(gas_resistance)
self.sensor_data.gas_resistance = gas_resistance
_LOGGER.debug(
"AQ Gas Resistance Baseline reading %2f Ohms", gas_resistance
)
sleep(1)
_LOGGER.debug(
"AQ Gas Resistance Burn In Data (Size: %d): \n\t%s",
len(burn_in_data),
burn_in_data,
)
self._gas_baseline = sum(burn_in_data[-50:]) / 50.0
_LOGGER.info("Completed gas sensor burn in for Air Quality")
_LOGGER.info("AQ Gas Resistance Baseline: %f", self._gas_baseline)
while True:
if self._sensor.get_sensor_data() and self._sensor.data.heat_stable:
self.sensor_data.gas_resistance = self._sensor.data.gas_resistance
self.sensor_data.air_quality = self._calculate_aq_score()
sleep(1)
def update(self, first_read=False):
"""Read sensor data."""
if first_read:
# Attempt first read, it almost always fails first attempt
self._sensor.get_sensor_data()
if self._sensor.get_sensor_data():
self.sensor_data.temperature = self._sensor.data.temperature
self.sensor_data.humidity = self._sensor.data.humidity
self.sensor_data.pressure = self._sensor.data.pressure
def _calculate_aq_score(self):
"""Calculate the Air Quality Score."""
hum_baseline = self._hum_baseline
hum_weighting = self._hum_weighting
gas_baseline = self._gas_baseline
gas_resistance = self.sensor_data.gas_resistance
gas_offset = gas_baseline - gas_resistance
hum = self.sensor_data.humidity
hum_offset = hum - hum_baseline
# Calculate hum_score as the distance from the hum_baseline.
if hum_offset > 0:
hum_score = (
(100 - hum_baseline - hum_offset) / (100 - hum_baseline) * hum_weighting
)
else:
hum_score = (hum_baseline + hum_offset) / hum_baseline * hum_weighting
# Calculate gas_score as the distance from the gas_baseline.
if gas_offset > 0:
gas_score = (gas_resistance / gas_baseline) * (100 - hum_weighting)
else:
gas_score = 100 - hum_weighting
# Calculate air quality score.
return hum_score + gas_score
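# Worked example (readings assumed): with hum_baseline=40, hum_weighting=25
# and gas_baseline=500000 Ohms, a reading of 40% humidity and 250000 Ohms
# gives hum_score=25.0 and gas_score=(250000/500000)*75=37.5, i.e. an air
# quality score of 62.5%.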
class BME680Sensor(Entity):
"""Implementation of the BME680 sensor."""
def __init__(self, bme680_client, sensor_type, temp_unit, name):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.bme680_client = bme680_client
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the sensor."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest data from the BME680 and update the states."""
await self.hass.async_add_job(self.bme680_client.update)
if self.type == SENSOR_TEMP:
temperature = round(self.bme680_client.sensor_data.temperature, 1)
if self.temp_unit == TEMP_FAHRENHEIT:
temperature = round(celsius_to_fahrenheit(temperature), 1)
self._state = temperature
elif self.type == SENSOR_HUMID:
self._state = round(self.bme680_client.sensor_data.humidity, 1)
elif self.type == SENSOR_PRESS:
self._state = round(self.bme680_client.sensor_data.pressure, 1)
elif self.type == SENSOR_GAS:
self._state = int(round(self.bme680_client.sensor_data.gas_resistance, 0))
elif self.type == SENSOR_AQ:
aq_score = self.bme680_client.sensor_data.air_quality
if aq_score is not None:
self._state = round(aq_score, 1)
| apache-2.0 |
huang4fstudio/django | tests/shortcuts/tests.py | 199 | 5776 | from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.test.utils import require_jinja2
from django.utils.deprecation import RemovedInDjango110Warning
@override_settings(
ROOT_URLCONF='shortcuts.urls',
)
class ShortcutTests(SimpleTestCase):
def test_render_to_response(self):
response = self.client.get('/render_to_response/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR..\n')
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_render_to_response_with_multiple_templates(self):
response = self.client.get('/render_to_response/multiple_templates/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR..\n')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_to_response_with_request_context(self):
response = self.client.get('/render_to_response/request_context/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR../render_to_response/request_context/\n')
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_render_to_response_with_content_type(self):
response = self.client.get('/render_to_response/content_type/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR..\n')
self.assertEqual(response['Content-Type'], 'application/x-rendertest')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_to_response_with_dirs(self):
response = self.client.get('/render_to_response/dirs/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'spam eggs\n')
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_render_to_response_with_status(self):
response = self.client.get('/render_to_response/status/')
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b'FOO.BAR..\n')
@require_jinja2
def test_render_to_response_with_using(self):
response = self.client.get('/render_to_response/using/')
self.assertEqual(response.content, b'DTL\n')
response = self.client.get('/render_to_response/using/?using=django')
self.assertEqual(response.content, b'DTL\n')
response = self.client.get('/render_to_response/using/?using=jinja2')
self.assertEqual(response.content, b'Jinja2\n')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_to_response_with_context_instance_misuse(self):
"""
For backwards-compatibility, ensure that it's possible to pass a
RequestContext instance in the dictionary argument instead of the
context_instance argument.
"""
response = self.client.get('/render_to_response/context_instance_misuse/')
self.assertContains(response, 'context processor output')
def test_render(self):
response = self.client.get('/render/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR../render/\n')
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
self.assertFalse(hasattr(response.context.request, 'current_app'))
def test_render_with_multiple_templates(self):
response = self.client.get('/render/multiple_templates/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR../render/multiple_templates/\n')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_with_base_context(self):
response = self.client.get('/render/base_context/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR..\n')
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_render_with_content_type(self):
response = self.client.get('/render/content_type/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'FOO.BAR../render/content_type/\n')
self.assertEqual(response['Content-Type'], 'application/x-rendertest')
def test_render_with_status(self):
response = self.client.get('/render/status/')
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b'FOO.BAR../render/status/\n')
@require_jinja2
def test_render_with_using(self):
response = self.client.get('/render/using/')
self.assertEqual(response.content, b'DTL\n')
response = self.client.get('/render/using/?using=django')
self.assertEqual(response.content, b'DTL\n')
response = self.client.get('/render/using/?using=jinja2')
self.assertEqual(response.content, b'Jinja2\n')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_with_current_app(self):
response = self.client.get('/render/current_app/')
self.assertEqual(response.context.request.current_app, "foobar_app")
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_with_dirs(self):
response = self.client.get('/render/dirs/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'spam eggs\n')
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_with_current_app_conflict(self):
with self.assertRaises(ValueError):
self.client.get('/render/current_app_conflict/')
| bsd-3-clause |
shabab12/edx-platform | common/djangoapps/lang_pref/middleware.py | 26 | 2001 | """
Middleware for Language Preferences
"""
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, delete_user_preference
from lang_pref import LANGUAGE_KEY
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import parse_accept_lang_header
from lang_pref.api import released_languages
class LanguagePreferenceMiddleware(object):
"""
Middleware for user preferences.
Ensures that, once set, a user's preferences are reflected in the page
whenever they are logged in.
"""
def process_request(self, request):
"""
If a user's UserPreference contains a language preference, use the user's preference.
"""
languages = released_languages()
system_released_languages = [seq[0] for seq in languages]
# If the user is logged in, check for their language preference
if request.user.is_authenticated():
# Get the user's language preference
user_pref = get_user_preference(request.user, LANGUAGE_KEY)
# Set it to the LANGUAGE_SESSION_KEY (Django-specific session setting governing language pref)
if user_pref:
if user_pref in system_released_languages:
request.session[LANGUAGE_SESSION_KEY] = user_pref
else:
delete_user_preference(request.user, LANGUAGE_KEY)
else:
preferred_language = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
lang_headers = [seq[0] for seq in parse_accept_lang_header(preferred_language)]
# Setting the session language to the browser language, if it is supported.
for browser_lang in lang_headers:
if browser_lang in system_released_languages:
if request.session.get(LANGUAGE_SESSION_KEY, None) is None:
request.session[LANGUAGE_SESSION_KEY] = unicode(browser_lang)
break
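# For reference, parse_accept_lang_header('en-US,fr;q=0.5') returns
# [('en-us', 1.0), ('fr', 0.5)], so browser languages are checked in
# descending quality order against the released languages.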
| agpl-3.0 |
bitmazk/django-user-tags | user_tags/tests/forms_tests.py | 1 | 7599 | """Tests for the forms of the ``user_tags`` app."""
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from django_libs.tests.factories import UserFactory
from user_tags.tests.test_app.forms import DummyModelForm
from user_tags.models import TaggedItem, UserTag, UserTagGroup
class UserTagsFormMixinTestCase(TestCase):
"""Tests for the ``UserTagsFormMixin`` mixin class."""
longMessage = True
def setUp(self):
"""Creates a user and valid set of form data."""
self.user = UserFactory()
self.data = {
'name': 'dummy',
'tags': 'great day,family, cinema ',
'global_tags': 'foo, bar',
}
def test_adds_fields_to_the_form(self):
"""
A form that inherits from ``UserTagsFormMixin`` should have the
fields that are defined on the model's ``TAG_FIELDS`` options dict
as form fields.
"""
form = DummyModelForm(self.user)
self.assertTrue('tags' in form.fields)
self.assertEqual(form.fields['tags'].help_text.encode(), b'Help text')
self.assertTrue('global_tags' in form.fields)
def test_form_valid(self):
"""Form should be valid when valid data is given."""
form = DummyModelForm(self.user, data=self.data)
self.assertTrue(form.is_valid())
def test_save_returns_instance(self):
"""
Save should return the saved instance when creating a new object.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
self.assertTrue(instance.pk)
def test_creates_tag_group(self):
"""
If the user has entered tags for a given tag field, the correct
user tags related objects should be created.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
tag_group = UserTagGroup.objects.get(name='tags')
user_tags = UserTag.objects.filter(user_tag_group=tag_group)
self.assertEqual(user_tags.count(), 3)
global_tag_group = UserTagGroup.objects.get(name='global_tags')
global_tags = UserTag.objects.filter(user_tag_group=global_tag_group)
self.assertEqual(global_tags.count(), 2)
tagged_item = TaggedItem.objects.get(
content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk)
self.assertEqual(tagged_item.user_tags.all().count(), 5)
def test_tag_group_without_user(self):
"""
For a tag group that has ``'with_user': False`` in the ``TAG_FIELDS``
option dict, the created tag group should not be bound to any user.
"""
form = DummyModelForm(self.user, data=self.data)
form.save()
global_tag_group = UserTagGroup.objects.get(name='global_tags')
self.assertEqual(global_tag_group.user, None)
def test_form_should_be_valid_when_instance_given(self):
"""
When instantiated with an instance, the form should, of course,
be valid.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
form = DummyModelForm(self.user, data=self.data, instance=instance)
self.assertTrue(form.is_valid())
def test_save_instance_re_creates_everything(self):
"""
When instantiated with an instance that already has tags, those tags
should be deleted when the form is saved. Only the newly submitted
tags will get re-created.
In this test we don't touch the two existing 'global_tags' but we
re-submit two new 'tags' (before that group had three tags). So in
total we should have four tags now, not five.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
data2 = self.data.copy()
data2.update({'tags': 'family, cinema', })
form = DummyModelForm(self.user, data=data2, instance=instance)
instance = form.save()
tagged_item = TaggedItem.objects.get(
content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk)
self.assertEqual(tagged_item.user_tags.all().count(), 4)
def test_get_user_from_instance(self):
"""
If the form was not instantiated with a user parameter, it will try to get
the user from the instance.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
instance.user = self.user
form = DummyModelForm(instance=instance, data=self.data)
self.assertTrue(form.is_valid())
self.assertTrue(form.save())
def test_get_user_method(self):
"""
If the form was not instantiated with a user parameter and the instance does
not have a user field, it will try to call a ``get_user`` method on
the form.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
form = DummyModelForm(instance=instance, data=self.data)
def get_user():
return self.user
form.get_user = get_user
self.assertTrue(form.is_valid())
self.assertTrue(form.save())
def test_no_user_given(self):
"""
If the form was not instantiated with a user parameter and the instance has no
user attribute and no get_user method, so be it. This tag is probably
supposed to be global to the project.
"""
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
form = DummyModelForm(instance=instance, data=self.data)
self.assertTrue(form.is_valid())
self.assertTrue(form.save())
def test_no_tags(self):
form = DummyModelForm(self.user, data=self.data)
instance = form.save()
TaggedItem.objects.all().delete()
form = DummyModelForm(instance=instance, data=self.data)
self.assertTrue(form.is_valid())
def test_split_tags(self):
tags = DummyModelForm.split_tags('great day,family, cinema, ')
self.assertEqual(len(tags), 3)
self.assertEqual(tags[0], 'great day')
self.assertEqual(tags[1], 'family')
self.assertEqual(tags[2], 'cinema')
def test_adds_tag_list_to_form(self):
"""
Should add the available tags for each given tag field to the form.
This enables users to do this in their templates::
$(document).ready(function() {
$('#id_skills').tagit({
allowSpaces: true
,availableTags:
{{ form.available_tags_technical_skills|safe }}
,caseSensitive: false
,removeConfirmation: true
});
}
"""
form = DummyModelForm(self.user, data=self.data)
form.save()
result = form.tags_tags_values()
self.assertEqual(result, '["cinema", "family", "great day"]')
result = form.global_tags_tags_values()
self.assertEqual(result, '["bar", "foo"]')
user2 = UserFactory()
form = DummyModelForm(user2)
result = form.tags_tags_values()
self.assertEqual(result, '[]', msg=(
'A user should not be able to see the private tags of another'
' user.'))
form = DummyModelForm()
result = form.tags_tags_values()
self.assertEqual(result, '[]', msg=(
'An anonymous user should not be able to see user specific tags.'))
| mit |
mKeRix/home-assistant | homeassistant/components/hlk_sw16/switch.py | 12 | 1410 | """Support for HLK-SW16 switches."""
import logging
from homeassistant.components.switch import ToggleEntity
from homeassistant.const import CONF_NAME
from . import DATA_DEVICE_REGISTER, SW16Device
_LOGGER = logging.getLogger(__name__)
def devices_from_config(hass, domain_config):
"""Parse configuration and add HLK-SW16 switch devices."""
switches = domain_config[0]
device_id = domain_config[1]
device_client = hass.data[DATA_DEVICE_REGISTER][device_id]
devices = []
for device_port, device_config in switches.items():
device_name = device_config.get(CONF_NAME, device_port)
device = SW16Switch(device_name, device_port, device_id, device_client)
devices.append(device)
return devices
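# discovery_info passed to async_setup_platform below is expected to be a
# (switches, device_id) pair as unpacked above, e.g.
# ({'1': {'name': 'Lamp'}}, 'sw16_main') -- names are illustrative.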
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the HLK-SW16 platform."""
async_add_entities(devices_from_config(hass, discovery_info))
class SW16Switch(SW16Device, ToggleEntity):
"""Representation of a HLK-SW16 switch."""
@property
def is_on(self):
"""Return true if device is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._client.turn_on(self._device_port)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._device_port)
| mit |
Xonshiz/comic-dl | comic_dl/sites/readcomicOnlineli.py | 1 | 7993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import globalFunctions
import re
import os
import logging
class ReadComicOnlineLi(object):
def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
current_directory = kwargs.get("current_directory")
conversion = kwargs.get("conversion")
keep_files = kwargs.get("keep_files")
self.logging = kwargs.get("log_flag")
self.sorting = kwargs.get("sorting_order")
self.image_quality = kwargs.get("image_quality")
self.comic_name = self.name_cleaner(manga_url)
self.print_index = kwargs.get("print_index")
url_split = str(manga_url).split("/")
if len(url_split) in [5]: # Sometimes this value came out to be 6 instead of 5; the cause is unclear.
# Removing "6" from here, because it caused #47
self.full_series(comic_url=manga_url.replace("&readType=1", ""), comic_name=self.comic_name,
sorting=self.sorting, download_directory=download_directory, chapter_range=chapter_range,
conversion=conversion, keep_files=keep_files)
else:
if "&readType=0" in manga_url:
manga_url = str(manga_url).replace("&readType=0", "&readType=1") # All Images in one page!
# disabled to fix #132 and #145
# elif "&readType=1" not in manga_url:
# manga_url = str(manga_url) + "&readType=1" # All Images in one page!
self.single_chapter(manga_url, self.comic_name, download_directory, conversion=conversion,
keep_files=keep_files)
def single_chapter(self, comic_url, comic_name, download_directory, conversion, keep_files):
# print("Received Comic Url : {0}".format(comic_url))
print("Fooling CloudFlare...Please Wait...")
chapter_number = str(comic_url).split("/")[5].split("?")[0].replace("-", " - ")
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
img_list = re.findall(r"lstImages.push\(\"(.*?)\"\);", str(source))
file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name)
# directory_path = os.path.realpath(file_directory)
directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# image_len = len(image_list)
if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
print("Downloading In Low Quality...")
links = []
file_names = []
for current_chapter, image_link in enumerate(img_list):
image_link = image_link.replace("\\", "")
logging.debug("Image Link : %s" % image_link)
image_link = image_link.replace("=s1600", "=s0").replace("/s1600", "/s0") # Change low quality to best.
if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
image_link = image_link.replace("=s0", "=s1600").replace("/s0", "/s1600")
current_chapter += 1
file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(img_list))) + ".jpg"
file_names.append(file_name)
links.append(image_link)
globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, comic_url, directory_path,
file_names, links, self.logging)
globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name,
chapter_number)
return 0
def name_cleaner(self, url):
initial_name = str(url).split("/")[4].strip()
safe_name = re.sub(r"[0-9][a-z][A-Z]\ ", "", str(initial_name))
manga_name = str(safe_name.title()).replace("-", " ")
return manga_name
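# Illustrative input/output (hypothetical URL): for
# "http://readcomiconline.li/Comic/Hello-World" the fifth path segment is
# "Hello-World", which title-casing plus hyphen replacement turns into
# "Hello World".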
def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
print("Fooling CloudFlare...Please Wait...")
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
all_links = []
listing_table = source.find_all("table", {"class": "listing"})
# print(listing_table)
for elements in listing_table:
x = elements.findAll('a')
for a in x:
all_links.append(str(a['href']).strip())
"""Readcomiconline.li shows the chapters in the Descending order. The 1st chapter is at the bottom, hence, at
the end of the list. So, we'll reverse the list, to perform the ranging functionality properly.
This is a fix for issues like #74.
"""
all_links.reverse()
# print("All Links : {0}".format(all_links))
logging.debug("All Links : %s" % all_links)
# The logic is to remove all the unnecessary chapters beforehand
# and then pass the list for further operations.
if chapter_range != "All":
# -1 to shift the episode number to its list INDEX (lists start from 0).
starting = int(str(chapter_range).split("-")[0]) - 1
if str(chapter_range).split("-")[1].isdigit():
ending = int(str(chapter_range).split("-")[1])
else:
ending = len(all_links)
indexes = [x for x in range(starting, ending)]
all_links = [all_links[x] for x in indexes][::-1]
else:
all_links = all_links
if self.print_index:
idx = 0
for chap_link in all_links:
idx = idx + 1
print(str(idx) + ": " + chap_link)
return
if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:
for chap_link in all_links:
chap_link = "http://readcomiconline.li" + chap_link
try:
self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
conversion=conversion, keep_files=keep_files)
except Exception as ex:
logging.error("Error downloading : %s" % chap_link)
break # break to continue processing other mangas
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:
for chap_link in all_links[::-1]:
chap_link = "http://readcomiconline.to" + chap_link
try:
self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
conversion=conversion, keep_files=keep_files)
except Exception as ex:
logging.error("Error downloading : %s" % chap_link)
break # break to continue processing other mangas
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
return 0
| mit |
lekanovic/pycoin | pycoin/blockchain/ChainFinder.py | 25 | 3410 |
class ChainFinder(object):
def __init__(self):
self.parent_lookup = {}
self.descendents_by_top = {}
self.trees_from_bottom = {}
def __repr__(self):
return "<ChainFinder: trees_fb:%s d_b_tops:%s>" % (self.trees_from_bottom, self.descendents_by_top)
def load_nodes(self, nodes):
# register everything
new_hashes = set()
for h, parent in nodes:
if h in self.parent_lookup:
continue
self.parent_lookup[h] = parent
new_hashes.add(h)
if new_hashes:
self.meld_new_hashes(new_hashes)
def meld_new_hashes(self, new_hashes):
# make a list
while len(new_hashes) > 0:
h = new_hashes.pop()
path = [h]
while 1:
h = self.parent_lookup.get(h)
if h is None:
break
new_hashes.discard(h)
preceding_path = self.trees_from_bottom.get(h)
if preceding_path:
del self.trees_from_bottom[h]
path.extend(preceding_path)
# we extended an existing path. Fix up descendents_by_top
self.descendents_by_top[preceding_path[-1]].remove(preceding_path[0])
break
path.append(h)
self.trees_from_bottom[path[0]] = path
#if len(path) <= 1:
# # this is a lone element... don't bother trying to extend
# continue
# now, perform extensions on any trees that start below here
bottom_h, top_h = path[0], path[-1]
top_descendents = self.descendents_by_top.setdefault(top_h, set())
bottom_descendents = self.descendents_by_top.get(bottom_h)
if bottom_descendents:
for descendent in bottom_descendents:
prior_path = self.trees_from_bottom[descendent]
prior_path.extend(path[1:])
if path[0] in self.trees_from_bottom:
del self.trees_from_bottom[path[0]]
else:
pass # TODO: improve this
del self.descendents_by_top[bottom_h]
top_descendents.update(bottom_descendents)
else:
top_descendents.add(bottom_h)
def all_chains_ending_at(self, h):
for bottom_h in self.descendents_by_top.get(h, []):
yield self.trees_from_bottom[bottom_h]
def missing_parents(self):
return self.descendents_by_top.keys()
def maximum_path(self, h, cache={}):
v = self.trees_from_bottom.get(h)
if v:
return v
h1 = h
v = []
while h1 is not None:
v.append(h1)
h1 = self.parent_lookup.get(h1)
for i, h1 in enumerate(v):
cache[h1] = v[i:]
return v
def find_ancestral_path(self, h1, h2, path_cache={}):
p1 = self.maximum_path(h1, path_cache)
p2 = self.maximum_path(h2, path_cache)
if p1[-1] != p2[-1]:
return [], []
shorter_len = min(len(p1), len(p2))
i1 = len(p1) - shorter_len
i2 = len(p2) - shorter_len
while 1:
if p1[i1] == p2[i2]:
return p1[:i1+1], p2[:i2+1]
i1 += 1
i2 += 1
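if __name__ == "__main__":
    # Minimal usage sketch (illustrative; letters stand in for block hashes).
    # Chain: a <- b, where b has two children, c and d.
    finder = ChainFinder()
    finder.load_nodes([("b", "a"), ("c", "b"), ("d", "b")])
    # Walk both chains back to their common ancestor "b"; prints
    # (['c', 'b'], ['d', 'b']).
    print(finder.find_ancestral_path("c", "d"))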
| mit |
pantsbuild/pants | src/python/pants/backend/python/util_rules/pex_cli.py | 3 | 7111 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import dataclasses
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, List, Mapping, Optional, Tuple
from pants.backend.python.subsystems.python_native_code import PythonNativeCode
from pants.backend.python.util_rules import pex_environment
from pants.backend.python.util_rules.pex_environment import (
PexRuntimeEnvironment,
PythonExecutable,
SandboxPexEnvironment,
)
from pants.core.util_rules import external_tool
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests
from pants.engine.internals.selectors import MultiGet
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessCacheScope
from pants.engine.rules import Get, collect_rules, rule
from pants.option.global_options import GlobalOptions
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import classproperty, frozen_after_init
from pants.util.strutil import create_path_env_var
class PexBinary(TemplatedExternalTool):
options_scope = "download-pex-bin"
name = "pex"
help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
default_version = "v2.1.42"
default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
@classproperty
def default_known_versions(cls):
return [
"|".join(
(
cls.default_version,
plat,
"69d6b1b1009b00dd14a3a9f19b72cff818a713ca44b3186c9b12074b2a31e51f",
"3613838",
)
)
for plat in ["darwin", "linux"]
]
@frozen_after_init
@dataclass(unsafe_hash=True)
class PexCliProcess:
argv: Tuple[str, ...]
description: str = dataclasses.field(compare=False)
additional_input_digest: Optional[Digest]
extra_env: Optional[FrozenDict[str, str]]
output_files: Optional[Tuple[str, ...]]
output_directories: Optional[Tuple[str, ...]]
python: Optional[PythonExecutable]
level: LogLevel
cache_scope: ProcessCacheScope
def __init__(
self,
*,
argv: Iterable[str],
description: str,
additional_input_digest: Optional[Digest] = None,
extra_env: Optional[Mapping[str, str]] = None,
output_files: Optional[Iterable[str]] = None,
output_directories: Optional[Iterable[str]] = None,
python: Optional[PythonExecutable] = None,
level: LogLevel = LogLevel.INFO,
cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
) -> None:
self.argv = tuple(argv)
self.description = description
self.additional_input_digest = additional_input_digest
self.extra_env = FrozenDict(extra_env) if extra_env else None
self.output_files = tuple(output_files) if output_files else None
self.output_directories = tuple(output_directories) if output_directories else None
self.python = python
self.level = level
self.cache_scope = cache_scope
self.__post_init__()
def __post_init__(self) -> None:
if "--pex-root-path" in self.argv:
raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
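# Illustrative construction (keyword arguments as defined above):
#     request = PexCliProcess(
#         argv=("--version",),
#         description="Print the Pex version",
#     )
# setup_pex_cli_process below turns such a request into a runnable Process.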
class PexPEX(DownloadedExternalTool):
"""The Pex PEX binary."""
@rule
async def download_pex_pex(pex_binary: PexBinary) -> PexPEX:
pex_pex = await Get(
DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)
)
return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)
@rule
async def setup_pex_cli_process(
request: PexCliProcess,
pex_binary: PexPEX,
pex_env: SandboxPexEnvironment,
python_native_code: PythonNativeCode,
global_options: GlobalOptions,
pex_runtime_env: PexRuntimeEnvironment,
) -> Process:
tmpdir = ".tmp"
gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]
cert_args = []
# The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.
# Instead we manually create a FileContent for it.
if global_options.options.ca_certs_path:
ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()
chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)
gets.append(
Get(
Digest,
CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),
)
)
cert_args = ["--cert", chrooted_ca_certs_path]
digests_to_merge = [pex_binary.digest]
digests_to_merge.extend(await MultiGet(gets))
if request.additional_input_digest:
digests_to_merge.append(request.additional_input_digest)
input_digest = await Get(Digest, MergeDigests(digests_to_merge))
argv = [
pex_binary.exe,
*cert_args,
"--python-path",
create_path_env_var(pex_env.interpreter_search_paths),
# Ensure Pex and its subprocesses create temporary files in the process execution
# sandbox. It may make sense to do this generally for Processes, but in the short term we
# have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
# perform. Making the TMPDIR local to the sandbox allows control via
# --local-execution-root-dir for the local case and should work well with remote cases where
# a remoting implementation has to allow for processes producing large binaries in a
# sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
# environment variable allows Pex to absolutize the path ensuring subprocesses that change
# CWD can find the TMPDIR.
"--tmpdir",
tmpdir,
]
if pex_runtime_env.verbosity > 0:
argv.append(f"-{'v' * pex_runtime_env.verbosity}")
# NB: This comes at the end of the argv because the request may use `--` passthrough args,
# which must come at the end.
argv.extend(request.argv)
normalized_argv = pex_env.create_argv(*argv, python=request.python)
env = {
**pex_env.environment_dict(python_configured=request.python is not None),
**python_native_code.environment_dict,
**(request.extra_env or {}),
}
return Process(
normalized_argv,
description=request.description,
input_digest=input_digest,
env=env,
output_files=request.output_files,
output_directories=request.output_directories,
append_only_caches=pex_env.append_only_caches,
level=request.level,
cache_scope=request.cache_scope,
)
def rules():
return [*collect_rules(), *external_tool.rules(), *pex_environment.rules()]
| apache-2.0 |
simmetria/sentry | src/sentry/migrations/0039_auto__add_field_searchdocument_status.py | 6 | 16296 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SearchDocument.status'
db.add_column('sentry_searchdocument', 'status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'SearchDocument.status'
db.delete_column('sentry_searchdocument', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
s0930342674/pyload | module/plugins/hooks/RPNetBizHook.py | 12 | 1400 | # -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.internal.MultiHook import MultiHook
class RPNetBizHook(MultiHook):
__name__ = "RPNetBizHook"
__type__ = "hook"
__version__ = "0.16"
__status__ = "testing"
__config__ = [("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
("pluginlist" , "str" , "Plugin list (comma separated)", "" ),
("reload" , "bool" , "Reload plugin list" , True ),
("reloadinterval", "int" , "Reload interval in hours" , 12 )]
__description__ = """RPNet.biz hook plugin"""
__license__ = "GPLv3"
__authors__ = [("Dman", "dmanugm@gmail.com")]
def get_hosters(self):
#: Get account data
user, info = self.account.select()
res = self.load("https://premium.rpnet.biz/client_api.php",
get={'username': user,
'password': info['login']['password'],
'action' : "showHosterList"})
hoster_list = json_loads(res)
#: If the account is not valid there are no hosters available
if 'error' in hoster_list:
return []
#: Extract hosters from json file
return hoster_list['hosters']
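#: Expected response shape, inferred from the parsing above (hoster names
#: are illustrative): {"hosters": ["somehost.example", ...]} on success, or
#: {"error": "..."} for an invalid account.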
| gpl-3.0 |
Dziolas/invenio | modules/miscutil/lib/remote_debugger_wsgi_reload.py | 33 | 3919 | ## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio utility to monitor and automatically reload modules when edits
are done in-live.
This module was inspired by examples from: http://code.google.com/p/modwsgi/wiki/ReloadingSourceCode
(C) Graham Dumpleton
"""
import os
import sys
import time
import signal
import threading
import atexit
import Queue
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = Queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
print >> sys.stderr, '%s Triggering process restart.' % prefix
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
# If path doesn't denote a file and we were previously
# tracking it, then it has been removed or the file type
# has changed, so force a restart. If not previously
# tracking the file then we can ignore it, as it is probably a
# pseudo reference, such as a file extracted from a collection
# of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check when the file was last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except:
# If any exception occurred, the file has likely been
# removed just before stat(), so force a restart.
return True
return False
def _monitor():
while 1:
# Check modification times on all files in sys.modules.
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if not path in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print >> sys.stderr, '%s Starting change monitor.' % prefix
_running = True
_thread.start()
_lock.release()
| gpl-2.0 |
zhiwehu/django-countries | countries/models.py | 1 | 2033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Country(models.Model):
"""
International Organization for Standardization (ISO) 3166-1 Country list
* ``iso`` = ISO 3166-1 alpha-2
* ``name`` = Official country names used by the ISO 3166/MA in capital letters
* ``printable_name`` = Printable country names for in-text use
* ``iso3`` = ISO 3166-1 alpha-3
* ``numcode`` = ISO 3166-1 numeric
Note::
This model is fixed to the database table 'country' to be more general.
Change ``db_table`` if this cause conflicts with your database layout.
Or comment out the line for default django behaviour.
"""
iso = models.CharField(_('ISO alpha-2'), max_length=2, primary_key=True)
name = models.CharField(_('Official name (CAPS)'), max_length=128)
printable_name = models.CharField(_('Country name'), max_length=128)
iso3 = models.CharField(_('ISO alpha-3'), max_length=3, null=True)
numcode = models.PositiveSmallIntegerField(_('ISO numeric'), null=True)
class Meta:
db_table = 'country'
verbose_name = _('Country')
verbose_name_plural = _('Countries')
ordering = ('name',)
class Admin:
list_display = ('printable_name', 'iso',)
def __unicode__(self):
return self.printable_name
class UsState(models.Model):
"""
United States Postal Service (USPS) State Abbreviations
Note::
This model is fixed to the database table 'usstate' to be more general.
Change ``db_table`` if this causes conflicts with your database layout.
Or comment out the line for default django behaviour.
"""
id = models.AutoField(primary_key=True)
name = models.CharField(_('State name'), max_length=50, null=False)
abbrev = models.CharField(_('Abbreviation'), max_length=2, null=False)
class Meta:
db_table = 'usstate'
verbose_name = _('US State')
verbose_name_plural = _('US States')
ordering = ('name',)
class Admin:
list_display = ('name', 'abbrev',)
def __unicode__(self):
return self.name
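# Illustrative queries against the models above (data values are examples):
#     Country.objects.get(iso='US').printable_name  # e.g. 'United States'
#     UsState.objects.get(abbrev='CA').name         # e.g. 'California'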
| bsd-3-clause |
sbbic/core | wizards/com/sun/star/wizards/ui/event/ListDataEvent.py | 11 | 1214 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
class ListDataEvent:
INTERVAL_ADDED = 1
INTERVAL_REMOVED = 2
CONTENTS_CHANGED = 3
# General constructor.
# @param source_
# @param type_
def __init__(self, source_, type_, i0, i1):
#super(TaskEvent, self).__init__(source)
self.index0 = i0
self.index1 = i1
def getIndex0(self):
return self.index0
def getIndex1(self):
return self.index1
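# Illustrative construction (arguments as defined above; `source` is any object):
#     event = ListDataEvent(source, ListDataEvent.CONTENTS_CHANGED, 0, 3)
#     event.getIndex0()  # 0
#     event.getIndex1()  # 3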
| gpl-3.0 |
alpeware/gcms | lib/googleapiclient/_auth.py | 16 | 3091 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for authentication using oauth2client or google-auth."""
try:
import google.auth
import google.auth.credentials
import google_auth_httplib2
HAS_GOOGLE_AUTH = True
except ImportError: # pragma: NO COVER
HAS_GOOGLE_AUTH = False
try:
import oauth2client
import oauth2client.client
HAS_OAUTH2CLIENT = True
except ImportError: # pragma: NO COVER
HAS_OAUTH2CLIENT = False
from googleapiclient.http import build_http
def default_credentials():
"""Returns Application Default Credentials."""
if HAS_GOOGLE_AUTH:
credentials, _ = google.auth.default()
return credentials
elif HAS_OAUTH2CLIENT:
return oauth2client.client.GoogleCredentials.get_application_default()
else:
raise EnvironmentError(
'No authentication library is available. Please install either '
'google-auth or oauth2client.')
def with_scopes(credentials, scopes):
"""Scopes the credentials if necessary.
Args:
credentials (Union[
google.auth.credentials.Credentials,
oauth2client.client.Credentials]): The credentials to scope.
scopes (Sequence[str]): The list of scopes.
Returns:
Union[google.auth.credentials.Credentials,
oauth2client.client.Credentials]: The scoped credentials.
"""
if HAS_GOOGLE_AUTH and isinstance(
credentials, google.auth.credentials.Credentials):
return google.auth.credentials.with_scopes_if_required(
credentials, scopes)
else:
try:
if credentials.create_scoped_required():
return credentials.create_scoped(scopes)
else:
return credentials
except AttributeError:
return credentials
def authorized_http(credentials):
"""Returns an http client that is authorized with the given credentials.
Args:
credentials (Union[
google.auth.credentials.Credentials,
oauth2client.client.Credentials]): The credentials to use.
Returns:
Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
authorized http client.
"""
if HAS_GOOGLE_AUTH and isinstance(
credentials, google.auth.credentials.Credentials):
return google_auth_httplib2.AuthorizedHttp(credentials,
http=build_http())
else:
return credentials.authorize(build_http())
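# Illustrative usage of the helpers above (the scope URL is an example):
#     credentials = default_credentials()
#     credentials = with_scopes(
#         credentials, ['https://www.googleapis.com/auth/cloud-platform'])
#     http = authorized_http(credentials)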
| mit |
krafczyk/spack | var/spack/repos/builtin/packages/r-rpostgresql/package.py | 5 | 2292 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRpostgresql(RPackage):
"""Database interface and PostgreSQL driver for R This package provides a
Database Interface (DBI) compliant driver for R to access PostgreSQL
database systems. In order to build and install this package from source,
PostgreSQL itself must be present your system to provide PostgreSQL
functionality via its libraries and header files. These files are provided
as postgresql-devel package under some Linux distributions. On Microsoft
Windows system the attached libpq library source will be used. A wiki and
issue tracking system for the package are available at Google Code at
https://code.google.com/p/rpostgresql/."""
homepage = "https://code.google.com/p/rpostgresql/"
url = "https://cran.r-project.org/src/contrib/RPostgreSQL_0.4-1.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/RPostgreSQL"
version('0.4-1', 'e7b22e212afbb2cbb88bab937f93e55a')
depends_on('r-dbi', type=('build', 'run'))
depends_on('postgresql')
| lgpl-2.1 |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/__future__.py | 134 | 4584 | """Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Feature instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
all_feature_names = [
"nested_scopes",
"generators",
"division",
"absolute_import",
"with_statement",
"print_function",
"unicode_literals",
"barry_as_FLUFL",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x40000 # barry_as_FLUFL
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
(2, 3, 0, "final", 0),
CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(3, 0, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_UNICODE_LITERALS)
barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
(3, 9, 0, "alpha", 0),
CO_FUTURE_BARRY_AS_BDFL)
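# Illustrative example (not part of the original module): a feature's
# compiler_flag can be passed as the fourth argument to compile() to enable
# the feature in dynamically compiled code, e.g. true division on Python 2:
#
#     import __future__
#     code = compile("x = 1 / 2", "<example>", "exec",
#                    __future__.division.compiler_flag)
#     ns = {}
#     exec(code, ns)  # ns['x'] == 0.5 even on Python 2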
| lgpl-3.0 |
nils-wisiol/pypuf | pypuf/property_test/example.py | 1 | 3649 | """This module is used to store some examples for the documentation"""
from numpy import array, reshape
from pypuf.simulation.arbiter_based.ltfarray import NoisyLTFArray
from pypuf.property_test.base import PropertyTest
from pypuf.tools import sample_inputs
def main():
"""This method is used to execute all example functions."""
example_reliability()
example_reliability_statistic()
example_uniqueness()
example_uniqueness_statistic()
def example_reliability():
"""This method shows how to use the PropertyTest.reliability function."""
n = 8
k = 8
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instance = NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
)
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
reliability = PropertyTest.reliability(instance, reshape(challenge, (1, n)))
print('The reliability is {}.'.format(reliability))
def example_reliability_statistic():
"""This method shows hot to use the PropertyTest.reliability_statistic."""
n = 8
k = 1
N = 2 ** n
instance_count = 3
measurements = 100
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
) for _ in range(instance_count)
]
challenges = array(list(sample_inputs(n, N)))
property_test = PropertyTest(instances)
reliability_statistic = property_test.reliability_statistic(challenges, measurements=measurements)
print('The reliability statistic is {}.'.format(reliability_statistic))
def example_uniqueness():
"""
This method shows how to use the PropertyTest.uniqueness function to calculate the uniqueness of a set of simulation instances.
"""
n = 8
k = 1
instance_count = 3
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
) for _ in range(instance_count)
]
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
uniqueness = PropertyTest.uniqueness(instances, reshape(challenge, (1, n)))
print('The uniqueness is {}.'.format(uniqueness))
def example_uniqueness_statistic():
"""This method shows the uniqueness statistic function."""
n = 8
k = 1
N = 2 ** n
instance_count = 11
measurements = 1
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
) for _ in range(instance_count)
]
challenges = array(list(sample_inputs(n, N)))
property_test = PropertyTest(instances)
uniqueness_statistic = property_test.uniqueness_statistic(challenges, measurements=measurements)
print('The uniqueness statistic is {}.'.format(uniqueness_statistic))
if __name__ == '__main__':
main()
| gpl-3.0 |
40223243/2015cd_midterm2 | 2015cd_midterm-master/static/Brython3.1.1-20150328-091302/Lib/xml/dom/pulldom.py | 850 | 11761 | import xml.sax
import xml.sax.handler
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
from xml.dom import XML_NAMESPACE
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or None
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName, attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ",e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print(exception)
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
if not hasattr(self.parser, 'feed'):
self.getEvent = self._slurp
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def __next__(self):
rc = self.getEvent()
if rc:
return rc
raise StopIteration
def __iter__(self):
return self
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
# use IncrementalParser interface, so we get the desired
# pull effect
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def _slurp(self):
""" Fallback replacement for getEvent() using the
standard SAX2 interface, which means we slurp the
SAX events into memory (no performance gain, but
we are compatible with all SAX parsers).
"""
self.parser.parse(self.stream)
self.getEvent = self._emit
return self._emit()
def _emit(self):
""" Fallback replacement for getEvent() that emits
the events that _slurp() read previously.
"""
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"""clear(): Explicitly release parsing objects"""
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
def startElementNS(self, name, tagName, attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if isinstance(stream_or_string, str):
stream = open(stream_or_string, 'rb')
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
from io import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
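# Illustrative usage (not part of the original module): iterate the event
# stream and expand a subtree into a regular DOM node on demand.
#
#     from xml.dom import pulldom
#     doc = pulldom.parseString("<root><item>text</item></root>")
#     for event, node in doc:
#         if event == pulldom.START_ELEMENT and node.tagName == "item":
#             doc.expandNode(node)  # pulls the <item> subtree into `node`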
| agpl-3.0 |
SUSE/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint.py | 2 | 1209 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResolvedServiceEndpoint(Model):
"""Endpoint of a resolved service partition.
:param kind: Possible values include: 'Invalid', 'Stateless',
'StatefulPrimary', 'StatefulSecondary'
:type kind: str
:param address: The address of the endpoint. If the endpoint has multiple
listeners the address is a JSON object with one property per listener
with the value as the address of that listener.
:type address: str
"""
_attribute_map = {
'kind': {'key': 'Kind', 'type': 'str'},
'address': {'key': 'Address', 'type': 'str'},
}
def __init__(self, kind=None, address=None):
self.kind = kind
self.address = address
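# Illustrative construction (not part of the generated code): for an endpoint
# with multiple listeners, the address is a JSON object keyed by listener name.
#
#     endpoint = ResolvedServiceEndpoint(
#         kind='Stateless',
#         address='{"Endpoints":{"Listener1":"http://10.0.0.1:80"}}')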
| mit |
gennad/Django-nonrel-stub-for-Google-App-Engine | django/contrib/gis/gdal/srs.py | 291 | 11717 | """
The SpatialReference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print srs
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print srs.ellipsoid
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print srs.projected, srs.geographic
False True
>>> srs.import_epsg(32140)
>>> print srs.name
NAD83 / Texas South Central
"""
import re
from ctypes import byref, c_char_p, c_int, c_void_p
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
buf = c_char_p('')
srs_type = 'user'
if isinstance(srs_input, basestring):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, unicode):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, (int, long)):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr: capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]')
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print srs['GEOGCS']
WGS 84
>>> print srs['DATUM']
WGS_1984
>>> print srs['AUTHORITY']
EPSG
>>> print srs['AUTHORITY', 1] # The authority value
4326
>>> print srs['TOWGS84', 4] # the fourth value in this wkt
0
>>> print srs['UNIT|AUTHORITY'] # For the units authority, have to use the pipe symbole.
EPSG
>>> print srs['UNIT|AUTHORITY', 1] # The authority value for the untis
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, basestring) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, target, index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, target)
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, target)
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected: return self.attr_value('PROJCS')
elif self.geographic: return self.attr_value('GEOGCS')
elif self.local: return self.attr_value('LOCAL_CS')
else: return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
automatically determining whether to return the linear
or angular units.
"""
if self.projected or self.local:
return capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
return capi.angular_units(self.ptr, byref(c_char_p()))
else:
return (None, None)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, user_input)
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr: capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
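# Illustrative usage (not part of the original module): build a transform
# between two spatial references; the EPSG code below is just an example.
#
#     wgs84 = SpatialReference('WGS84')
#     utm13n = SpatialReference(32613)    # EPSG code for WGS 84 / UTM zone 13N
#     ct = CoordTransform(wgs84, utm13n)  # pass to OGRGeometry.transform()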
| bsd-3-clause |
wrouesnel/ansible-modules-extras | cloud/cloudstack/cs_project.py | 17 | 8497 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_project
short_description: Manages projects on Apache CloudStack based clouds.
description:
- Create, update, suspend, activate and remove projects.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the project.
required: true
display_text:
description:
- Display text of the project.
- If not specified, C(name) will be used as C(display_text).
required: false
default: null
state:
description:
- State of the project.
required: false
default: 'present'
choices: [ 'present', 'absent', 'active', 'suspended' ]
domain:
description:
- Domain the project is related to.
required: false
default: null
account:
description:
- Account the project is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a project
- local_action:
module: cs_project
name: web
# Rename a project
- local_action:
module: cs_project
name: web
display_text: my web project
# Suspend an existing project
- local_action:
module: cs_project
name: web
state: suspended
# Activate an existing project
- local_action:
module: cs_project
name: web
state: active
# Remove a project
- local_action:
module: cs_project
name: web
state: absent
'''
RETURN = '''
---
id:
description: UUID of the project.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the project.
returned: success
type: string
sample: web project
display_text:
description: Display text of the project.
returned: success
type: string
sample: web project
state:
description: State of the project.
returned: success
type: string
sample: Active
domain:
description: Domain the project is related to.
returned: success
type: string
sample: example domain
account:
description: Account the project is related to.
returned: success
type: string
sample: example account
tags:
description: List of resource tags associated with the project.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackProject(AnsibleCloudStack):
def get_project(self):
if not self.project:
project = self.module.params.get('name')
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id']]:
self.project = p
break
return self.project
def present_project(self):
project = self.get_project()
if not project:
project = self.create_project(project)
else:
project = self.update_project(project)
return project
def update_project(self, project):
args = {}
args['id'] = project['id']
args['displaytext'] = self.get_or_fallback('display_text', 'name')
if self._has_changed(args, project):
self.result['changed'] = True
if not self.module.check_mode:
project = self.cs.updateProject(**args)
if 'errortext' in project:
self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
poll_async = self.module.params.get('poll_async')
if project and poll_async:
project = self._poll_job(project, 'project')
return project
def create_project(self, project):
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['displaytext'] = self.get_or_fallback('display_text', 'name')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
if not self.module.check_mode:
project = self.cs.createProject(**args)
if 'errortext' in project:
self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
poll_async = self.module.params.get('poll_async')
if project and poll_async:
project = self._poll_job(project, 'project')
return project
def state_project(self, state=None):
project = self.get_project()
if not project:
self.module.fail_json(msg="No project named '%s' found." % self.module.params('name'))
if project['state'].lower() != state:
self.result['changed'] = True
args = {}
args['id'] = project['id']
if not self.module.check_mode:
if state == 'suspended':
project = self.cs.suspendProject(**args)
else:
project = self.cs.activateProject(**args)
if 'errortext' in project:
self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
poll_async = self.module.params.get('poll_async')
if project and poll_async:
project = self._poll_job(project, 'project')
return project
def absent_project(self):
project = self.get_project()
if project:
self.result['changed'] = True
args = {}
args['id'] = project['id']
if not self.module.check_mode:
res = self.cs.deleteProject(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self._poll_job(res, 'project')
return project
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
display_text = dict(default=None),
state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'),
domain = dict(default=None),
account = dict(default=None),
poll_async = dict(type='bool', choices=BOOLEANS, default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_project = AnsibleCloudStackProject(module)
state = module.params.get('state')
if state in ['absent']:
project = acs_project.absent_project()
elif state in ['active', 'suspended']:
project = acs_project.state_project(state=state)
else:
project = acs_project.present_project()
result = acs_project.get_result(project)
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
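# Illustrative ad-hoc invocation (not part of the module; assumes CloudStack
# credentials are configured for the `cs` library, e.g. in cloudstack.ini):
#
#     ansible localhost -m cs_project -a "name=web state=present"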
| gpl-3.0 |
LabAdvComp/dish | test/test_logging.py | 1 | 2154 | from IPython.parallel.error import CompositeError
import os
from nose.tools import assert_raises
from .utils import assert_eventually_equal
from .test_pipeline import PipelineTest
class TestLogging(PipelineTest):
def test_logging(self):
"""Test that logging information is propagated and stored
correctly.
"""
def logs_things(job, logger):
logger.info(job["description"]+"loggingtest")
self.p.map(logs_things)
# TODO abstract out this logging testing stuff
pipeline_log = open(os.path.join(self.p.logdir, "dish.log")).read()
for job in self.p.jobs:
job_log = open(os.path.join(job["workdir"],
job["description"]+".log")).read()
assert_eventually_equal(job["description"]+"loggingtest" in job_log,
True)
assert_eventually_equal(job["description"]+"loggingtest" in pipeline_log,
True)
def test_stdout_is_logged(self):
"""p.run should log stdout of the command."""
self.p.run("echo testing123")
pipeline_log = open(os.path.join(self.p.logdir, "dish.log")).read()
assert_eventually_equal("testing123" in pipeline_log, True)
for job in self.p.jobs:
job_log = open(os.path.join(job["workdir"],
job["description"]+".log")).read()
assert_eventually_equal("testing123" in job_log, True)
def test_logging_gets_traceback(self):
"""When a call fails, we should log traceback info."""
def failing(job, logger):
raise RuntimeError(job["description"]+"error")
with assert_raises(CompositeError):
self.p.map(failing)
pipeline_log = open(os.path.join(self.p.logdir, "dish.log")).read()
for job in self.p.jobs:
job_log = open(os.path.join(job["workdir"],
job["description"]+".log")).read()
assert job["description"]+"error" in job_log
assert job["description"]+"error" in pipeline_log
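# Running these tests (illustrative): they build on the PipelineTest fixtures
# imported above and can be run with e.g. `nosetests test/test_logging.py`.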
| mit |
surgebiswas/poker | PokerBots_2017/Johnny/wheel/util.py | 345 | 4890 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
"""urlsafe_b64encode without padding"""
return base64.urlsafe_b64encode(data).rstrip(binary('='))
def urlsafe_b64decode(data):
"""urlsafe_b64decode without padding"""
pad = b'=' * (4 - (len(data) & 3))
return base64.urlsafe_b64decode(data + pad)
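# Illustrative round trip (not part of the original module):
#
#     token = urlsafe_b64encode(b'wheel')  # b'd2hlZWw' (padding stripped)
#     assert urlsafe_b64decode(token) == b'wheel'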
def to_json(o):
'''Convert given data to JSON.'''
return json.dumps(o, sort_keys=True)
def from_json(j):
'''Decode a JSON payload.'''
return json.loads(j)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = { 'newline': '' }
bin = ''
return open(name, mode + bin, **nl)
try:
unicode
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, unicode):
return data.encode('utf-8')
return data
except NameError:
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, str):
return data.encode('utf-8')
return data
try:
# For encoding ascii back and forth between bytestrings, as is repeatedly
# necessary in JSON-based crypto under Python 3
unicode
def native(s):
return s
def binary(s):
if isinstance(s, unicode):
return s.encode('ascii')
return s
except NameError:
def native(s):
if isinstance(s, bytes):
return s.decode('ascii')
return s
def binary(s):
if isinstance(s, str):
return s.encode('ascii')
return s
class HashingFile(object):
def __init__(self, fd, hashtype='sha256'):
self.fd = fd
self.hashtype = hashtype
self.hash = hashlib.new(hashtype)
self.length = 0
def write(self, data):
self.hash.update(data)
self.length += len(data)
self.fd.write(data)
def close(self):
self.fd.close()
def digest(self):
if self.hashtype == 'md5':
return self.hash.hexdigest()
digest = self.hash.digest()
return self.hashtype + '=' + native(urlsafe_b64encode(digest))
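# Illustrative usage (not part of the original module): wrap a file object so
# bytes are hashed as they are written, as wheel does for RECORD entries.
#
#     with open('payload.bin', 'wb') as fd:
#         hf = HashingFile(fd)
#         hf.write(b'payload')
#     print(hf.length, hf.digest())  # 7 sha256=...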
class OrderedDefaultDict(OrderedDict):
def __init__(self, *args, **kwargs):
if not args:
self.default_factory = None
else:
if not (args[0] is None or callable(args[0])):
raise TypeError('first argument must be callable or None')
self.default_factory = args[0]
args = args[1:]
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
def __missing__ (self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = default = self.default_factory()
return default
if sys.platform == 'win32':
import ctypes.wintypes
# CSIDL_APPDATA for reference - not used here for compatibility with
# dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
CSIDL_COMMON_APPDATA=35)
def get_path(name):
SHGFP_TYPE_CURRENT = 0
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
return buf.value
def save_config_path(*resource):
appdata = get_path("CSIDL_LOCAL_APPDATA")
path = os.path.join(appdata, *resource)
if not os.path.isdir(path):
os.makedirs(path)
return path
def load_config_paths(*resource):
ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
for id in ids:
base = get_path(id)
path = os.path.join(base, *resource)
if os.path.exists(path):
yield path
else:
def save_config_path(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.save_config_path(*resource)
def load_config_paths(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
"""List of wheels matching a requirement.
:param req: The requirement to satisfy
:param wheels: List of wheels to search.
"""
try:
from pkg_resources import Distribution, Requirement
except ImportError:
raise RuntimeError("Cannot use requirements without pkg_resources")
req = Requirement.parse(req)
selected = []
for wf in wheels:
f = wf.parsed_filename
dist = Distribution(project_name=f.group("name"), version=f.group("ver"))
if dist in req:
selected.append(wf)
return selected
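# Illustrative usage (not part of the original module; WheelFile comes from
# wheel.install in the same package):
#
#     from wheel.install import WheelFile
#     wheels = [WheelFile('example-1.0-py2-none-any.whl')]
#     print(matches_requirement('example>=1.0', wheels))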
| mit |
denverfoundation/storybase | apps/storybase_user/migrations/0006_auto__add_contact.py | 1 | 14929 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Contact'
db.create_table('storybase_user_contact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('storybase.fields.ShortTextField')(blank=True)),
('info', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('storybase_user', ['Contact'])
def backwards(self, orm):
# Deleting model 'Contact'
db.delete_table('storybase_user_contact')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'storybase_user.contact': {
'Meta': {'object_name': 'Contact'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.organizationtranslation': {
'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.projecttranslation': {
'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['storybase_user']
| mit |
marionleborgne/nupic.research | projects/l2_pooling/multi_column.py | 10 | 8328 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file creates simple experiments to test multi-column L4-L2 networks.
"""
import random
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
def runLateralDisambiguation(noiseLevel=None, profile=False):
"""
Runs a simple experiment where two objects share a (location, feature) pair.
At inference, one column sees that ambiguous pair, and the other sees a
unique one. We should see the first column rapidly converge to a
unique representation.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"lateral_disambiguation",
numCorticalColumns=2,
)
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.addObject([(1, 1), (2, 2)])
objects.addObject([(1, 1), (3, 2)])
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile()
inferConfig = {
"noiseLevel": noiseLevel,
"numSteps": 6,
"pairs": {
# this should activate 0 and 1
0: [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
# this should activate 1
1: [(3, 2), (3, 2), (3, 2), (3, 2), (3, 2), (3, 2)]
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
onePlot=False,
)
def runDisambiguationByUnions(noiseLevel=None, profile=False):
"""
Runs a simple experiment where an object is disambiguated as each column
recognizes a union of two objects, and the real object is the only
common one.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"disambiguation_unions",
numCorticalColumns=2,
)
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.addObject([(1, 1), (2, 2)])
objects.addObject([(2, 2), (3, 3)])
objects.addObject([(3, 3), (4, 4)])
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile()
inferConfig = {
"numSteps": 6,
"noiseLevel": noiseLevel,
"pairs": {
# this should activate 1 and 2
0: [(2, 2), (2, 2), (2, 2), (2, 2), (2, 2), (2, 2)],
# this should activate 2 and 3
1: [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)]
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
onePlot=False,
)
def runStretch(noiseLevel=None, profile=False):
"""
Stretch test that learns a lot of objects.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"stretch_L10_F10_C2",
numCorticalColumns=2,
)
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.createRandomObjects(10, 10, numLocations=10, numFeatures=10)
print "Objects are:"
for object, pairs in objects.objects.iteritems():
print str(object) + ": " + str(pairs)
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile(reset=True)
# For inference, we will check and plot convergence for object 0. We create a
# sequence of random sensations for each column. We will present each
# sensation for 4 time steps to let it settle and ensure it converges.
objectCopy1 = [pair for pair in objects[0]]
objectCopy2 = [pair for pair in objects[0]]
objectCopy3 = [pair for pair in objects[0]]
random.shuffle(objectCopy1)
random.shuffle(objectCopy2)
random.shuffle(objectCopy3)
# stay multiple steps on each sensation
objectSensations1 = []
for pair in objectCopy1:
for _ in xrange(4):
objectSensations1.append(pair)
# stay multiple steps on each sensation
objectSensations2 = []
for pair in objectCopy2:
for _ in xrange(4):
objectSensations2.append(pair)
# stay multiple steps on each sensation
objectSensations3 = []
for pair in objectCopy3:
for _ in xrange(4):
objectSensations3.append(pair)
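# The three blocks above repeat the same recipe; a hedged refactoring
# sketch (illustrative only, not part of the original experiment code):
# def repeatSensations(pairs, steps=4):
#     return [pair for pair in pairs for _ in xrange(steps)]
# objectSensations1 = repeatSensations(objectCopy1)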
inferConfig = {
"numSteps": len(objectSensations1),
"noiseLevel": noiseLevel,
"pairs": {
0: objectSensations1,
1: objectSensations2,
# 2: objectSensations3, # Uncomment for 3 columns
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
onePlot=False,
)
def runAmbiguities(noiseLevel=None, profile=False):
"""
Runs an experiment where three objects that share many patterns are
learned. At inference, only one object is moved over, and we should
see quick convergence.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"ambiguities",
numCorticalColumns=2,
)
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.addObject([(1, 1), (2, 1), (3, 3)])
objects.addObject([(2, 2), (3, 3), (2, 1)])
objects.addObject([(3, 1), (2, 1), (1, 2)])
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile()
inferConfig = {
"numSteps": 6,
"noiseLevel": noiseLevel,
"pairs": {
0: [(2, 1), (2, 1), (3, 3), (2, 2), (2, 2), (2, 2)],
1: [(3, 3), (3, 3), (3, 3), (2, 2), (2, 1), (2, 1)]
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
onePlot=False,
)
if __name__ == "__main__":
# simple disambiguation by another cortical column
runLateralDisambiguation()
# disambiguation between two cortical columns on ambiguous patterns
runDisambiguationByUnions()
# stretch experiment with a lot of objects
runStretch()
# experiment with a lot of ambiguities between patterns and objects
runAmbiguities()
| agpl-3.0 |
omaciel/mangonel | mangonel/system.py | 1 | 2191 | from common import *
import datetime
import json
import sys
import time
try:
from katello.client.api.system import SystemAPI
except ImportError, e:
print "Please install Katello CLI package."
sys.exit(-1)
class System(SystemAPI):
def __init__(self):
super(System, self).__init__()
def create(self, org, env, name=None, ak=None, type='system',
release=None, sla=None, facts=None, view_id=None, installed_products=None):
if name is None:
name = "%s.example.com" % generate_name(8)
if facts is None:
facts = generate_facts(name)
sys1 = super(System, self).register(name, org['label'], env['id'], ak, type, release, sla, facts, view_id, installed_products)
logger.debug("Created system '%s'" % sys1['name'])
return sys1
def get_or_create_system(self, org, env, name=None, ak=None, type='system',
release=None, sla=None, facts=None, view_id=None, installed_products=None):
system = None # renamed to avoid shadowing the imported `sys` module
query = {}
if name is not None:
query['name'] = name
if query != {}:
systems = super(System, self).systems_by_env(env['id'], query)
if systems != []:
system = systems[0]
# create when no name was given or when the lookup found nothing
if system is None:
system = self.create(org, env, name, ak, type,
release, sla, facts, view_id, installed_products)
return system
def delete_system(self, system):
return super(System, self).unregister(system['uuid'])
def checkin(self, system):
return super(System, self).checkin(system['uuid'])
def update_packages(self, system, packages=None):
if packages is None:
packages = packages_list()
return super(System, self).update_packages(system['uuid'], packages)
def available_pools(self, sId, match_system=False, match_installed=False, no_overlap=False):
return super(System, self).available_pools(sId, match_system, match_installed, no_overlap)['pools']
def subscribe(self, sId, pool=None, qty=1):
return super(System, self).subscribe(sId, pool, qty)
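# Minimal usage sketch (illustrative; assumes a reachable Katello server and
# that the `org` and `env` dicts were fetched beforehand via the Katello
# Organization/Environment APIs -- both names are placeholders):
#
# api = System()
# host = api.get_or_create_system(org, env, name='host1.example.com')
# api.checkin(host)
# api.delete_system(host)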
| gpl-2.0 |
romykundal/meanjs-multiuploader | node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py | 1843 | 1786 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
| mit |
SamaraCardoso27/eMakeup | backend/venv/lib/python2.7/site-packages/unidecode/x029.py | 165 | 3584 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'{', # 0x83
'} ', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
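# Illustrative note (assumption about this module's role in unidecode): the
# tuple above is indexed by the low byte of a U+29xx code point, and only
# offsets 0x83/0x84 (the white curly brackets) map to non-empty ASCII:
#
# from unidecode import unidecode
# unidecode(u'\u2983') # -> '{'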
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/webob/cookies.py | 5 | 29357 | import collections
import base64
import binascii
import hashlib
import hmac
import json
from datetime import (
date,
datetime,
timedelta,
)
import re
import string
import time
import warnings
from webob.compat import (
PY3,
text_type,
bytes_,
text_,
native_,
string_types,
)
from webob.util import strings_differ
__all__ = ['Cookie', 'CookieProfile', 'SignedCookieProfile', 'SignedSerializer',
'JSONSerializer', 'Base64Serializer', 'make_cookie']
_marker = object()
class RequestCookies(collections.MutableMapping):
_cache_key = 'webob._parsed_cookies'
def __init__(self, environ):
self._environ = environ
@property
def _cache(self):
env = self._environ
header = env.get('HTTP_COOKIE', '')
cache, cache_header = env.get(self._cache_key, ({}, None))
if cache_header == header:
return cache
d = lambda b: b.decode('utf8')
cache = dict((d(k), d(v)) for k,v in parse_cookie(header))
env[self._cache_key] = (cache, header)
return cache
def _mutate_header(self, name, value):
header = self._environ.get('HTTP_COOKIE')
had_header = header is not None
header = header or ''
if PY3: # pragma: no cover
header = header.encode('latin-1')
bytes_name = bytes_(name, 'ascii')
if value is None:
replacement = None
else:
bytes_val = _value_quote(bytes_(value, 'utf-8'))
replacement = bytes_name + b'=' + bytes_val
matches = _rx_cookie.finditer(header)
found = False
for match in matches:
start, end = match.span()
match_name = match.group(1)
if match_name == bytes_name:
found = True
if replacement is None: # remove value
header = header[:start].rstrip(b' ;') + header[end:]
else: # replace value
header = header[:start] + replacement + header[end:]
break
else:
if replacement is not None:
if header:
header += b'; ' + replacement
else:
header = replacement
if header:
self._environ['HTTP_COOKIE'] = native_(header, 'latin-1')
elif had_header:
self._environ['HTTP_COOKIE'] = ''
return found
def _valid_cookie_name(self, name):
if not isinstance(name, string_types):
raise TypeError(name, 'cookie name must be a string')
if not isinstance(name, text_type):
name = text_(name, 'utf-8')
try:
bytes_cookie_name = bytes_(name, 'ascii')
except UnicodeEncodeError:
raise TypeError('cookie name must be encodable to ascii')
if not _valid_cookie_name(bytes_cookie_name):
raise TypeError('cookie name must be valid according to RFC 6265')
return name
def __setitem__(self, name, value):
name = self._valid_cookie_name(name)
if not isinstance(value, string_types):
raise ValueError(value, 'cookie value must be a string')
if not isinstance(value, text_type):
try:
value = text_(value, 'utf-8')
except UnicodeDecodeError:
raise ValueError(
value, 'cookie value must be utf-8 binary or unicode')
self._mutate_header(name, value)
def __getitem__(self, name):
return self._cache[name]
def get(self, name, default=None):
return self._cache.get(name, default)
def __delitem__(self, name):
name = self._valid_cookie_name(name)
found = self._mutate_header(name, None)
if not found:
raise KeyError(name)
def keys(self):
return self._cache.keys()
def values(self):
return self._cache.values()
def items(self):
return self._cache.items()
if not PY3:
def iterkeys(self):
return self._cache.iterkeys()
def itervalues(self):
return self._cache.itervalues()
def iteritems(self):
return self._cache.iteritems()
def __contains__(self, name):
return name in self._cache
def __iter__(self):
return self._cache.__iter__()
def __len__(self):
return len(self._cache)
def clear(self):
self._environ['HTTP_COOKIE'] = ''
def __repr__(self):
return '<RequestCookies (dict-like) with values %r>' % (self._cache,)
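# Usage sketch (illustrative): RequestCookies keeps the raw HTTP_COOKIE
# header in sync with dict-style access.
#
# environ = {'HTTP_COOKIE': 'a=1'}
# cookies = RequestCookies(environ)
# cookies['b'] = '2' # environ['HTTP_COOKIE'] is now 'a=1; b=2'
# del cookies['b'] # environ['HTTP_COOKIE'] is back to 'a=1'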
class Cookie(dict):
def __init__(self, input=None):
if input:
self.load(input)
def load(self, data):
morsel = {}
for key, val in _parse_cookie(data):
if key.lower() in _c_keys:
morsel[key] = val
else:
morsel = self.add(key, val)
def add(self, key, val):
if not isinstance(key, bytes):
key = key.encode('ascii', 'replace')
if not _valid_cookie_name(key):
return {}
r = Morsel(key, val)
dict.__setitem__(self, key, r)
return r
__setitem__ = add
def serialize(self, full=True):
return '; '.join(m.serialize(full) for m in self.values())
def values(self):
return [m for _, m in sorted(self.items())]
__str__ = serialize
def __repr__(self):
return '<%s: [%s]>' % (self.__class__.__name__,
', '.join(map(repr, self.values())))
def _parse_cookie(data):
if PY3: # pragma: no cover
data = data.encode('latin-1')
for key, val in _rx_cookie.findall(data):
yield key, _unquote(val)
def parse_cookie(data):
"""
Parse cookies ignoring anything except names and values
"""
return ((k,v) for k,v in _parse_cookie(data) if _valid_cookie_name(k))
def cookie_property(key, serialize=lambda v: v):
def fset(self, v):
self[key] = serialize(v)
return property(lambda self: self[key], fset)
def serialize_max_age(v):
if isinstance(v, timedelta):
v = str(v.seconds + v.days*24*60*60)
elif isinstance(v, int):
v = str(v)
return bytes_(v)
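# e.g. (illustrative) serialize_max_age(timedelta(hours=1)) == b'3600'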
def serialize_cookie_date(v):
if v is None:
return None
elif isinstance(v, bytes):
return v
elif isinstance(v, text_type):
return v.encode('ascii')
elif isinstance(v, int):
v = timedelta(seconds=v)
if isinstance(v, timedelta):
v = datetime.utcnow() + v
if isinstance(v, (datetime, date)):
v = v.timetuple()
r = time.strftime('%%s, %d-%%s-%Y %H:%M:%S GMT', v)
return bytes_(r % (weekdays[v[6]], months[v[1]]), 'ascii')
class Morsel(dict):
__slots__ = ('name', 'value')
def __init__(self, name, value):
self.name = bytes_(name, encoding='ascii')
self.value = bytes_(value, encoding='ascii')
assert _valid_cookie_name(self.name)
self.update(dict.fromkeys(_c_keys, None))
path = cookie_property(b'path')
domain = cookie_property(b'domain')
comment = cookie_property(b'comment')
expires = cookie_property(b'expires', serialize_cookie_date)
max_age = cookie_property(b'max-age', serialize_max_age)
httponly = cookie_property(b'httponly', bool)
secure = cookie_property(b'secure', bool)
def __setitem__(self, k, v):
k = bytes_(k.lower(), 'ascii')
if k in _c_keys:
dict.__setitem__(self, k, v)
def serialize(self, full=True):
result = []
add = result.append
add(self.name + b'=' + _value_quote(self.value))
if full:
for k in _c_valkeys:
v = self[k]
if v:
info = _c_renames[k]
name = info['name']
quoter = info['quoter']
add(name + b'=' + quoter(v))
expires = self[b'expires']
if expires:
add(b'expires=' + expires)
if self.secure:
add(b'secure')
if self.httponly:
add(b'HttpOnly')
return native_(b'; '.join(result), 'ascii')
__str__ = serialize
def __repr__(self):
return '<%s: %s=%r>' % (self.__class__.__name__,
native_(self.name),
native_(self.value)
)
#
# parsing
#
_re_quoted = r'"(?:\\"|.)*?"' # any doublequoted string
_legal_special_chars = "~!@#$%^&*()_+=-`.?|:/(){}<>'"
_re_legal_char = r"[\w\d%s]" % re.escape(_legal_special_chars)
_re_expires_val = r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT"
_re_cookie_str_key = r"(%s+?)" % _re_legal_char
_re_cookie_str_equal = r"\s*=\s*"
_re_unquoted_val = r"(?:%s|\\(?:[0-3][0-7][0-7]|.))*" % _re_legal_char
_re_cookie_str_val = r"(%s|%s|%s)" % (_re_quoted, _re_expires_val,
_re_unquoted_val)
_re_cookie_str = _re_cookie_str_key + _re_cookie_str_equal + _re_cookie_str_val
_rx_cookie = re.compile(bytes_(_re_cookie_str, 'ascii'))
_rx_unquote = re.compile(bytes_(r'\\([0-3][0-7][0-7]|.)', 'ascii'))
_bchr = (lambda i: bytes([i])) if PY3 else chr
_ch_unquote_map = dict((bytes_('%03o' % i), _bchr(i))
for i in range(256)
)
_ch_unquote_map.update((v, v) for v in list(_ch_unquote_map.values()))
_b_dollar_sign = ord('$') if PY3 else '$'
_b_quote_mark = ord('"') if PY3 else '"'
def _unquote(v):
#assert isinstance(v, bytes)
if v and v[0] == v[-1] == _b_quote_mark:
v = v[1:-1]
return _rx_unquote.sub(_ch_unquote, v)
def _ch_unquote(m):
return _ch_unquote_map[m.group(1)]
#
# serializing
#
# these chars can be in cookie value see
# http://tools.ietf.org/html/rfc6265#section-4.1.1 and
# https://github.com/Pylons/webob/pull/104#issuecomment-28044314
#
# ! (0x21), "#$%&'()*+" (0x25-0x2B), "-./0123456789:" (0x2D-0x3A),
# "<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[" (0x3C-0x5B),
# "]^_`abcdefghijklmnopqrstuvwxyz{|}~" (0x5D-0x7E)
_allowed_special_chars = "!#$%&'()*+-./:<=>?@[]^_`{|}~"
_allowed_cookie_chars = (string.ascii_letters + string.digits +
_allowed_special_chars)
_allowed_cookie_bytes = bytes_(_allowed_cookie_chars)
# these are the characters accepted in cookie *names*
# From http://tools.ietf.org/html/rfc2616#section-2.2:
# token = 1*<any CHAR except CTLs or separators>
# separators = "(" | ")" | "<" | ">" | "@"
# | "," | ";" | ":" | "\" | <">
# | "/" | "[" | "]" | "?" | "="
# | "{" | "}" | SP | HT
#
# CTL = <any US-ASCII control character
# (octets 0 - 31) and DEL (127)>
#
_valid_token_chars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_valid_token_bytes = bytes_(_valid_token_chars)
# this is a map used to escape the values
_escape_noop_chars = _allowed_cookie_chars + ' '
_escape_map = dict((chr(i), '\\%03o' % i) for i in range(256))
_escape_map.update(zip(_escape_noop_chars, _escape_noop_chars))
if PY3: # pragma: no cover
# convert to {int -> bytes}
_escape_map = dict(
(ord(k), bytes_(v, 'ascii')) for k, v in _escape_map.items()
)
_escape_char = _escape_map.__getitem__
weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
months = (None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')
# This is temporary, until we can remove this from _value_quote
_should_raise = None
def __warn_or_raise(text, warn_class, to_raise, raise_reason):
if _should_raise:
raise to_raise(raise_reason)
else:
warnings.warn(text, warn_class, stacklevel=2)
def _value_quote(v):
# This looks scary, but is simple. We strip all valid characters from the
# string; if anything is left over (the remainder has non-zero length), the
# value contains invalid characters.
leftovers = v.translate(None, _allowed_cookie_bytes)
if leftovers:
__warn_or_raise(
"Cookie value contains invalid bytes: (%s). Future versions "
"will raise ValueError upon encountering invalid bytes." %
(leftovers,),
RuntimeWarning, ValueError, 'Invalid characters in cookie value'
)
#raise ValueError('Invalid characters in cookie value')
return b'"' + b''.join(map(_escape_char, v)) + b'"'
return v
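# e.g. (illustrative) _value_quote(b'ab') == b'ab', while _value_quote(b'a b')
# emits a RuntimeWarning and returns the escaped form b'"a b"'.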
def _valid_cookie_name(key):
return isinstance(key, bytes) and not (
key.translate(None, _valid_token_bytes)
# Not explicitly required by RFC6265, may consider removing later:
or key[0] == _b_dollar_sign
or key.lower() in _c_keys
)
def _path_quote(v):
return b''.join(map(_escape_char, v))
_domain_quote = _path_quote
_max_age_quote = _path_quote
_c_renames = {
b"path" : {'name':b"Path", 'quoter':_path_quote},
b"comment" : {'name':b"Comment", 'quoter':_value_quote},
b"domain" : {'name':b"Domain", 'quoter':_domain_quote},
b"max-age" : {'name':b"Max-Age", 'quoter':_max_age_quote},
}
_c_valkeys = sorted(_c_renames)
_c_keys = set(_c_renames)
_c_keys.update([b'expires', b'secure', b'httponly'])
def make_cookie(name, value, max_age=None, path='/', domain=None,
secure=False, httponly=False, comment=None):
""" Generate a cookie value. If ``value`` is None, generate a cookie value
with an expiration date in the past"""
# We are deleting the cookie, override max_age and expires
if value is None:
value = b''
# Note that the max-age value of zero is technically contraspec;
# RFC6265 says that max-age cannot be zero. However, all browsers
# appear to support this to mean "delete immediately".
# http://www.timwilson.id.au/news-three-critical-problems-with-rfc6265.html
max_age = 0
expires = 'Wed, 31-Dec-97 23:59:59 GMT'
# Convert max_age to seconds
elif isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
expires = max_age
elif max_age is not None:
try:
max_age = int(max_age)
except ValueError:
raise ValueError('max_age should be an integer. Amount of seconds until expiration.')
expires = max_age
else:
expires = None
morsel = Morsel(name, value)
if domain is not None:
morsel.domain = bytes_(domain)
if path is not None:
morsel.path = bytes_(path)
if httponly:
morsel.httponly = True
if secure:
morsel.secure = True
if max_age is not None:
morsel.max_age = max_age
if expires is not None:
morsel.expires = expires
if comment is not None:
morsel.comment = bytes_(comment)
return morsel.serialize()
class JSONSerializer(object):
""" A serializer which uses `json.dumps`` and ``json.loads``"""
def dumps(self, appstruct):
return bytes_(json.dumps(appstruct), encoding='utf-8')
def loads(self, bstruct):
# NB: json.loads raises ValueError if no json object can be decoded
# so we don't have to do it explicitly here.
return json.loads(text_(bstruct, encoding='utf-8'))
class Base64Serializer(object):
""" A serializer which uses base64 to encode/decode data"""
def __init__(self, serializer=None):
if serializer is None:
serializer = JSONSerializer()
self.serializer = serializer
def dumps(self, appstruct):
"""
Given an ``appstruct``, serialize and sign the data.
Returns a bytestring.
"""
cstruct = self.serializer.dumps(appstruct) # will be bytes
return base64.urlsafe_b64encode(cstruct)
def loads(self, bstruct):
"""
Given a ``bstruct`` (a bytestring), verify the signature and then
deserialize and return the deserialized value.
A ``ValueError`` will be raised if the signature fails to validate.
"""
try:
cstruct = base64.urlsafe_b64decode(bytes_(bstruct))
except (binascii.Error, TypeError) as e:
raise ValueError('Badly formed base64 data: %s' % e)
return self.serializer.loads(cstruct)
class SignedSerializer(object):
"""
A helper to cryptographically sign arbitrary content using HMAC.
The serializer accepts arbitrary functions for performing the actual
serialization and deserialization.
``secret``
A string which is used to sign the cookie. The secret should be at
least as long as the block size of the selected hash algorithm. For
``sha512`` this would mean a 128 bit (64 character) secret.
``salt``
A namespace to avoid collisions between different uses of a shared
secret.
``hashalg``
The HMAC digest algorithm to use for signing. The algorithm must be
supported by the :mod:`hashlib` library. Default: ``'sha512'``.
``serializer``
An object with two methods: ``loads`` and ``dumps``. The ``loads`` method
should accept bytes and return a Python object. The ``dumps`` method
should accept a Python object and return bytes. A ``ValueError`` should
be raised for malformed inputs. Default: ``None``, which will use a
derivation of :func:`json.dumps` and :func:`json.loads`.
"""
def __init__(self,
secret,
salt,
hashalg='sha512',
serializer=None,
):
self.salt = salt
self.secret = secret
self.hashalg = hashalg
try:
# bwcompat with webob <= 1.3.1, leave latin-1 as the default
self.salted_secret = bytes_(salt or '') + bytes_(secret)
except UnicodeEncodeError:
self.salted_secret = (
bytes_(salt or '', 'utf-8') + bytes_(secret, 'utf-8'))
self.digestmod = lambda string=b'': hashlib.new(self.hashalg, string)
self.digest_size = self.digestmod().digest_size
if serializer is None:
serializer = JSONSerializer()
self.serializer = serializer
def dumps(self, appstruct):
"""
Given an ``appstruct``, serialize and sign the data.
Returns a bytestring.
"""
cstruct = self.serializer.dumps(appstruct) # will be bytes
sig = hmac.new(self.salted_secret, cstruct, self.digestmod).digest()
return base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=')
def loads(self, bstruct):
"""
Given a ``bstruct`` (a bytestring), verify the signature and then
deserialize and return the deserialized value.
A ``ValueError`` will be raised if the signature fails to validate.
"""
try:
b64padding = b'=' * (-len(bstruct) % 4)
fstruct = base64.urlsafe_b64decode(bytes_(bstruct) + b64padding)
except (binascii.Error, TypeError) as e:
raise ValueError('Badly formed base64 data: %s' % e)
cstruct = fstruct[self.digest_size:]
expected_sig = fstruct[:self.digest_size]
sig = hmac.new(
self.salted_secret, bytes_(cstruct), self.digestmod).digest()
if strings_differ(sig, expected_sig):
raise ValueError('Invalid signature')
return self.serializer.loads(cstruct)
_default = object()
class CookieProfile(object):
"""
A helper class that helps bring some sanity to the insanity that is cookie
handling.
The helper is capable of generating multiple cookies if necessary to
support subdomains and parent domains.
``cookie_name``
The name of the cookie used for sessioning. Default: ``'session'``.
``max_age``
The maximum age of the cookie used for sessioning (in seconds).
Default: ``None`` (browser scope).
``secure``
The 'secure' flag of the session cookie. Default: ``False``.
``httponly``
Hide the cookie from Javascript by setting the 'HttpOnly' flag of the
session cookie. Default: ``False``.
``path``
The path used for the session cookie. Default: ``'/'``.
``domains``
The domain(s) used for the session cookie. Default: ``None`` (no domain).
Can be passed an iterable containing multiple domains, this will set
multiple cookies one for each domain.
``serializer``
An object with two methods: ``loads`` and ``dumps``. The ``loads`` method
should accept a bytestring and return a Python object. The ``dumps``
method should accept a Python object and return bytes. A ``ValueError``
should be raised for malformed inputs. Default: ``None``, which will use
a derivation of :func:`json.dumps` and :func:`json.loads`.
"""
def __init__(self,
cookie_name,
secure=False,
max_age=None,
httponly=None,
path='/',
domains=None,
serializer=None
):
self.cookie_name = cookie_name
self.secure = secure
self.max_age = max_age
self.httponly = httponly
self.path = path
self.domains = domains
if serializer is None:
serializer = Base64Serializer()
self.serializer = serializer
self.request = None
def __call__(self, request):
""" Bind a request to a copy of this instance and return it"""
return self.bind(request)
def bind(self, request):
""" Bind a request to a copy of this instance and return it"""
selfish = CookieProfile(
self.cookie_name,
self.secure,
self.max_age,
self.httponly,
self.path,
self.domains,
self.serializer,
)
selfish.request = request
return selfish
def get_value(self):
""" Looks for a cookie by name in the currently bound request, and
returns its value. If the cookie profile is not bound to a request,
this method will raise a :exc:`ValueError`.
Looks for the cookie in the cookies jar, and if it can find it it will
attempt to deserialize it. Returns ``None`` if there is no cookie or
if the value in the cookie cannot be successfully deserialized.
"""
if not self.request:
raise ValueError('No request bound to cookie profile')
cookie = self.request.cookies.get(self.cookie_name)
if cookie is not None:
try:
return self.serializer.loads(bytes_(cookie))
except ValueError:
return None
def set_cookies(self, response, value, domains=_default, max_age=_default,
path=_default, secure=_default, httponly=_default):
""" Set the cookies on a response."""
cookies = self.get_headers(
value,
domains=domains,
max_age=max_age,
path=path,
secure=secure,
httponly=httponly
)
response.headerlist.extend(cookies)
return response
def get_headers(self, value, domains=_default, max_age=_default,
path=_default, secure=_default, httponly=_default):
""" Retrieve raw headers for setting cookies.
Returns a list of headers that should be set for the cookies to
be correctly tracked.
"""
if value is None:
max_age = 0
bstruct = None
else:
bstruct = self.serializer.dumps(value)
return self._get_cookies(
bstruct,
domains=domains,
max_age=max_age,
path=path,
secure=secure,
httponly=httponly
)
def _get_cookies(self, value, domains, max_age, path, secure, httponly):
"""Internal function
This returns a list of cookies that are valid HTTP Headers.
:environ: The request environment
:value: The value to store in the cookie
:domains: The domains, overrides any set in the CookieProfile
:max_age: The max_age, overrides any set in the CookieProfile
:path: The path, overrides any set in the CookieProfile
:secure: Set this cookie to secure, overrides any set in CookieProfile
:httponly: Set this cookie to HttpOnly, overrides any set in CookieProfile
"""
# If the user doesn't provide values, grab the defaults
if domains is _default:
domains = self.domains
if max_age is _default:
max_age = self.max_age
if path is _default:
path = self.path
if secure is _default:
secure = self.secure
if httponly is _default:
httponly = self.httponly
# Length selected based upon http://browsercookielimits.x64.me
if value is not None and len(value) > 4093:
raise ValueError(
'Cookie value is too long to store (%s bytes)' %
len(value)
)
cookies = []
if not domains:
cookievalue = make_cookie(
self.cookie_name,
value,
path=path,
max_age=max_age,
httponly=httponly,
secure=secure
)
cookies.append(('Set-Cookie', cookievalue))
else:
for domain in domains:
cookievalue = make_cookie(
self.cookie_name,
value,
path=path,
domain=domain,
max_age=max_age,
httponly=httponly,
secure=secure,
)
cookies.append(('Set-Cookie', cookievalue))
return cookies
class SignedCookieProfile(CookieProfile):
"""
A helper for generating cookies that are signed to prevent tampering.
By default this will create a single cookie, given a value it will
serialize it, then use HMAC to cryptographically sign the data. Finally
the result is base64-encoded for transport. This way a remote user can
not tamper with the value without uncovering the secret/salt used.
``secret``
A string which is used to sign the cookie. The secret should be at
least as long as the block size of the selected hash algorithm. For
``sha512`` this would mean a 128 bit (64 character) secret.
``salt``
A namespace to avoid collisions between different uses of a shared
secret.
``hashalg``
The HMAC digest algorithm to use for signing. The algorithm must be
supported by the :mod:`hashlib` library. Default: ``'sha512'``.
``cookie_name``
The name of the cookie used for sessioning. Default: ``'session'``.
``max_age``
The maximum age of the cookie used for sessioning (in seconds).
Default: ``None`` (browser scope).
``secure``
The 'secure' flag of the session cookie. Default: ``False``.
``httponly``
Hide the cookie from Javascript by setting the 'HttpOnly' flag of the
session cookie. Default: ``False``.
``path``
The path used for the session cookie. Default: ``'/'``.
``domains``
The domain(s) used for the session cookie. Default: ``None`` (no domain).
Can be passed an iterable containing multiple domains, this will set
multiple cookies one for each domain.
``serializer``
An object with two methods: ``loads`` and ``dumps``. The ``loads`` method
should accept bytes and return a Python object. The ``dumps`` method
should accept a Python object and return bytes. A ``ValueError`` should
be raised for malformed inputs. Default: ``None``, which will use a
derivation of :func:`json.dumps` and :func:`json.loads`.
"""
def __init__(self,
secret,
salt,
cookie_name,
secure=False,
max_age=None,
httponly=False,
path="/",
domains=None,
hashalg='sha512',
serializer=None,
):
self.secret = secret
self.salt = salt
self.hashalg = hashalg
self.original_serializer = serializer
signed_serializer = SignedSerializer(
secret,
salt,
hashalg,
serializer=self.original_serializer,
)
CookieProfile.__init__(
self,
cookie_name,
secure=secure,
max_age=max_age,
httponly=httponly,
path=path,
domains=domains,
serializer=signed_serializer,
)
def bind(self, request):
""" Bind a request to a copy of this instance and return it"""
selfish = SignedCookieProfile(
self.secret,
self.salt,
self.cookie_name,
self.secure,
self.max_age,
self.httponly,
self.path,
self.domains,
self.hashalg,
self.original_serializer,
)
selfish.request = request
return selfish
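# Minimal round-trip sketch (illustrative; the secret, salt and payload are
# placeholders, not recommendations):
#
# profile = CookieProfile('session')
# headers = profile.get_headers({'user': 'alice'}) # [('Set-Cookie', 'session=...')]
# signer = SignedSerializer('s' * 64, 'auth-salt')
# token = signer.dumps({'user': 'alice'})
# assert signer.loads(token) == {u'user': u'alice'}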
| agpl-3.0 |
willzhang05/cslbot | cslbot/commands/guarded.py | 2 | 1149 | # Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from ..helpers.command import Command
@Command('guarded', ['handler'])
def cmd(send, _, args):
"""Shows the currently guarded nicks.
Syntax: {command}
"""
guarded = args['handler'].guarded
if not guarded:
send("Nobody is guarded.")
else:
send(", ".join(guarded))
| gpl-2.0 |
jmargutt/AliPhysics | PWGMM/MC/aligenqa/aligenqa/plotting.py | 41 | 45785 | from pprint import pprint
from rootpy import asrootpy, log, collection
from rootpy.plotting import Hist2D
from rootpy.io import root_open
from data_extractors import \
get_dNdeta_in_classifier_bin_interval,\
get_identified_vs_mult,\
get_correlation_histogram,\
get_PNch_vs_estmult,\
get_meanpt_vs_estmult,\
get_pT_distribution,\
get_mean_nMPI,\
get_graphs_particle_ratios_vs_refmult
from utils import \
gen_random_name,\
get_est_dirs,\
make_estimator_title,\
remap_x_values,\
remove_zero_value_points,\
remove_points_with_equal_x,\
remove_points_with_x_err_gt_1NchRef,\
percentile_bin_to_binidx_bin
from .roofie import Figure, Styles
kPROTON = str(2212)
kANTIPROTON = str(-2212)
kLAMBDA = str(3122)
kANTILAMBDA = str(-3122)
kK0S = str(310)
kKPLUS = str(321)
kKMINUS = str(-321)
kPIPLUS = str(211)
kPIMINUS = str(-211)
kPI0 = str(111)
kXI = str(3312)
kANTIXI = str(-3312)
kOMEGAMINUS = str(3334)
kOMEGAPLUS = str(-3334)
class Plotting(object):
def __init__(self, f_name, sums_dir_name, results_dir_name, percentile_bins, considered_ests):
self.f_name = f_name
self.sums_dir_name = sums_dir_name
self.results_dir_name = results_dir_name
# the last mult bin starts at a multiplicity x times larger than the mean in this estimator
# self.mean_mult_cutoff_factor = 4
self.ref_ests = ['EtaLt05', ]
self.considered_ests = considered_ests
self.perc_bins = percentile_bins
# figure out the nch edges corresponding to the percentile edges, depends on P(Nch)
self.delete_results_dir()
self.make_results_dir()
self.plot_event_counters() # needed to calculate the edges
self.nch_edges = self._find_nch_edges_from_percentile_edges()
pprint(self.nch_edges)
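# nch_edges maps each classifier to one (low, high) bin-index pair per
# percentile bin, e.g. (illustrative): {'EtaLt05': [(1, 4), (5, 12), (13, 400)]}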
# set the default style for all figures created from her on forward:
Figure.style = Styles.Presentation_half
def _io_decorator(func):
"""
Open and close the file before and after the execution of the decorated function.
The purpose is to free memory this way and to force an update of the file
before the next function call. The wrapper adds the file, sums and results_post to `self`.
"""
def wrapper(self, **kwargs):
with root_open(self.f_name, 'update') as self.f:
self.sums = self.f.MultEstimators.__getattr__(self.sums_dir_name)
try:
self.results_post = self.f.MultEstimators.__getattr__(self.results_dir_name)
except AttributeError:
# results dir does not exists (yet)
pass
return_value = func(self, **kwargs)
# Delete all TLists in sums since we own them and they would be left in memory otherwise
for obj in self.sums:
if isinstance(obj, collection.List):
obj.Delete()
self.sums.Delete()
return return_value
return wrapper
@_io_decorator
def _find_nch_edges_from_percentile_edges(self):
nch_edges = {}
estimators_to_be_removed = []
for est_dir in get_est_dirs(self.results_post, self.considered_ests):
event_counter = est_dir.event_counter
try:
nch_edges[est_dir.GetName()] = [percentile_bin_to_binidx_bin(perc_bin, event_counter)
for perc_bin in self.perc_bins[est_dir.GetName()]]
except ValueError, e:
print "Error occured for classifier " + est_dir.GetName()
print e
print self.perc_bins[est_dir.GetName()]
print "You can change the percentile bins in the beginning of this script"
print "For the following, this estimator is removed"
estimators_to_be_removed.append(est_dir.GetName())
print "Bin edges for given percentile bins"
print nch_edges
for est in estimators_to_be_removed:
del self.perc_bins[est]
del self.considered_ests[self.considered_ests.index(est)]
return nch_edges
@_io_decorator
def delete_results_dir(self):
# delete old result directory
self.f.rm('MultEstimators/' + self.results_dir_name)
self.f.Write()
@_io_decorator
def make_results_dir(self):
self.f.mkdir('MultEstimators/' + self.results_dir_name, recurse=True)
for est_dir in get_est_dirs(self.sums, self.considered_ests):
try:
resdir = self.f.MultEstimators.__getattr__(self.results_dir_name).mkdir(est_dir.GetName())
resdir.Write()
except:
pass
@_io_decorator
def plot_particle_ratios_vs_estmult(self, pids1, pids2, scale=None, ytitle=''):
ratio_vs_estmult_dir = (self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
+ '/pid_ratios_vs_estmult')
fig = Figure()
if not ytitle:
fig.ytitle = ", ".join(pids1) + " / " + ", ".join(pids2)
else:
fig.ytitle = ytitle
for est_dir in get_est_dirs(self.sums, self.considered_ests):
h3d = asrootpy(est_dir.FindObject("fNch_pT_pid"))
pids1hists = [get_identified_vs_mult(h3d, pdg) for pdg in pids1]
pids2hists = [get_identified_vs_mult(h3d, pdg) for pdg in pids2]
pids1_px = sum(pids1hists)
pids2_px = sum(pids2hists)
ratio1d = pids1_px / pids2_px
fig.xtitle = "N_{ch}|_{" + make_estimator_title(est_dir.GetName()) + "}"
if scale:
ratio1d.Scale(scale)
fig.add_plottable(ratio1d, legend_title=make_estimator_title(est_dir.GetName()))
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratio_vs_estmult_dir)
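# Call sketch (illustrative; note that _io_decorator only forwards keyword
# arguments, so positional calls would fail):
# plotting.plot_particle_ratios_vs_estmult(pids1=['321', '-321'],
#                                          pids2=['-211', '211'],
#                                          ytitle='K^{+-}/#pi^{+-}')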
@_io_decorator
def plot_event_counters(self):
log.info("Creating event counters")
for est_dir in get_est_dirs(self.sums, self.considered_ests):
results_est_dir = self.results_post.__getattr__(est_dir.GetName())
# Nasty, but just use a reference estimator here...
corr = get_correlation_histogram(self.sums, est_dir.GetName(), "EtaLt05")
counter = asrootpy(corr.ProjectionX())
counter.name = "event_counter"
path = results_est_dir.GetPath().split(":")[1] # file.root:/internal/root/path
self.f.cd(path)
results_est_dir.WriteTObject(counter)
@_io_decorator
def plot_dNdetas(self, ratio_to_mb):
# Loop over all estimators in the Sums list:
log.info("Creating dN/deta bin in multiplicity")
figs = []
for est_dir in get_est_dirs(self.sums, self.considered_ests):
# does this estimator have several multiplicity bins?
# Q2, for example only works with pythia and makes no sense to plot
# on Dipsy as it would only be the MB line
if len(self.nch_edges[est_dir.GetName()]) == 1:
continue
results_est_dir = self.results_post.Get(est_dir.GetName())
event_counter = asrootpy(results_est_dir.Get("event_counter"))
fig = Figure()
fig.plot.palette = 'colorblind'
fig.xtitle = '#eta'
fig.ytitle = 'Ratio of dN_{ch}/d#eta over MB result' if ratio_to_mb else '1/N #times dN_{ch}/d#eta'
fig.legend.title = make_estimator_title(est_dir.GetName())
fig.plot.ymin = 0
dNdeta_mb = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter,
[1, event_counter.GetXaxis().GetNbins()])
for cls_bin, perc_bin in zip(self.nch_edges[est_dir.GetName()], self.perc_bins[est_dir.GetName()]):
title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
dNdeta_in_interval = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter, cls_bin)
if ratio_to_mb:
fig.add_plottable(dNdeta_in_interval / dNdeta_mb, legend_title=title)
else:
fig.add_plottable(dNdeta_in_interval, legend_title=title)
# add MB as well, if it is not the ratio plots we are making
if not ratio_to_mb:
title = "MB"
fig.add_plottable(dNdeta_mb, legend_title=title)
path = results_est_dir.GetPath().split(":")[1] # file.root:/internal/root/path
if ratio_to_mb:
fig.save_to_root_file(self.f, "dNdeta_MB_ratio_summary", path=path)
else:
fig.save_to_root_file(self.f, "dNdeta_summary", path=path)
figs.append(fig)
return figs
@_io_decorator
def plot_pt_distribution_ratios(self):
# create particle ratio vs pT plots
log.info("Computing histograms vs pt")
results_path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
# Loop over all estimators in the Sums list:
figs = []
def get_new_figure():
fig = Figure()
fig.xtitle = 'p_{T} (GeV)'
fig.plot.ymin = 0
fig.plot.xmax = 10
fig.plot.palette = 'colorblind'
# fig.plot.palette_ncolors = len(nch_edges) - 1
fig.legend.position = 'br'
return fig
for est_dir in get_est_dirs(self.results_post, self.considered_ests):
dirname = '{0}/{1}/pid_ratios/'.format(results_path, est_dir.GetName())
mult_binned_pt_dists = {}
mult_binned_pt_dists['proton'] = [
get_pT_distribution(est_dir, [kANTIPROTON, kPROTON], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['pi_ch'] = [
get_pT_distribution(est_dir, [kPIMINUS, kPIPLUS], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['xi'] = [
get_pT_distribution(est_dir, [kANTIXI, kXI], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['omega'] = [
get_pT_distribution(est_dir, [kOMEGAMINUS, kOMEGAPLUS], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['lambda'] = [
get_pT_distribution(est_dir, [kANTILAMBDA, kLAMBDA], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['k0s'] = [
get_pT_distribution(est_dir, [kK0S], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['k_ch'] = [
get_pT_distribution(est_dir, [kKPLUS, kKMINUS], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['pi0'] = [
get_pT_distribution(est_dir, [kPI0], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
perc_titles = ["{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
for perc_bin in self.perc_bins[est_dir.GetName()]]
fig = get_new_figure()
name = "proton_over_pich__vs__pt"
fig.ytitle = "(p+#bar{p})/#pi^{+-}"
fig.plot.ymax = .3
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Xi_over_pich__vs__pt"
fig.plot.ymax = .06
fig.legend.position = 'tl'
fig.ytitle = "#Xi/#pi^{+-}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "OmegaCh_over_pich__vs__pt"
fig.plot.ymax = .005
fig.legend.position = 'tl'
fig.ytitle = "#Omega_{ch}/#pi^{+-} "
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
# Ratios to pi0
fig = get_new_figure()
name = "pich_over_pi0__vs__pt"
fig.plot.ymax = 2.5
fig.legend.position = 'bl'
fig.ytitle = "#pi^{+-}/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['pi_ch'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "proton_over_pi0__vs__pt"
fig.plot.ymax = 1
fig.legend.position = 'tr'
fig.ytitle = "p/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "K0S_over_pi0__vs__pt"
fig.plot.ymax = 1.4
fig.legend.position = 'tl'
fig.ytitle = "K^{0}_{S}/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['k0s'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Lambda_over_pi0__vs__pt"
fig.plot.ymax = .9
fig.legend.position = 'tl'
fig.ytitle = "#Lambda/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Xi_over_pi0__vs__pt"
fig.plot.ymax = .08
fig.legend.position = 'tl'
fig.ytitle = "#Xi/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "OmegaCh_over_pi0__vs__pt"
fig.plot.ymax = .005
fig.legend.position = 'tl'
fig.ytitle = "#Omega_{ch}/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
# Ratios to K0S
fig = get_new_figure()
name = "proton_over_K0S__vs__pt"
fig.plot.ymax = 2.6
fig.legend.position = 'tr'
fig.ytitle = "p/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Lambda_over_K0S__vs__pt"
fig.plot.ymax = 1
fig.legend.position = 'bl'
fig.ytitle = "#Lambda/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Xi_over_K0S__vs__pt"
fig.plot.ymax = .2
fig.legend.position = 'tl'
fig.ytitle = "#Xi/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "OmegaCh_over_K0S__vs__pt"
fig.plot.ymax = .012
fig.legend.position = 'tl'
fig.ytitle = "#Omega_{ch}/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Kaon_over_pich__vs__pt"
fig.plot.ymax = 1
fig.legend.position = 'tl'
fig.ytitle = "(K^{+} + K^{-}) / (#pi^{+} +#pi^{-})"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['k_ch'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
return figs
@_io_decorator
def plot_PNch_summary(self):
log.info("Creating P(Nch) summary plot")
summary_fig = Figure()
summary_fig.xtitle = "N_{ch}^{est}"
summary_fig.ytitle = "P(N_{ch}^{est})"
summary_fig.legend.position = 'tr'
summary_fig.plot.logy = True
for est_dir in get_est_dirs(self.sums, self.considered_ests):
est_name = est_dir.GetName()
h_tmp = get_PNch_vs_estmult(self.sums, est_name)
if h_tmp.Integral() > 0:
h_tmp.Scale(1.0 / h_tmp.Integral())
summary_fig.add_plottable(h_tmp, make_estimator_title(est_name))
path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
summary_fig.save_to_root_file(self.f, "PNch_summary", path=path)
# list as return type is expected for making the pdf
return [summary_fig]
@_io_decorator
def plot_PNch(self):
log.info("Creating P(Nch_est) and P(Nch_refest) histograms")
# mult_bin_size = 10
figs = []
for ref_est_name in self.ref_ests:
for res_est_dir in get_est_dirs(self.results_post, self.considered_ests):
est_name = res_est_dir.GetName()
# Figure properties:
fig_vs_estmult = Figure()
fig_vs_refmult = Figure()
fig_vs_estmult.plot.logy = True
fig_vs_refmult.plot.logy = True
fig_vs_estmult.plot.palette = 'colorblind'
fig_vs_refmult.plot.palette = 'colorblind'
fig_vs_estmult.legend.position = 'tr'
fig_vs_refmult.legend.position = 'tr'
fig_vs_estmult.xtitle = "N_{{ch}}^{{{0}}}".format(est_name)
fig_vs_refmult.xtitle = "N_{{ch}}^{{{0}}}".format(ref_est_name)
fig_vs_estmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(est_name)
fig_vs_refmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(ref_est_name)
corr_hist = get_correlation_histogram(self.sums, est_name, ref_est_name)
# logic when dealing with fixed bins given in Nch:
# ------------------------------------------------
# mean_nch_est = corr_hist.GetMean(1) # mean of x axis
# nch_max = corr_hist.xaxis.GetNbins()
# nch_cutoff = mean_nch_est * mean_mult_cutoff_factor
# nch_bins = [(low, low + mult_bin_size) for low in range(0, int(nch_cutoff), mult_bin_size)]
# # a large last bin covering the rest:
# nch_bins += [(nch_bins[-1][1], nch_max)]
# legend_tmpl = "{} < N_{ch} < {}"
# logic when dealing with percentile bins:
# ----------------------------------------
# event_counter_est = asrootpy(getattr(res_est_dir, "event_counter"))
legend_tmpl = "{0}% - {1}%"
fig_vs_estmult.legend.title = "Selected in {0}".format(make_estimator_title(ref_est_name))
fig_vs_refmult.legend.title = "Selected in {0}".format(make_estimator_title(est_name))
# WARNING: the following needs tweaking when going back to fixed N_ch bins!
for nch_bin, perc_bin in zip(self.nch_edges[ref_est_name], self.perc_bins[ref_est_name]):
# vs est_mult:
corr_hist.xaxis.SetRange(0, 0) # reset x axis
corr_hist.yaxis.SetRange(nch_bin[0], nch_bin[1])
h_vs_est = asrootpy(corr_hist.ProjectionX(gen_random_name()))
if h_vs_est.Integral() > 0:
h_vs_est.Scale(1.0 / h_vs_est.Integral())
fig_vs_estmult.add_plottable(h_vs_est, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100))
else:
log.info("No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen".
format(perc_bin, ref_est_name))
for nch_bin, perc_bin in zip(self.nch_edges[est_name], self.perc_bins[est_name]):
# vs ref_mult:
corr_hist.yaxis.SetRange(0, 0) # reset y axis
corr_hist.xaxis.SetRange(*nch_bin)
h_vs_ref = asrootpy(corr_hist.ProjectionY(gen_random_name()))
if h_vs_ref.Integral() > 0:
h_vs_ref.Scale(1.0 / h_vs_ref.Integral())
fig_vs_refmult.add_plottable(h_vs_ref, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100))
else:
log.info(
"No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen".
format(perc_bin, est_name))
path = res_est_dir.GetPath().split(":")[1]
# vs est_mult
fig_vs_estmult.save_to_root_file(self.f, "PNchEst_binned_in_Nch{0}".format(ref_est_name), path)
# vs est_mult
fig_vs_refmult.save_to_root_file(self.f, "PNch{0}_binned_in_NchEst".format(ref_est_name), path)
figs.append(fig_vs_estmult)
figs.append(fig_vs_refmult)
return figs
@_io_decorator
def plot_mult_vs_pt(self):
log.info("Makeing 2D pt plots for each particle kind")
for est_dir in get_est_dirs(self.sums, self.considered_ests):
path = (self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
+ "/" + est_dir.GetName()
+ "/mult_pt")
try:
self.f.mkdir(path, recurse=True)
except ValueError:
pass
self.f.cd(path)
h3d = asrootpy(est_dir.FindObject('classifier_pT_PID_{0}'.format(est_dir.GetName())))
# loop through all particle kinds:
nPIDs = h3d.zaxis.GetNbins()
for ibin in range(1, nPIDs + 1):
h3d.zaxis.SetRange(ibin, ibin)
mult_pt = asrootpy(h3d.Project3D("yx"))
mult_pt.name = h3d.zaxis.GetBinLabel(ibin)
mult_pt.Write()
@_io_decorator
def plot_correlation(self):
# Make correlations between estimators
log.info("Correlating N_ch of each estimator")
corr_dir = self.results_post.GetPath().split(":")[1] + '/correlations'
try:
self.f.mkdir(corr_dir, recurse=True)
except:
pass
# Take ntuple from the first estimator and then add friends to this one
nt0 = self.sums[0].FindObject("fEventTuple")
nt0.SetAlias(self.sums[0].GetName(), "fEventTuple")
# build ntuple
for est_dir in self.sums[1:]:
nt0.AddFriend(est_dir.FindObject("fEventTuple"), est_dir.GetName())
for ref_est in self.considered_ests:
for est_dir in self.sums:
log.info("Correlating {0} with {1}".format(ref_est, est_dir.GetName()))
corr_hist = Hist2D(400, 0, 400,
400, 0, 400,
name="corr_hist_{0}_vs_{1}".format(ref_est, est_dir.GetName()))
# Labels are deliberately swapped, see Projection below!
corr_hist.title = ("Correlation N_{{ch}} in {0} and {1};N_{{ch}} {1};N_{{ch}} {0}"
.format(ref_est, est_dir.GetName()))
# this projects onto y:x, to make coding more adventurous
nt0.Project(corr_hist.name, "{0}.nch:{1}.nch".format(ref_est, est_dir.GetName()),
"ev_weight")
corr_hist.drawstyle = 'colz'
self.f.cd(corr_dir)
corr_hist.write()
@_io_decorator
def plot_pid_ratio_vs_refmult(self):
log.info("Creating plots vs refmult")
ratios_dir = self.results_post.GetPath().split(":")[1] + '/pid_ratios_vs_refmult'
def get_new_figure():
fig = Figure()
fig.plot.ncolors = len(self.considered_ests)
fig.xtitle = "N_{ch}|_{" + make_estimator_title('EtaLt05') + "}"
fig.plot.xmin = 0
fig.plot.xmax = 60
return fig
figs = []
# Proton / pi_ch
fig = get_new_figure()
pids1, pids2 = ['-2212', '2212'], ['-211', '211']
fig.ytitle = "p/#pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.04, 0.13
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, )
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
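# Every ratio below repeats this recipe; a hedged helper sketch
# (illustrative only; `ratios_dir` is captured from the enclosing scope):
# def _save_ratio_fig(pids1, pids2, ytitle, ylims, scale=None):
#     fig = get_new_figure()
#     fig.ytitle = ytitle
#     fig.plot.ymin, fig.plot.ymax = ylims
#     for g in get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, scale=scale):
#         fig.add_plottable(g, legend_title=g.GetTitle())
#     fig.save_to_root_file(self.f, "_".join(pids1) + "_div_" + "_".join(pids2), ratios_dir)
#     return fig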
# K / pi_ch
fig = get_new_figure()
pids1, pids2 = ['310', '321', '-321'], ['-211', '211']
fig.ytitle = "K^{*}/#pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.09, 0.30
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Lambda / pi_ch
fig = get_new_figure()
pids1, pids2 = ['3122'], ['-211', '211']
fig.ytitle = "#Lambda / #pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.005, 0.035
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Xi / pi_ch
fig = get_new_figure()
pids1, pids2 = ['3312'], ['-211', '211']
fig.ytitle = "#Xi / #pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.0004, 0.003
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Omega / pi_ch
fig = get_new_figure()
pids1, pids2 = ['3334', '-3334'], ['-211', '211']
fig.ytitle = "#Omega / #pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.00001, 0.0005
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# pi_ch/pi0
fig = get_new_figure()
pids1, pids2 = ['-211', '211'], ['111']
fig.ytitle = "#pi^{+-}/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 1.5, 2.2
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# proton / pi0
fig = get_new_figure()
pids1, pids2 = ['-2212', '2212'], ['111']
fig.ytitle = "p/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.09, 0.30
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K / pi0
fig = get_new_figure()
pids1, pids2 = ['310', '321', '-321'], ['111']
fig.ytitle = "K^{*}/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.15, 0.50
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Lambda / pi0
fig = get_new_figure()
pids1, pids2 = ['3122'], ['111']
fig.ytitle = "#Lambda/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.014, 0.045
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Xi / pi0
fig = get_new_figure()
pids1, pids2 = ['3312'], ['111']
fig.ytitle = "#Xi/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.0010, 0.005
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Omega / pi0
fig = get_new_figure()
pids1, pids2 = ['3334', '-3334'], ['111']
fig.ytitle = "#Omega/#pi^{0}"
fig.legend.position = 'tl'
fig.plot.ymin, fig.plot.ymax = 0.00002, 0.0008
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K_ch / K0_S
fig = get_new_figure()
pids1, pids2 = ['321', '-321'], ['310']
fig.ytitle = "(K^{+}+K^{-}) / (2#timesK^{0}_{S})"
fig.plot.ymin, fig.plot.ymax = 0.4, 1.5
fig.legend.position = 'tl'
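# scale=.5 implements the factor 2 in the denominator (2 x K^0_S).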
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, scale=.5)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K0_S / Lambda
fig = get_new_figure()
pids1, pids2 = ['310'], ['-3122', '3122']
fig.ytitle = "K^{0}_{S} / #Lambda"
fig.plot.ymin, fig.plot.ymax = 1.3, 3.7
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K0_S / Xi
fig = get_new_figure()
pids1, pids2 = ['310'], ['3312']
fig.ytitle = "K^{0}_{S} / #Xi"
fig.plot.ymin, fig.plot.ymax = 15, 80
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
return figs
# ######################################################################################
# # vs Est mult
# _plot_particle_ratios_vs_estmult(self, ['321', '-321'], ['310'],
# scale=.5, fig.ytitle = "(K^{+} + K^{-}) / (2*K_{S}^{0})")
@_io_decorator
def plot_meanpt_vs_ref_mult_for_pids(self):
log.info("Creating mean pT plots")
figs = []
for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
get_est_dirs(self.results_post, self.considered_ests)):
if sums_est_dir.GetName() != res_est_dir.GetName():
raise IndexError("Order of estimator dirs is different in sums and results_post")
res_dir_str = res_est_dir.GetPath().split(":")[1]
corr_hist = get_correlation_histogram(self.sums, sums_est_dir.GetName(), "EtaLt05")
# Get the <pT> per classifier bin; then, re-map the classifier value to the reference classifier (eg EtaLt05)
# This might not make a lot of sense, actually. Maybe it would be much more telling if I were to
# put the percentile bins on the x-axis? As in the highest 1% of that classifier has a <pT> of ...
graphs = []
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPI0, kPIMINUS, kPIPLUS]), corr_hist))
graphs[-1].title = "#pi"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kKMINUS, kKPLUS]), corr_hist))
graphs[-1].title = "K^{#pm}"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPROTON, kANTIPROTON]), corr_hist))
graphs[-1].title = "p"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kK0S]), corr_hist))
graphs[-1].title = "K^{0}_{S}"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kLAMBDA, kANTILAMBDA]), corr_hist))
graphs[-1].title = "#Lambda"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kXI, kANTIXI]), corr_hist))
graphs[-1].title = "#Xi"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kOMEGAMINUS, kOMEGAPLUS]), corr_hist))
graphs[-1].title = "#Omega"
# sanitize graphs:
for g in graphs:
remove_zero_value_points(g)
remove_points_with_x_err_gt_1NchRef(g)
remove_points_with_equal_x(g)
fig = Figure()
fig.plot.palette = 'root'
fig.plot.ncolors = 7
fig.plot.xmin = 0
fig.plot.xmax = 40
fig.plot.ymin = 0.3
fig.plot.ymax = 2.1
fig.ytitle = "<p_{T}>"
fig.xtitle = "N_{ch}|_{|#eta|<0.5}"
fig.legend.title = make_estimator_title(sums_est_dir.GetName())
[fig.add_plottable(g, g.title) for g in graphs]
fig.save_to_root_file(self.f, "mean_pt", res_dir_str)
figs.append(fig)
return figs
# def _plot_event_counter_with_shaded_perc_areas(f, results_post):
# log.info("Broken: Root sucks! Creating shaded event counter with percentile regions")
# return
# for est_dir in get_est_dirs(results_post):
# event_counter = asrootpy(getattr(est_dir, "event_counter"))
# nch_edges = get_Nch_edges_for_percentile_edges(perc_edges, event_counter)
# c = Canvas(name="event_counter_with_perc")
# leg = Legend(len(nch_edges) - 1)
# copies = []
# colors = get_color_generator(ncolors=10)
# # Draw the hist once
# event_counter.Draw()
# for nch_low, nch_up in zip(nch_edges[:-1], nch_edges[1:]):
# copies.append(event_counter.Clone(gen_random_name()))
# copies[-1].xaxis.SetRangeUser(nch_low, nch_up)
# copies[-1].SetFillStyle(1001)
# copies[-1].color = next(colors)
# copies[-1].xaxis.title = "N_{ch}"
# copies[-1].yaxis.title = "counts"
# leg.AddEntry(copies[-1], "{}-{}%".format(str(nch_low), str(nch_up)))
# copies[-1].Draw('sameHist')
# break
# leg.Draw()
# est_dir.cd()
# c.Write()
@_io_decorator
def plot_dNdpT(self, pid_selection):
"""
Plot dNdpT particles in pid_selection
Parameters
----------
pid_selection : str
Either all charged particles ('ch') or 'pi', 'K' or 'p'
"""
log.info("1/N_evts dN_ch/dpT plots")
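# Hypothetical usage from a driver script (the name 'plotter' is illustrative,
# not part of the original module):
# figs = plotter.plot_dNdpT('pi') # pion spectra, one figure per estimator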
figs = []
for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
get_est_dirs(self.results_post, self.considered_ests)):
if sums_est_dir.GetName() != res_est_dir.GetName():
raise IndexError("Order of estimator dirs is different in sums and results_post")
res_dir_str = res_est_dir.GetPath().split(":")[1]
fig = Figure()
fig.plot.palette = 'colorblind'
# fig.plot.ncolors = 5
fig.legend.position = 'tr'
fig.ytitle = "1/N_{evts} dN/dp_{T} (" + make_estimator_title(sums_est_dir.GetName()) + ")"
fig.xtitle = "p_{T} (GeV)"
fig.plot.logy = True
hists = []
if pid_selection == 'ch':
fig.legend.title = "#pi^{#pm}, K^{#pm}, p, #Lambda, #Xi, #Omega"
pid_numbers = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON,
kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS]
if pid_selection == 'pi':
fig.legend.title = "#pi^{#pm}"
pid_numbers = [kPIMINUS, kPIPLUS]
if pid_selection == 'K':
fig.legend.title = "K^{#pm}"
pid_numbers = [kKMINUS, kKPLUS]
if pid_selection == 'p':
fig.legend.title = "p, #bar{p}"
pid_numbers = [kPROTON, kANTIPROTON]
for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()], self.nch_edges[sums_est_dir.GetName()]):
hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin, normalized=False))
hists[-1].title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
# Add MB last to keep the colors consistent with the other plots; MB spans
# from the very first to the very last classifier bin considered here.
classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0], self.nch_edges[sums_est_dir.GetName()][-1][-1])
hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin_mb, normalized=False))
hists[-1].title = "MB"
# scale by bin width
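# TH1.Scale(1, "width") divides each bin content (and error) by the bin
# width, turning raw counts into a differential dN/dpT.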
[h.Scale(1, "width") for h in hists]
[fig.add_plottable(p, p.title) for p in hists]
fig.save_to_root_file(self.f, "dN{0}dpT".format(pid_selection), res_dir_str)
figs.append(fig)
return figs
@_io_decorator
def plot_pT_HM_div_pt_MB(self, scale_nMPI):
log.info("Plot dN_{HM}/dpT / dN_{MB}/dpT ratios scaled with nMPI")
figs = []
for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
get_est_dirs(self.results_post, self.considered_ests)):
if sums_est_dir.GetName() != res_est_dir.GetName():
raise IndexError("Order of estimator dirs is different in sums and results_post")
res_dir_str = res_est_dir.GetPath().split(":")[1]
fig = Figure()
fig.plot.palette = 'root'
fig.plot.ncolors = 7
fig.xtitle = "p_{T} (GeV)"
fig.legend.title = make_estimator_title(sums_est_dir.GetName())
if scale_nMPI:
fig.ytitle = ("#left[ #frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}} #right] "
"#times #left[ #frac{<N_{MPI}^{MB}>}{<N_{MPI}^{HM}>} #right]")
else:
fig.ytitle = "#frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}}"
charged_particles = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON,
kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS]
# get the MB distribution which will be used to divide the nch-binned distributions
classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0],
self.nch_edges[sums_est_dir.GetName()][-1][-1])
pt_dist_mb = get_pT_distribution(res_est_dir, charged_particles, classifier_bin_mb, normalized=False)
mean_nmpi_mb = get_mean_nMPI(sums_est_dir, classifier_bin_mb)
for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()],
self.nch_edges[sums_est_dir.GetName()]):
# get the pt distribution in this Nch interval
pt_dist_in_interval = get_pT_distribution(res_est_dir, charged_particles,
classifier_bin, normalized=False)
title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
if scale_nMPI:
mean_nmpi_hm = get_mean_nMPI(sums_est_dir, classifier_bin)
fig.add_plottable((pt_dist_in_interval / pt_dist_mb) * (mean_nmpi_mb / mean_nmpi_hm), title)
name = "pt_hm_div_pt_mb_scaled_nMPI"
else:
fig.add_plottable((pt_dist_in_interval / pt_dist_mb), title)
name = "pt_hm_div_pt_mb"
fig.save_to_root_file(self.f, name, res_dir_str)
figs.append(fig)
return figs
@_io_decorator
def plot_nMPI_vs_Nch(self):
log.info("Creating nMPI(Nch) summary plot")
summary_fig = Figure()
summary_fig.xtitle = "N_{ch}^{est}"
summary_fig.ytitle = "<N_{MPI}>"
summary_fig.plot.palette = 'root'
summary_fig.legend.position = 'br'
summary_fig.plot.logy = True
summary_fig.plot.ymin = 1
for est_dir in get_est_dirs(self.sums, self.considered_ests):
h_tmp = asrootpy(get_correlation_histogram(self.sums, est_dir.GetName(), "nMPI").ProfileX())
summary_fig.add_plottable(h_tmp, make_estimator_title(est_dir.GetName()))
path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
summary_fig.save_to_root_file(self.f, "nMPI_summary", path=path)
return [summary_fig]
| bsd-3-clause |
jswanljung/iris | lib/iris/tests/unit/experimental/fieldsfile/test__convert_collation.py | 8 | 14897 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :func:`iris.experimental.fieldsfile._convert_collation`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import cf_units
import netcdftime
import numpy as np
from iris.experimental.fieldsfile \
import _convert_collation as convert_collation
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris.fileformats.pp
import iris.fileformats.rules
from iris.tests import mock
COORD_SYSTEM = iris.coord_systems.GeogCS(6371229.0)
LATITUDE = iris.coords.DimCoord([15, 0, -15], 'latitude', units='degrees',
coord_system=COORD_SYSTEM)
LONGITUDE = iris.coords.DimCoord([0, 20, 40, 60], 'longitude', units='degrees',
coord_system=COORD_SYSTEM, circular=True)
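# These expected coordinates mirror the grid encoded in _field() below:
# BZY/BDY yield latitudes 15, 0, -15; BZX/BDX yield longitudes 0, 20, 40, 60.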
class Test(tests.IrisTest):
def _field(self):
# Create PP field for X wind on a regular lat-lon grid.
header = [0] * 64
# Define the regular lat-lon grid.
header[15] = 1 # LBCODE
header[17] = 3 # LBROW
header[18] = 4 # LBNPT
header[55] = 90 # BPLAT
header[58] = 30 # BZY
header[59] = -15 # BDY
header[60] = -20 # BZX
header[61] = 20 # BDX
# Define the STASH code m01s00i002.
header[41] = 2 # LBUSER(4)
header[44] = 1 # LBUSER(7)
field = iris.fileformats.pp.PPField3(header)
return field
def _check_phenomenon(self, metadata, factory=None):
if factory is None:
self.assertEqual(metadata.factories, [])
else:
self.assertEqual(metadata.factories, [factory])
self.assertEqual(metadata.references, [])
self.assertEqual(metadata.standard_name, 'x_wind')
self.assertIsNone(metadata.long_name)
self.assertEqual(metadata.units, cf_units.Unit('m s-1'))
self.assertEqual(metadata.attributes, {'STASH': (1, 0, 2)})
self.assertEqual(metadata.cell_methods, [])
def test_all_scalar(self):
field = self._field()
field.lbtim = 11
field.t1 = netcdftime.datetime(1970, 1, 1, 18)
field.t2 = netcdftime.datetime(1970, 1, 1, 12)
collation = mock.Mock(fields=[field], vector_dims_shape=(),
element_arrays_and_dims={})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
coords_and_dims = [(LONGITUDE, 1),
(LATITUDE, 0)]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = [
(iris.coords.DimCoord(18, 'time', units='hours since epoch'),
None),
(iris.coords.DimCoord(12, 'forecast_reference_time',
units='hours since epoch'), None),
(iris.coords.DimCoord(6, 'forecast_period', units='hours'), None)
]
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_vector_t1(self):
field = self._field()
field.lbtim = 11
field.t2 = netcdftime.datetime(1970, 1, 1, 12)
t1 = ([netcdftime.datetime(1970, 1, 1, 18),
netcdftime.datetime(1970, 1, 2, 0),
netcdftime.datetime(1970, 1, 2, 6)], [0])
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'t1': t1})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(iris.coords.DimCoord([18, 24, 30], 'time',
units='hours since epoch'),
(0,))
]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = [
(iris.coords.DimCoord(12, 'forecast_reference_time',
units='hours since epoch'), None),
(iris.coords.DimCoord([6, 12, 18], 'forecast_period',
units='hours'), (0,))
]
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_vector_t2(self):
field = self._field()
field.lbtim = 11
field.t1 = netcdftime.datetime(1970, 1, 1, 18)
t2 = ([netcdftime.datetime(1970, 1, 1, 12),
netcdftime.datetime(1970, 1, 1, 15),
netcdftime.datetime(1970, 1, 1, 18)], [0])
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'t2': t2})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(iris.coords.DimCoord([12, 15, 18],
'forecast_reference_time',
units='hours since epoch'),
(0,))
]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = [
(iris.coords.DimCoord(18, 'time', units='hours since epoch'),
None),
(iris.coords.DimCoord([6, 3, 0.], 'forecast_period',
units='hours'),
(0,))
]
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_vector_lbft(self):
field = self._field()
field.lbtim = 21
field.t1 = netcdftime.datetime(1970, 1, 1, 12)
field.t2 = netcdftime.datetime(1970, 1, 1, 18)
lbft = ([18, 15, 12], [0])
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'lbft': lbft})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(iris.coords.DimCoord([0, 3, 6],
'forecast_reference_time',
units='hours since epoch'),
(0,))]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = [
(iris.coords.DimCoord(15, 'time', units='hours since epoch',
bounds=[[12, 18]]), None),
(iris.coords.DimCoord([15, 12, 9], 'forecast_period',
units='hours',
bounds=[[12, 18], [9, 15], [6, 12]]),
(0,))
]
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_vector_t1_and_t2(self):
field = self._field()
field.lbtim = 11
t1 = ([netcdftime.datetime(1970, 1, 2, 6),
netcdftime.datetime(1970, 1, 2, 9),
netcdftime.datetime(1970, 1, 2, 12)], [1])
t2 = ([netcdftime.datetime(1970, 1, 1, 12),
netcdftime.datetime(1970, 1, 2, 0)], [0])
collation = mock.Mock(fields=[field], vector_dims_shape=(2, 3),
element_arrays_and_dims={'t1': t1, 't2': t2})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
coords_and_dims = [(LONGITUDE, 3),
(LATITUDE, 2),
(iris.coords.DimCoord([30, 33, 36], 'time',
units='hours since epoch'),
(1,)),
(iris.coords.DimCoord([12, 24],
'forecast_reference_time',
units='hours since epoch'),
(0,))]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = [
(iris.coords.AuxCoord([[18, 21, 24], [6, 9, 12]],
'forecast_period', units='hours'), (0, 1))
]
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_vertical_pressure(self):
field = self._field()
field.lbvc = 8
blev = ([1000, 850, 700], (0,))
lblev = ([1000, 850, 700], (0,))
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'blev': blev,
'lblev': lblev})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(iris.coords.DimCoord([1000, 850, 700],
long_name='pressure',
units='hPa'),
(0,))]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = []
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_soil_level(self):
field = self._field()
field.lbvc = 6
points = [10, 20, 30]
lower = [0] * 3
upper = [0] * 3
lblev = (points, (0,))
brsvd1 = (lower, (0,))
brlev = (upper, (0,))
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'lblev': lblev,
'brsvd1': brsvd1,
'brlev': brlev})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
level = iris.coords.DimCoord(points,
long_name='soil_model_level_number',
attributes={'positive': 'down'})
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(level, (0,))]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = []
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_soil_depth(self):
field = self._field()
field.lbvc = 6
points = [10, 20, 30]
lower = [0, 15, 25]
upper = [15, 25, 35]
blev = (points, (0,))
brsvd1 = (lower, (0,))
brlev = (upper, (0,))
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'blev': blev,
'brsvd1': brsvd1,
'brlev': brlev})
metadata = convert_collation(collation)
self._check_phenomenon(metadata)
depth = iris.coords.DimCoord(points, standard_name='depth',
bounds=np.vstack((lower, upper)).T,
units='m',
attributes={'positive': 'down'})
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(depth, (0,))]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = []
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
def test_vertical_hybrid_height(self):
field = self._field()
field.lbvc = 65
blev = ([5, 18, 38], (0,))
brsvd1 = ([10, 26, 50], (0,))
brsvd2 = ([0.9989, 0.9970, 0.9944], (0,))
brlev = ([0, 10, 26], (0,))
bhrlev = ([1, 0.9989, 0.9970], (0,))
lblev = ([1, 2, 3], (0,))
bhlev = ([0.9994, 0.9979, 0.9957], (0,))
collation = mock.Mock(fields=[field], vector_dims_shape=(3,),
element_arrays_and_dims={'blev': blev,
'lblev': lblev,
'brsvd1': brsvd1,
'brsvd2': brsvd2,
'brlev': brlev,
'bhrlev': bhrlev,
'bhlev': bhlev})
metadata = convert_collation(collation)
factory = iris.fileformats.rules.Factory(
iris.aux_factory.HybridHeightFactory,
[{'long_name': 'level_height'}, {'long_name': 'sigma'},
iris.fileformats.rules.Reference('orography')])
self._check_phenomenon(metadata, factory)
coords_and_dims = [(LONGITUDE, 2),
(LATITUDE, 1),
(iris.coords.DimCoord([1, 2, 3],
'model_level_number',
attributes={'positive': 'up'}
),
(0,))]
self.assertEqual(metadata.dim_coords_and_dims, coords_and_dims)
coords_and_dims = [
(iris.coords.DimCoord([5, 18, 38], long_name='level_height',
units='m', bounds=[[0, 10], [10, 26],
[26, 50]],
attributes={'positive': 'up'}), (0,)),
(iris.coords.AuxCoord([0.9994, 0.9979, 0.9957], long_name='sigma',
bounds=[[1, 0.9989], [0.9989, 0.9970],
[0.9970, 0.9944]]), (0,))
]
self.assertEqual(metadata.aux_coords_and_dims, coords_and_dims)
if __name__ == '__main__':
tests.main()
| lgpl-3.0 |
cosim/zerorpc-python | tests/test_channel.py | 76 | 3964 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_events_channel_client_side():
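# A ROUTER server socket prefixes each incoming message with the sender's
# identity; the multiplexer exposes it as the 'zmqid' header, which is echoed
# back below so the reply reaches the right DEALER client.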
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events)
client_channel = client.channel()
client_channel.emit('someevent', (42,))
event = server.recv()
print event
assert list(event.args) == [42]
assert event.header.get('zmqid', None) is not None
server.emit('someanswer', (21,),
xheader=dict(response_to=event.header['message_id'],
zmqid=event.header['zmqid']))
event = client_channel.recv()
assert list(event.args) == [21]
def test_events_channel_client_side_server_send_many():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events)
client_channel = client.channel()
client_channel.emit('giveme', (10,))
event = server.recv()
print event
assert list(event.args) == [10]
assert event.header.get('zmqid', None) is not None
for x in xrange(10):
server.emit('someanswer', (x,),
xheader=dict(response_to=event.header['message_id'],
zmqid=event.header['zmqid']))
for x in xrange(10):
event = client_channel.recv()
assert list(event.args) == [x]
def test_events_channel_both_side():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events)
client_channel = client.channel()
client_channel.emit('openthat', (42,))
event = server.recv()
print event
assert list(event.args) == [42]
assert event.name == 'openthat'
server_channel = server.channel(event)
server_channel.emit('test', (21,))
event = client_channel.recv()
assert list(event.args) == [21]
assert event.name == 'test'
server_channel.emit('test', (22,))
event = client_channel.recv()
assert list(event.args) == [22]
assert event.name == 'test'
server_events.close()
server_channel.close()
client_channel.close()
client_events.close()
| mit |
ifduyue/sentry | src/sentry/tagstore/legacy/models/tagkey.py | 2 | 1534 | """
sentry.tagstore.legacy.models.tagkey
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sentry.tagstore import TagKeyStatus
from sentry.constants import MAX_TAG_KEY_LENGTH
from sentry.db.models import (Model, BoundedPositiveIntegerField, sane_repr)
class TagKey(Model):
"""
Stores references to available filters keys.
"""
__core__ = False
project_id = BoundedPositiveIntegerField(db_index=True)
key = models.CharField(max_length=MAX_TAG_KEY_LENGTH)
values_seen = BoundedPositiveIntegerField(default=0)
label = models.CharField(max_length=64, null=True)
status = BoundedPositiveIntegerField(
choices=(
(TagKeyStatus.VISIBLE, _('Visible')),
(TagKeyStatus.PENDING_DELETION, _('Pending Deletion')),
(TagKeyStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
),
default=TagKeyStatus.VISIBLE
)
class Meta:
app_label = 'sentry'
db_table = 'sentry_filterkey'
unique_together = (('project_id', 'key'), )
__repr__ = sane_repr('project_id', 'key')
def get_label(self):
from sentry import tagstore
return tagstore.get_tag_key_label(self.key)
def get_audit_log_data(self):
return {
'key': self.key,
}
| bsd-3-clause |
CatsAndDogsbvba/odoo | addons/analytic/analytic.py | 27 | 18705 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
currency_obj = self.pool.get('res.currency')
recres = {}
def recursive_computation(account):
result2 = res[account.id].copy()
for son in account.child_ids:
result = recursive_computation(son)
for field in field_names:
if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
result2[field] += result[field]
return result2
for account in self.browse(cr, uid, ids, context=context):
if account.id not in child_ids:
continue
recres[account.id] = recursive_computation(account)
return recres
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
res = {}
if context is None:
context = {}
child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)], context=context))
for i in child_ids:
res[i] = {}
for n in fields:
res[i][n] = 0.0
if not child_ids:
return res
where_date = ''
where_clause_args = [tuple(child_ids)]
if context.get('from_date', False):
where_date += " AND l.date >= %s"
where_clause_args += [context['from_date']]
if context.get('to_date', False):
where_date += " AND l.date <= %s"
where_clause_args += [context['to_date']]
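# A single SQL pass aggregates debit/credit/balance/quantity for every
# account in the subtree; _compute_level_tree() then rolls the per-account
# totals up the hierarchy, converting currencies where parent and child differ.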
cr.execute("""
SELECT a.id,
sum(
CASE WHEN l.amount > 0
THEN l.amount
ELSE 0.0
END
) as debit,
sum(
CASE WHEN l.amount < 0
THEN -l.amount
ELSE 0.0
END
) as credit,
COALESCE(SUM(l.amount),0) AS balance,
COALESCE(SUM(l.unit_amount),0) AS quantity
FROM account_analytic_account a
LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
WHERE a.id IN %s
""" + where_date + """
GROUP BY a.id""", where_clause_args)
for row in cr.dictfetchall():
res[row['id']] = {}
for field in fields:
res[row['id']][field] = row[field]
return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
res = []
if not ids:
return res
if isinstance(ids, (int, long)):
ids = [ids]
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context == None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id and not elmt.type == 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
raise osv.except_osv(_('Error!'), _("If you set a company, the selected currency has to be the same as that company's currency. \nYou can remove the company, and thus change the currency, only on analytic accounts of type 'view'. This can be really useful for consolidating the charts of several companies with different currencies, for example."))
if value:
cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won't be allowed to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
"If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
"The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
'description': fields.text('Description'),
'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts', copy=True),
'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries', copy=False),
'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
'quantity_max': fields.float('Prepaid Service Units', help='Sets the upper limit of time that can be worked on the contract, based on the timesheet (for instance, the number of hours in a limited support contract).'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('pending','To Renew'),
('close','Closed'),
('cancelled', 'Cancelled')],
'Status', required=True,
track_visibility='onchange', copy=False),
'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
store = {
'res.company': (_get_analytic_account, ['currency_id'], 10),
}, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
if not template_id:
return {}
res = {'value':{}}
template = self.browse(cr, uid, template_id, context=context)
if template.date_start and template.date:
from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
timedelta = to_dt - from_dt
res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
if not date_start:
res['value']['date_start'] = fields.date.today()
res['value']['quantity_max'] = template.quantity_max
res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
res['value']['description'] = template.description
return res
def on_change_partner_id(self, cr, uid, ids,partner_id, name, context=None):
res={}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.user_id:
res['manager_id'] = partner.user_id.id
if not name:
res['name'] = _('Contract: ') + partner.name
return {'value': res}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
_defaults = {
'type': 'normal',
'company_id': _default_company,
'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account', context=context),
'state': 'open',
'user_id': lambda self, cr, uid, ctx: uid,
'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
'manager_id': lambda self, cr, uid, ctx: ctx.get('manager_id', False),
'date_start': lambda *a: time.strftime('%Y-%m-%d'),
'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
_order = 'code, name asc'
_constraints = [
(check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
""" executed only on the toplevel copied object of the hierarchy.
Subobject are actually copied with copy_data"""
if not default:
default = {}
analytic = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % analytic['name']
return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
if not company_id:
return {}
currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
return {'value': {'currency_id': currency}}
def on_change_parent(self, cr, uid, id, parent_id):
if not parent_id:
return {}
parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
if parent['partner_id']:
partner = parent['partner_id'][0]
else:
partner = False
res = {'value': {}}
if partner:
res['value']['partner_id'] = partner
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if context is None:
context={}
account_ids = []
if name:
account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
if not account_ids:
dom = []
if '/' in name:
for name2 in name.split('/'):
# intermediate search without limit and args - could be expensive for large tables if `name` is not selective
account_ids = self.search(cr, uid, dom + [('name', operator, name2.strip())], limit=None, context=context)
if not account_ids: break
dom = [('parent_id','in',account_ids)]
if account_ids and args:
# final filtering according to domain (args)
account_ids = self.search(cr, uid, [('id', 'in', account_ids)] + args, limit=limit, context=context)
if not account_ids:
return super(account_analytic_account, self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
_name = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'name': fields.char('Description', required=True),
'date': fields.date('Date', required=True, select=True),
'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
'user_id': fields.many2one('res.users', 'User'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
def _get_default_date(self, cr, uid, context=None):
return fields.date.context_today(self, cr, uid, context=context)
def __get_default_date(self, cr, uid, context=None):
return self._get_default_date(cr, uid, context=context)
_defaults = {
'date': __get_default_date,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
'amount': 0.00
}
_order = 'date desc'
def _check_no_view(self, cr, uid, ids, context=None):
analytic_lines = self.browse(cr, uid, ids, context=context)
for line in analytic_lines:
if line.account_id.type == 'view':
return False
return True
_constraints = [
(_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
highRPM/IM-A910S_msm8974_kernel-source | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
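# Example of a "readelf -u" line matched by start_pattern (the exact format
# may vary across binutils versions; this is illustrative only):
# <my_func>: [0x4000000000000040-0x4000000000000100]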
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
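# Each 16-byte IA-64 bundle holds 3 instruction slots, hence the factor below.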
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
wujuguang/motor | test/tornado_tests/test_motor_web.py | 1 | 9001 | # Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for using Motor with Tornado web applications."""
from __future__ import unicode_literals
import datetime
import email
import hashlib
import time
import re
import unittest
import gridfs
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application
import motor
import motor.web
import test
from test.test_environment import env, CA_PEM, CLIENT_PEM
# We're using Tornado's AsyncHTTPTestCase instead of our own MotorTestCase for
# the convenience of self.fetch().
class GridFSHandlerTestBase(AsyncHTTPTestCase):
def setUp(self):
super(GridFSHandlerTestBase, self).setUp()
self.fs = gridfs.GridFS(test.env.sync_cx.motor_test)
# Make a 500k file in GridFS with filename 'foo'
self.contents = b'Jesse' * 100 * 1024
self.contents_hash = hashlib.md5(self.contents).hexdigest()
# Record when we created the file, to check the Last-Modified header
self.put_start = datetime.datetime.utcnow().replace(microsecond=0)
self.file_id = 'id'
self.fs.delete(self.file_id)
self.fs.put(
self.contents, _id='id', filename='foo', content_type='my type')
self.put_end = datetime.datetime.utcnow().replace(microsecond=0)
self.assertTrue(self.fs.get_last_version('foo'))
def motor_db(self, **kwargs):
if env.mongod_started_with_ssl:
kwargs.setdefault('ssl_certfile', CLIENT_PEM)
kwargs.setdefault('ssl_ca_certs', CA_PEM)
kwargs.setdefault('ssl', env.mongod_started_with_ssl)
client = motor.MotorClient(
test.env.uri,
io_loop=self.io_loop,
**kwargs)
return client.motor_test
def tearDown(self):
self.fs.delete(self.file_id)
super(GridFSHandlerTestBase, self).tearDown()
def get_app(self):
return Application([
('/(.+)', motor.web.GridFSHandler, {'database': self.motor_db()})])
def stop(self, *args, **kwargs):
# A stop() method more permissive about the number of its positional
# arguments than AsyncHTTPTestCase.stop
if len(args) == 1:
AsyncHTTPTestCase.stop(self, args[0], **kwargs)
else:
AsyncHTTPTestCase.stop(self, args, **kwargs)
def parse_date(self, d):
date_tuple = email.utils.parsedate(d)
return datetime.datetime.fromtimestamp(time.mktime(date_tuple))
def last_mod(self, response):
"""Parse the 'Last-Modified' header from an HTTP response into a
datetime.
"""
return self.parse_date(response.headers['Last-Modified'])
def expires(self, response):
return self.parse_date(response.headers['Expires'])
class GridFSHandlerTest(GridFSHandlerTestBase):
def test_basic(self):
# First request
response = self.fetch('/foo')
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
self.assertEqual(
len(self.contents), int(response.headers['Content-Length']))
self.assertEqual('my type', response.headers['Content-Type'])
self.assertEqual('public', response.headers['Cache-Control'])
self.assertTrue('Expires' not in response.headers)
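# GridFSHandler derives the Etag from the file's MD5 checksum and
# Last-Modified from the upload date, which is what the checks below rely on.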
etag = response.headers['Etag']
last_mod_dt = self.last_mod(response)
self.assertEqual(self.contents_hash, etag.strip('"'))
self.assertTrue(self.put_start <= last_mod_dt <= self.put_end)
# Now check we get 304 NOT MODIFIED responses as appropriate
for ims_value in (
last_mod_dt,
last_mod_dt + datetime.timedelta(seconds=1)
):
response = self.fetch('/foo', if_modified_since=ims_value)
self.assertEqual(304, response.code)
self.assertEqual(b'', response.body)
# If-Modified-Since in the past, get whole response back
response = self.fetch(
'/foo',
if_modified_since=last_mod_dt - datetime.timedelta(seconds=1))
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
# Matching Etag
response = self.fetch('/foo', headers={'If-None-Match': etag})
self.assertEqual(304, response.code)
self.assertEqual(b'', response.body)
# Mismatched Etag
response = self.fetch('/foo', headers={'If-None-Match': etag + 'a'})
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
def test_404(self):
response = self.fetch('/bar')
self.assertEqual(404, response.code)
def test_head(self):
response = self.fetch('/foo', method='HEAD')
# Get Etag and parse Last-Modified into a datetime
etag = response.headers['Etag']
last_mod_dt = self.last_mod(response)
# Test the result
self.assertEqual(200, response.code)
self.assertEqual(b'', response.body) # Empty body for HEAD request
self.assertEqual(
len(self.contents), int(response.headers['Content-Length']))
self.assertEqual('my type', response.headers['Content-Type'])
self.assertEqual(self.contents_hash, etag.strip('"'))
self.assertTrue(self.put_start <= last_mod_dt <= self.put_end)
self.assertEqual('public', response.headers['Cache-Control'])
def test_content_type(self):
# Check that GridFSHandler uses file extension to guess Content-Type
# if not provided
for filename, expected_type in [
('foo.jpg', 'jpeg'),
('foo.png', 'png'),
('ht.html', 'html'),
('jscr.js', 'javascript'),
]:
# 'fs' is PyMongo's blocking GridFS
self.fs.put(b'', filename=filename)
for method in 'GET', 'HEAD':
response = self.fetch('/' + filename, method=method)
self.assertEqual(200, response.code)
# mimetypes are platform-defined, be fuzzy
self.assertTrue(
response.headers['Content-Type'].lower().endswith(
expected_type))
class TZAwareGridFSHandlerTest(GridFSHandlerTestBase):
def motor_db(self):
return super(TZAwareGridFSHandlerTest, self).motor_db(tz_aware=True)
def test_tz_aware(self):
now = datetime.datetime.utcnow()
ago = now - datetime.timedelta(minutes=10)
hence = now + datetime.timedelta(minutes=10)
response = self.fetch('/foo', if_modified_since=ago)
self.assertEqual(200, response.code)
response = self.fetch('/foo', if_modified_since=hence)
self.assertEqual(304, response.code)
class CustomGridFSHandlerTest(GridFSHandlerTestBase):
def get_app(self):
class CustomGridFSHandler(motor.web.GridFSHandler):
def get_gridfs_file(self, bucket, filename, request):
# Test overriding the get_gridfs_file() method, path is
# interpreted as file_id instead of filename.
return bucket.open_download_stream(file_id=filename)
def get_cache_time(self, path, modified, mime_type):
return 10
def set_extra_headers(self, path, gridout):
self.set_header('quux', 'fizzledy')
return Application([
('/(.+)', CustomGridFSHandler, {'database': self.motor_db()})])
def test_get_gridfs_file(self):
# We overrode get_gridfs_file so we expect getting by filename *not* to
# work now; we'll get a 404. We have to get by file_id now.
response = self.fetch('/foo')
self.assertEqual(404, response.code)
response = self.fetch('/' + str(self.file_id))
self.assertEqual(200, response.code)
self.assertEqual(self.contents, response.body)
cache_control = response.headers['Cache-Control']
self.assertTrue(re.match(r'max-age=\d+', cache_control))
self.assertEqual(10, int(cache_control.split('=')[1]))
expires = self.expires(response)
# It should expire about 10 seconds from now
self.assertTrue(
datetime.timedelta(seconds=8)
< expires - datetime.datetime.utcnow()
< datetime.timedelta(seconds=12))
self.assertEqual('fizzledy', response.headers['quux'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
michaelgallacher/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/id/id_choices.py | 439 | 3217 | import warnings
from django.utils.translation import ugettext_lazy as _
# Reference: http://id.wikipedia.org/wiki/Daftar_provinsi_Indonesia
# Indonesia does not have an official Province code standard.
# I decided to use unambiguous and consistent (some are common) 3-letter codes.
warnings.warn(
'There have been recent changes to the ID localflavor. See the release notes for details',
RuntimeWarning
)
PROVINCE_CHOICES = (
('ACE', _('Aceh')),
('BLI', _('Bali')),
('BTN', _('Banten')),
('BKL', _('Bengkulu')),
('DIY', _('Yogyakarta')),
('JKT', _('Jakarta')),
('GOR', _('Gorontalo')),
('JMB', _('Jambi')),
('JBR', _('Jawa Barat')),
('JTG', _('Jawa Tengah')),
('JTM', _('Jawa Timur')),
('KBR', _('Kalimantan Barat')),
('KSL', _('Kalimantan Selatan')),
('KTG', _('Kalimantan Tengah')),
('KTM', _('Kalimantan Timur')),
('BBL', _('Kepulauan Bangka-Belitung')),
('KRI', _('Kepulauan Riau')),
('LPG', _('Lampung')),
('MLK', _('Maluku')),
('MUT', _('Maluku Utara')),
('NTB', _('Nusa Tenggara Barat')),
('NTT', _('Nusa Tenggara Timur')),
('PPA', _('Papua')),
('PPB', _('Papua Barat')),
('RIU', _('Riau')),
('SLB', _('Sulawesi Barat')),
('SLS', _('Sulawesi Selatan')),
('SLT', _('Sulawesi Tengah')),
('SLR', _('Sulawesi Tenggara')),
('SLU', _('Sulawesi Utara')),
('SMB', _('Sumatera Barat')),
('SMS', _('Sumatera Selatan')),
('SMU', _('Sumatera Utara')),
)
LICENSE_PLATE_PREFIX_CHOICES = (
('A', _('Banten')),
('AA', _('Magelang')),
('AB', _('Yogyakarta')),
('AD', _('Surakarta - Solo')),
('AE', _('Madiun')),
('AG', _('Kediri')),
('B', _('Jakarta')),
('BA', _('Sumatera Barat')),
('BB', _('Tapanuli')),
('BD', _('Bengkulu')),
('BE', _('Lampung')),
('BG', _('Sumatera Selatan')),
('BH', _('Jambi')),
('BK', _('Sumatera Utara')),
('BL', _('Nanggroe Aceh Darussalam')),
('BM', _('Riau')),
('BN', _('Kepulauan Bangka Belitung')),
('BP', _('Kepulauan Riau')),
('CC', _('Corps Consulate')),
('CD', _('Corps Diplomatic')),
('D', _('Bandung')),
('DA', _('Kalimantan Selatan')),
('DB', _('Sulawesi Utara Daratan')),
('DC', _('Sulawesi Barat')),
('DD', _('Sulawesi Selatan')),
('DE', _('Maluku')),
('DG', _('Maluku Utara')),
('DH', _('NTT - Timor')),
('DK', _('Bali')),
('DL', _('Sulawesi Utara Kepulauan')),
('DM', _('Gorontalo')),
('DN', _('Sulawesi Tengah')),
('DR', _('NTB - Lombok')),
('DS', _('Papua dan Papua Barat')),
('DT', _('Sulawesi Tenggara')),
('E', _('Cirebon')),
('EA', _('NTB - Sumbawa')),
('EB', _('NTT - Flores')),
('ED', _('NTT - Sumba')),
('F', _('Bogor')),
('G', _('Pekalongan')),
('H', _('Semarang')),
('K', _('Pati')),
('KB', _('Kalimantan Barat')),
('KH', _('Kalimantan Tengah')),
('KT', _('Kalimantan Timur')),
('L', _('Surabaya')),
('M', _('Madura')),
('N', _('Malang')),
('P', _('Jember')),
('R', _('Banyumas')),
('RI', _('Federal Government')),
('S', _('Bojonegoro')),
('T', _('Purwakarta')),
('W', _('Sidoarjo')),
('Z', _('Garut')),
)
| apache-2.0 |
ronnienv/antTrails | libs/requests/packages/chardet/chardetect.py | 743 | 1141 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % name
def main():
if len(argv) <= 1:
print(description_of(stdin))
else:
for path in argv[1:]:
with open(path, 'rb') as f:
print(description_of(f, path))
if __name__ == '__main__':
main()
| apache-2.0 |
androidarmv6/android_external_chromium_org | build/android/gyp/javac.py | 25 | 3161 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import optparse
import os
import sys
from util import build_utils
from util import md5_check
def DoJavac(options):
output_dir = options.output_dir
src_dirs = build_utils.ParseGypList(options.src_dirs)
java_files = build_utils.FindInDirectories(src_dirs, '*.java')
if options.javac_includes:
javac_includes = build_utils.ParseGypList(options.javac_includes)
filtered_java_files = []
for f in java_files:
for include in javac_includes:
if fnmatch.fnmatch(f, include):
filtered_java_files.append(f)
break
java_files = filtered_java_files
# Compiling guava with certain orderings of input files causes a compiler
# crash... Sorted order works, so use that.
# See https://code.google.com/p/guava-libraries/issues/detail?id=950
java_files.sort()
classpath = build_utils.ParseGypList(options.classpath)
jar_inputs = []
for path in classpath:
if os.path.exists(path + '.TOC'):
jar_inputs.append(path + '.TOC')
else:
jar_inputs.append(path)
javac_cmd = [
'javac',
'-g',
'-source', '1.5',
'-target', '1.5',
'-classpath', ':'.join(classpath),
'-d', output_dir,
'-Xlint:unchecked',
'-Xlint:deprecation',
] + java_files
def Compile():
# Delete the classes directory. This ensures that all .class files in the
# output are actually from the input .java files. For example, if a .java
# file is deleted or an inner class is removed, the classes directory should
# not contain the corresponding old .class file after running this action.
build_utils.DeleteDirectory(output_dir)
build_utils.MakeDirectory(output_dir)
build_utils.CheckOutput(javac_cmd, print_stdout=options.chromium_code)
record_path = '%s/javac.md5.stamp' % options.output_dir
md5_check.CallAndRecordIfStale(
Compile,
record_path=record_path,
input_paths=java_files + jar_inputs,
input_strings=javac_cmd)
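# Note on the incremental-build pattern above (semantics inferred from this
# call site; md5_check is a Chromium build util not shown here): Compile() is
# only invoked when the hash of input_paths/input_strings differs from what
# was recorded at record_path on the previous run, so unchanged sources and an
# unchanged javac command line skip recompilation entirely.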
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--src-dirs', help='Directories containing java files.')
parser.add_option('--javac-includes',
      help='A list of file patterns. If provided, only java files that match ' +
      'one of the patterns will be compiled.')
parser.add_option('--classpath', help='Classpath for javac.')
parser.add_option('--output-dir', help='Directory for javac output.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--chromium-code', type='int', help='Whether code being '
'compiled should be built with stricter warnings for '
'chromium code.')
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
parser.add_option('--ignore', help='Ignored.')
options, _ = parser.parse_args()
DoJavac(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
chiefspace/udemy-rest-api | udemy_rest_api_section4/env/lib/python3.4/site-packages/setuptools/py31compat.py | 10 | 1184 | __all__ = ['get_config_vars', 'get_path']
try:
# Python 2.7 or >=3.2
from sysconfig import get_config_vars, get_path
except ImportError:
from distutils.sysconfig import get_config_vars, get_python_lib
def get_path(name):
if name not in ('platlib', 'purelib'):
raise ValueError("Name must be purelib or platlib")
return get_python_lib(name=='platlib')
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory(object):
""""
Very simple temporary directory context manager.
Will try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp()
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only ones possible
pass
self.name = None
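# Hedged usage sketch (not part of the original module): both the stdlib
# TemporaryDirectory and the fallback above support the same context-manager
# protocol.
#
#     with TemporaryDirectory() as tmp:
#         scratch = os.path.join(tmp, 'scratch.txt')  # needs `import os`
#         open(scratch, 'w').close()
#     # the directory is removed (best effort) on exit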
| gpl-2.0 |
haya14busa/alc-etm-searcher | nltk-3.0a3/nltk/parse/generate.py | 2 | 2078 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Generating from a CFG
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
from __future__ import print_function
import itertools
import sys
from nltk.grammar import Nonterminal, parse_cfg
def generate(grammar, start=None, depth=None, n=None):
"""
Generates an iterator of all sentences from a CFG.
:param grammar: The Grammar used to generate sentences.
    :param start: The Nonterminal from which to start generating sentences.
:param depth: The maximal depth of the generated tree.
:param n: The maximum number of sentences to return.
:return: An iterator of lists of terminal tokens.
"""
if not start:
start = grammar.start()
if depth is None:
depth = sys.maxsize
iter = _generate_all(grammar, [start], depth)
if n:
iter = itertools.islice(iter, n)
return iter
def _generate_all(grammar, items, depth):
if items:
for frag1 in _generate_one(grammar, items[0], depth):
for frag2 in _generate_all(grammar, items[1:], depth):
yield frag1 + frag2
else:
yield []
def _generate_one(grammar, item, depth):
if depth > 0:
if isinstance(item, Nonterminal):
for prod in grammar.productions(lhs=item):
for frag in _generate_all(grammar, prod.rhs(), depth-1):
yield frag
else:
yield [item]
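# Worked trace (illustrative, not in the original source): with the demo
# grammar below, _generate_one(grammar, Nonterminal('Det'), depth=2) expands
# the productions Det -> 'the' | 'a' and yields ['the'] then ['a'];
# _generate_all() then combines such fragments pairwise, so NP -> Det N
# yields ['the', 'man'], ['the', 'park'], ... in production order.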
demo_grammar = """
S -> NP VP
NP -> Det N
PP -> P NP
VP -> 'slept' | 'saw' NP | 'walked' PP
Det -> 'the' | 'a'
N -> 'man' | 'park' | 'dog'
P -> 'in' | 'with'
"""
def demo(N=23):
print('Generating the first %d sentences for demo grammar:' % (N,))
print(demo_grammar)
grammar = parse_cfg(demo_grammar)
for n, sent in enumerate(generate(grammar, n=N), 1):
print('%3d. %s' % (n, ' '.join(sent)))
if __name__ == '__main__':
demo()
| mit |
OmnesRes/pan_cancer | paper/figures/figure_1/bar_graphs/CESC.py | 1 | 1883 | ##script for creating a histogram
## Load necessary modules
import pylab as plt
import numpy as np
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
pvalues=map(float,pvalues)
##decide how many bins; 100 is the maximum possible due to only having two sig figs
number=100.0
counts={}
##use a dictionary to populate the bins
for i in range(int(number)):
for j in pvalues:
if i/number<j<=(i+1)/number:
counts[i]=counts.get(i,0)+1
##convert the dictionary to a list
mylist=zip(counts.keys(),counts.values())
##sort the list so that the bins are in order
mylist.sort()
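##a roughly equivalent one-liner (sketch, not used above; the edge handling
##at exact bin boundaries differs slightly from the loop):
## from collections import Counter
## counts = Counter(min(int(p*number), int(number)-1) for p in pvalues)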
##plot the data with pylab
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(bottom=.2)
ax.bar([i[0]/number for i in mylist],[i[1] for i in mylist],color='b',width=1/number,linewidth=2.0)
ax.set_xlim((0,1))
for item in ax.get_yticklabels():
item.set_fontsize(30)
for item in ax.get_xticklabels():
item.set_fontsize(30)
ax.tick_params(axis='x',length=15,width=3,direction='out',labelsize=30)
ax.tick_params(axis='y',length=15,width=3,direction='out',labelsize=30)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(3)
ax.spines['bottom'].set_linewidth(3)
ax.spines['bottom'].set_position(['outward',10])
ax.spines['left'].set_position(['outward',10])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xticks([i/10.0 for i in range(0,11)])
ax.set_xticklabels(['0']+[str(i/10.0) for i in range(1,11)])
ax.set_ylabel('Frequency',fontsize=60,labelpad=20)
ax.set_xlabel('Raw Cox P-value',fontsize=60,labelpad=20)
plt.show()
| mit |
jbzdarkid/HearthstonePro | Cards.py | 1 | 13333 | '''
Special:
"Anub'ar Ambusher"
"Blood Warriors"
"Burgly Bully"
"Captain's Parrot"
"Chromaggus"
"Echo of Mediv"
"Ethereal Peddler"
"Flame Leviathan"
"Getaway Kodo"
"Gnomish Experimenter"
"Headcrack"
"Holy Wrath"
"Ivory Knight"
"Kazakus"
"King's Elekk"
"Krul the Unshackled"
"Lock and Load"
"Lorewalker Cho"
"Sea Reaver"
"Shadowfiend"
"Small-Time Recruits"
"Thistle Tea"
"Tinkertown Technician"
"Trade Prince Gallywix"
"Vanish"
"Wilfred Fizzlebang"
"Wrathion"
'''
# Deathrattle: "Voidcaller", "The Skeleton Knight"
# Discard: "Succubus", "Darkshire Librarian", "Astral Communion", "Dark Bargain", "Deathwing"
# Buff: "Smuggler's Crate", "Hidden Cache", "Trogg Beastrager", "Grimscale Chum", "Grimestreet Outfitter", "Grimestreet Enforcer", "Grimestreet Gadgeteer", "Stolen Goods", "Grimestreet Pawnbroker", "Brass Knuckles", "Hobart Grapplehammer", "Grimestreet Smuggler", "Don Han'Cho"
# Within this file, I've separated out names of cards in "double quotes", so that I can search for them via splitter.py.
# It also means there won't be any \'s in card names.
import logging
import Hand, Utilities, Legendaries
# When a card hits the board, and we can see what its name is
def play2(entity):
if entity['player'] == Utilities.them:
if entity['name'] in ['Armor Up!', 'Ballista Shot', 'Dagger Mastery', 'DIE, INSECT!', 'Dire Shapeshift', 'INFERNO!', 'Life Tap', 'Poisoned Daggers', 'Reinforce', 'Shapeshift', 'Soul Tap', 'Steady Shot', 'Tank Up!', 'The Silver Hand', 'The Tidal Hand', 'Totemic Call', 'Totemic Slam']:
logging.info('Opponent uses their hero power')
else:
logging.info('Opponent plays %s' % entity['name'])
if entity['name'] in ["Crackle", "Dunemaul Shaman", "Finders Keepers", "Fireguard Destroyer", "Jinyu Waterspeaker", "Lightning Bolt", "Siltfin Spiritwalker", "Stormforged Axe", "Stormcrack", "Totem Golem"]:
Utilities.overload += 1
elif entity['name'] in ["Ancestral Knowledge", "Doomhammer", "Dust Devil", "Feral Spirit", "Flamewreathed Faceless", "Forked Lightning", "Lava Burst", "Lightning Storm"]:
Utilities.overload += 2
elif entity['name'] in ["Earth Elemental", "Neptulon"]:
Utilities.overload += 3
elif entity['name'] in ["Elemental Destruction"]:
Utilities.overload += 5
elif entity['name'] in ["Eternal Sentinel", "Lava Shock"]:
Utilities.overload = 0
elif entity['name'] in ["Astral Communion", "Dark Bargain", "Darkshire Librarian", "Deathwing", "Doomguard", "Soulfire", "Succubus"]:
global showentity
showentity = discard
elif entity['name'] == "Varian Wrynn":
Legendaries.varianWrynn = True
elif entity['name'] == "A Light in the Darkness":
Hand.draw(source='random', kind='minion', buff=+1)
elif entity['name'] == "Arch-Thief Rafaam":
Hand.draw(note='A powerful artifact', kind='spell')
elif entity['name'] == "Babbling Book":
Hand.draw(source='random', hero='mage', kind='spell')
elif entity['name'] == "Burgle":
Hand.draw(source='random', hero=Utilities.our_hero)
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Cabalist's Tomb":
Hand.draw(source='random', hero='mage', kind='spell')
Hand.draw(source='random', hero='mage', kind='spell')
Hand.draw(source='random', hero='mage', kind='spell')
elif entity['name'] == "Dark Peddler":
Hand.draw(source='discovered', note='A 1-cost card')
elif entity['name'] == "Ethereal Conjurer":
Hand.draw(source='discovered', hero='mage', kind='spell')
elif entity['name'] == "Finders Keepers":
Hand.draw(source='discovered', hero='shaman', note='A card with overload')
elif entity['name'] == "Gorillabot A-3":
Hand.draw(source='discovered', kind='mech minion')
elif entity['name'] == "Grand Crusader":
Hand.draw(source='random', hero='paladin')
elif entity['name'] == "Grimestreet Informant":
Hand.draw(source='discovered', hero='hunter, paladin, or warrior')
elif entity['name'] == "I Know a Guy":
Hand.draw(source='discovered', kind='taunt minion')
elif entity['name'] == "Jeweled Scarab":
Hand.draw(source='discovered', note='A 3-cost card')
elif entity['name'] == "Journey Below":
Hand.draw(source='discovered', note='A deathrattle card')
elif entity['name'] == "Kabal Chemist":
Hand.draw(source='random', kind='potion spell')
elif entity['name'] == "Kabal Courier":
Hand.draw(source='discovered', hero='mage, priest, or warlock')
elif entity['name'] == "Lotus Agents":
Hand.draw(source='discovered', hero='druid, rogue, or shaman')
elif entity['name'] == "Mind Vision":
Hand.draw(note='A card from your hand')
elif entity['name'] == "Mukla, Tyrant of the Vale":
Hand.draw(note='Banana', kind='spell')
Hand.draw(note='Banana', kind='spell')
elif entity['name'] == "Museum Curator":
# I'm ignoring "Tentacles For Arms" because it's bad
Hand.draw(source='discovered', note='A deathrattle card', kind='minion')
elif entity['name'] == "Nefarian":
Hand.draw(source='random', hero=Utilities.our_hero)
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Neptulon":
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
elif entity['name'] == "Raven Idol":
Hand.draw(source='discovered', kind='minion or spell')
elif entity['name'] == "Sense Demons":
Hand.draw(kind='demon minion')
Hand.draw(kind='demon minion')
elif entity['name'] == "Swashburglar":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Thoughtsteal":
Hand.draw(note='A random card from your deck')
Hand.draw(note='A random card from your deck')
elif entity['name'] == "Tomb Spider":
Hand.draw(source='discovered', kind='beast minion')
elif entity['name'] == "Toshley":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Unstable Portal":
Hand.draw(source='random', kind='minion', cost=-3)
elif entity['name'] == "Wild Growth":
if Utilities.resources == '10':
Hand.draw(note='Excess Mana', hero='druid', kind='spell')
elif entity['name'] == "Xaril, Poisoned Mind":
Hand.draw(source='random', kind='toxin spell')
elif entity['name'] == "Call Pet":
Hand.notes.append('If it\'s a beast, cost -4')
elif entity['name'] == "Far Sight":
Hand.notes.append('Costs (3) less')
elif entity['player'] == Utilities.us:
if entity['name'] == "King Mukla":
Hand.draw(kind='Banana')
Hand.draw(kind='Banana')
elif entity['name'] == "Mulch":
Hand.draw(source='random', kind='minion')
# if entity['player'] in [Utilities.us, Utilities.them]:
if entity['name'] == "Elite Tauren Chieftain":
Hand.draw(kind='Power Chord spell')
elif entity['name'] == "Lord Jaraxxus":
Utilities.set_hero(entity)
elif entity['name'] == "Spellslinger":
Hand.draw(source='random', kind='spell')
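# Illustrative trace (not part of the original source): if the opponent plays
# "Totem Golem" and then "Feral Spirit" in one turn, play2() raises
# Utilities.overload to 1 and then 3; a later "Lava Shock" resets it to 0, and
# turnover() below logs and clears whatever remains at end of turn.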
# When a card hits the board and we can see what its name and its target's name is.
def play3(entity, target):
if entity['player'] == Utilities.them:
if entity['name'] in ['Fireblast', 'Fireblast Rank 2', 'Lesser Heal', 'Lightning Jolt', 'Mind Shatter', 'Mind Spike', 'Heal']:
logging.info('Opponent uses their hero power, targetting %s' % target['name'])
else:
logging.info('Opponent plays %s targetting %s' % (entity['name'], target['name']))
if entity['name'] == "Soulfire":
global showentity
showentity = discard
if entity['name'] in ["Ancient Brewmaster", "Convert", "Gadgetzan Ferryman", "Time Rewinder", "Youthful Brewmaster"]:
Hand.draw(note=target['name'], kind='minion')
elif entity['name'] in ["Bloodthistle Toxin", "Shadowstep"]:
Hand.draw(note=target['name'], kind='minion', cost=-2)
elif entity['name'] == "Convert":
Hand.draw(note=target['name'], kind='minion')
elif entity['name'] == "Shadowcaster":
Hand.draw(note='A 1/1 copy of %s which costs (1)' % target['name'], kind='minion')
elif entity['player'] == Utilities.us:
if entity['name'] == "Freezing Trap":
Hand.draw(note=target['name'], kind='minion', cost=+2)
elif entity['name'] == "Sap":
Hand.draw(note=target['name'], kind='minion')
if target['player'] == Utilities.them:
if entity['name'] in ["Dream", "Kindapper"]:
Hand.draw(note=target['name'], kind='minion')
def die(entity):
if entity['player'] == Utilities.them:
logging.info('Opponent\'s %s dies' % entity['name'])
if entity['name'] == "Anub'arak":
Hand.draw(note='Anub\'arak')
elif entity['name'] == "Clockwork Gnome":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Deadly Fork":
Hand.draw(note='Sharp Fork', kind='weapon')
elif entity['name'] == "Rhonin":
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
elif entity['name'] == "Shifting Shade":
Hand.draw(note='A card from your deck')
elif entity['name'] == "Tentacles for Arms":
Hand.draw(note='Tentacles for Arms')
elif entity['name'] == "Tomb Pillager":
Hand.draw(note='The Coin', kind='spell')
elif entity['name'] == "Toshley":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Undercity Huckster":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Xaril, Poisoned Mind":
Hand.draw(source='random', kind='toxin spell')
elif entity['name'] == "Webspinner":
Hand.draw(source='random', kind='beast minion')
# if entity['player'] in [Utilities.us, Utilities.them]:
if entity['name'] == "Mechanical Yeti":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Majordomo Executus":
Utilities.set_hero(entity)
def die2(entity):
if entity['player'] == Utilities.them:
if entity['name'] == "Explorer's Hat":
Hand.draw(note='Explorer\'s Hat', hero='Hunter', kind='spell')
elif entity['name'] == "Nerubian Spores": # "Infest"
Hand.draw(source='random', kind='beast minion')
# Be careful of Blessing of Wisdom (others?) which can 'trigger' an effect on a card that already has a triggered effect.
def trigger(entity):
if entity['player'] == Utilities.them:
logging.info('Opponent\'s %s triggers' % entity['name'])
if entity['name'] == "Alarm-o-Bot":
Hand.draw(note='Alarm-o-Bot', kind='minion')
elif entity['name'] == "Archmage Antonidas":
Hand.draw(note='Fireball', hero='mage', kind='spell')
elif entity['name'] == "Colliseum Manager":
Hand.draw(note='Colliseum Manager', kind='minion')
elif entity['name'] == "Cutpurse":
Hand.draw(note='The Coin', kind='spell')
elif entity['name'] == "Emperor Thaurissan":
for card in Hand.hand:
card.cost -= 1
elif entity['name'] == "Gazlowe":
Hand.draw(source='random', kind='mech minion')
elif entity['name'] == "Kabal Trafficker":
Hand.draw(source='random', kind='demon minion')
elif entity['name'] == "Mech-Bear-Cat":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Nexus-Champion Saraad":
Hand.draw(source='random', kind='spell')
elif entity['name'] == "Recruiter":
Hand.draw(note='Squire', kind='minion')
elif entity['name'] == "Shaku, the Collector":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Ysera":
Hand.draw(note='A Dream card', kind='spell')
# Show Entity blocks are used for a number of things. Here, this is used for
# getting the hand position of discarded cards, and determining cards drawn for
# King's Elekk Joust victories.
def blockEnd():
global showentity
def showentity(data):
pass
blockEnd()
def discard(data):
logging.info('Opponent discards %s' % data['CardID'])
Hand.hand.pop(int(data['Entity']['zonePos'])-1)
def turnover():
if Utilities.overload != 0:
logging.info('Overload next turn: %d' % Utilities.overload)
Utilities.overload = 0
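# Note on the showentity swap (behavior inferred from this module alone):
# play2()/play3() rebind the module-global showentity to discard() when a
# discard-effect card is seen, so the next Show Entity block in the log is
# read as "opponent discarded this hand position"; blockEnd(), presumably
# invoked when the log block closes, restores the no-op handler.
#
#     showentity          # no-op by default (installed by blockEnd())
#     play2(soulfire)     # rebinds showentity = discard
#     showentity(data)    # pops Hand.hand[zonePos - 1]
#     blockEnd()          # back to the no-op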
| apache-2.0 |
lliendo/Radar | radar/network/monitor/select_monitor.py | 1 | 1307 | # -*- coding: utf-8 -*-
"""
This file is part of Radar.
Radar is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with Radar. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Lucas Liendo.
"""
from . import NetworkMonitor, NetworkMonitorError
class SelectMonitor(NetworkMonitor):
def __new__(cls, *args, **kwargs):
try:
global select
from select import select
except ImportError:
raise NetworkMonitorError(cls.__name__)
return super(SelectMonitor, cls).__new__(cls, *args, **kwargs)
def watch(self):
sockets = [self._server.socket] + [c.socket for c in self._server._clients]
ready_fds, _, _ = select([s.fileno() for s in sockets], [], [], self._timeout)
super(SelectMonitor, self)._watch(ready_fds)
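# Note (inferred from this file alone): importing select inside __new__ and
# publishing it via `global select` lets construction fail early with
# NetworkMonitorError on platforms without the select module, while watch()
# still resolves select() at module scope. Hedged usage sketch, assuming the
# NetworkMonitor base class wires up self._server and self._timeout:
#
#     monitor = SelectMonitor(server, timeout=0.2)
#     monitor.watch()  # polls the server socket plus all client sockets once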
| lgpl-3.0 |
WoLpH/dropbox | dropbox/util.py | 1 | 1940 | import os
class AnalyzeFileObjBug(Exception):
msg = ("\n"
"Expected file object to have %d bytes, instead we read %d bytes.\n"
"File size detection may have failed (see dropbox.util.AnalyzeFileObj)\n")
def __init__(self, expected, actual):
self.expected = expected
self.actual = actual
def __str__(self):
return self.msg % (self.expected, self.actual)
def analyze_file_obj(obj):
''' Get the size and contents of a file-like object.
Returns: (size, raw_data)
size: The amount of data waiting to be read
raw_data: If not None, the entire contents of the stream (as a string).
None if the stream should be read() in chunks.
'''
pos = 0
if hasattr(obj, 'tell'):
pos = obj.tell()
# Handle cStringIO and StringIO
if hasattr(obj, 'getvalue'):
# Why using getvalue() makes sense:
# For StringIO, this string is pre-computed anyway by read().
# For cStringIO, getvalue() is the only way
# to determine the length without read()'ing the whole thing.
raw_data = obj.getvalue()
if pos == 0:
return (len(raw_data), raw_data)
else:
# We could return raw_data[pos:], but that could drastically
# increase memory usage. Better to read it block at a time.
size = max(0, len(raw_data) - pos)
return (size, None)
# Handle real files
if hasattr(obj, 'fileno'):
size = max(0, os.fstat(obj.fileno()).st_size - pos)
return (size, None)
# User-defined object with len()
if hasattr(obj, '__len__'):
size = max(0, len(obj) - pos)
return (size, None)
# We don't know what kind of stream this is.
# To determine the size, we must read the whole thing.
raw_data = obj.read()
return (len(raw_data), raw_data)
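# Hedged usage sketch (not part of the original module):
#
#     from StringIO import StringIO
#     size, raw = analyze_file_obj(StringIO('abcdef'))
#     # -> (6, 'abcdef'): the whole buffer is known up front
#     with open('/etc/hosts', 'rb') as f:  # illustrative path
#         size, raw = analyze_file_obj(f)
#     # -> (st_size, None): caller should read() in chunks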
| mit |
sbaks0820/bitcoin | contrib/zmq/zmq_sub3.4.py | 12 | 3275 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Bitcoin should be started with the command line arguments:
bitcoind -testnet -daemon \
        -zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
print("This example only works with Python 3.4 and greater")
exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = zmq.asyncio.install()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown";
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
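# On Python >= 3.5 the decorated coroutine above could be written with the
# async/await keywords instead (sketch in a comment so this file keeps running
# on 3.4):
#
#     async def handle(self):
#         msg = await self.zmqSubSocket.recv_multipart()
#         ...  # same topic dispatch as above
#         asyncio.ensure_future(self.handle())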
daemon = ZMQHandler()
daemon.start()
| mit |
saurvs/servo | tests/wpt/web-platform-tests/tools/py/py/_builtin.py | 259 | 6521 | import sys
try:
reversed = reversed
except NameError:
def reversed(sequence):
"""reversed(sequence) -> reverse iterator over values of the sequence
Return a reverse iterator
"""
if hasattr(sequence, '__reversed__'):
return sequence.__reversed__()
if not hasattr(sequence, '__getitem__'):
raise TypeError("argument to reversed() must be a sequence")
return reversed_iterator(sequence)
class reversed_iterator(object):
def __init__(self, seq):
self.seq = seq
self.remaining = len(seq)
def __iter__(self):
return self
def next(self):
i = self.remaining
if i > 0:
i -= 1
item = self.seq[i]
self.remaining = i
return item
raise StopIteration
def __length_hint__(self):
return self.remaining
try:
any = any
except NameError:
def any(iterable):
for x in iterable:
if x:
return True
return False
try:
all = all
except NameError:
def all(iterable):
for x in iterable:
if not x:
return False
return True
try:
sorted = sorted
except NameError:
builtin_cmp = cmp # need to use cmp as keyword arg
def sorted(iterable, cmp=None, key=None, reverse=0):
use_cmp = None
if key is not None:
if cmp is None:
def use_cmp(x, y):
return builtin_cmp(x[0], y[0])
else:
def use_cmp(x, y):
return cmp(x[0], y[0])
l = [(key(element), element) for element in iterable]
else:
if cmp is not None:
use_cmp = cmp
l = list(iterable)
if use_cmp is not None:
l.sort(use_cmp)
else:
l.sort()
if reverse:
l.reverse()
if key is not None:
return [element for (_, element) in l]
return l
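    # The backport above is the classic decorate-sort-undecorate idiom: with
    # key= given, each element is wrapped as (key(element), element), the
    # tuples are sorted (optionally via cmp on the first slot), and the
    # payload is unwrapped. Illustrative: sorted(['bb', 'a'], key=len)
    # decorates to [(2, 'bb'), (1, 'a')], sorts to [(1, 'a'), (2, 'bb')],
    # and returns ['a', 'bb'].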
try:
set, frozenset = set, frozenset
except NameError:
from sets import set, frozenset
# pass through
enumerate = enumerate
try:
BaseException = BaseException
except NameError:
BaseException = Exception
try:
GeneratorExit = GeneratorExit
except NameError:
class GeneratorExit(Exception):
""" This exception is never raised, it is there to make it possible to
write code compatible with CPython 2.5 even in lower CPython
versions."""
pass
GeneratorExit.__module__ = 'exceptions'
_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
try:
callable = callable
except NameError:
def callable(obj):
return hasattr(obj, "__call__")
if sys.version_info >= (3, 0):
exec ("print_ = print ; exec_=exec")
import builtins
# some backward compatibility helpers
_basestring = str
def _totext(obj, encoding=None, errors=None):
if isinstance(obj, bytes):
if errors is None:
obj = obj.decode(encoding)
else:
obj = obj.decode(encoding, errors)
elif not isinstance(obj, str):
obj = str(obj)
return obj
def _isbytes(x):
return isinstance(x, bytes)
def _istext(x):
return isinstance(x, str)
text = str
bytes = bytes
def _getimself(function):
return getattr(function, '__self__', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
return getattr(function, "__code__", None)
def execfile(fn, globs=None, locs=None):
if globs is None:
back = sys._getframe(1)
globs = back.f_globals
locs = back.f_locals
del back
elif locs is None:
locs = globs
fp = open(fn, "r")
try:
source = fp.read()
finally:
fp.close()
co = compile(source, fn, "exec", dont_inherit=True)
exec_(co, globs, locs)
else:
import __builtin__ as builtins
_totext = unicode
_basestring = basestring
text = unicode
bytes = str
execfile = execfile
callable = callable
def _isbytes(x):
return isinstance(x, str)
def _istext(x):
return isinstance(x, unicode)
def _getimself(function):
return getattr(function, 'im_self', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
try:
return getattr(function, "__code__")
except AttributeError:
return getattr(function, "func_code", None)
def print_(*args, **kwargs):
""" minimal backport of py3k print statement. """
sep = ' '
if 'sep' in kwargs:
sep = kwargs.pop('sep')
end = '\n'
if 'end' in kwargs:
end = kwargs.pop('end')
file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
if kwargs:
args = ", ".join([str(x) for x in kwargs])
raise TypeError("invalid keyword arguments: %s" % args)
at_start = True
for x in args:
if not at_start:
file.write(sep)
file.write(str(x))
at_start = False
file.write(end)
def exec_(obj, globals=None, locals=None):
""" minimal backport of py3k exec statement. """
__tracebackhide__ = True
if globals is None:
frame = sys._getframe(1)
globals = frame.f_globals
if locals is None:
locals = frame.f_locals
elif locals is None:
locals = globals
exec2(obj, globals, locals)
if sys.version_info >= (3, 0):
def _reraise(cls, val, tb):
__tracebackhide__ = True
assert hasattr(val, '__traceback__')
raise cls.with_traceback(val, tb)
else:
exec ("""
def _reraise(cls, val, tb):
__tracebackhide__ = True
raise cls, val, tb
def exec2(obj, globals, locals):
__tracebackhide__ = True
exec obj in globals, locals
""")
def _tryimport(*names):
""" return the first successfully imported module. """
assert names
for name in names:
try:
__import__(name)
except ImportError:
excinfo = sys.exc_info()
else:
return sys.modules[name]
_reraise(*excinfo)
| mpl-2.0 |
kawie/pybikes | pybikes/keolis.py | 3 | 5671 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2012, eskerda <eskerda@gmail.com>
# Distributed under the AGPL license, see LICENSE.txt
import re
import json
from lxml import etree
import lxml.html
from .base import BikeShareSystem, BikeShareStation
from . import utils
__all__ = ['Keolis', 'KeolisStation', 'Keolis_v2', 'KeolisStation_v2']
xml_parser = etree.XMLParser(recover = True)
_re_float = "([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
class Keolis(BikeShareSystem):
sync = True
meta = {
'system': 'Keolis',
'company': 'Keolis'
}
_re_fuzzle = '\"latitude\"\:\ \"{0}\"\,\ '\
'\"longitude\"\:\ \"{0}\"\,\ '\
'\"text\"\:\ \"(.*?)\"\,\ '\
'\"markername'.format(_re_float)
_re_num_name = "\#(\d+)\ \-\ (.*)" # #10 - Place Lyautey
def __init__(self, tag, meta, feed_url):
super(Keolis, self).__init__(tag, meta)
self.feed_url = feed_url
def update(self, scraper = None):
if scraper is None:
scraper = utils.PyBikesScraper()
raw_fuzzle = scraper.request(self.feed_url)
data = re.findall(Keolis._re_fuzzle, raw_fuzzle)
self.stations = map(KeolisStation, data)
class KeolisStation(BikeShareStation):
def __init__(self, data):
"""
fuzzle is something like
Must be utf8 encoded and string-escaped
<div class="gmap-popup">
<div class="gmap-infobulle">
<div class="gmap-titre">#16 - Universite Sud</div>
<div class="gmap-adresse">
AVENUE DE L'UNIVERSITE FACE A L'ENTREE DE L'UNIVERSITE
</div>
<div class="gmap-velos">
<table>
<tr>
<td class="ok">
<strong>9</strong> vélos disponibles
</td>
<td class="ok">
<strong>17</strong> places disponibles
</td>
<td>
<acronym title="Carte Bancaire">CB</acronym>
</td>
</tr>
</table>
</div>
<div class="gmap-datemaj">
dernière mise à jour il y a <strong>00 min</strong>
</div>
</div>
</div>
"""
super(KeolisStation, self).__init__(0)
fuzzle = lxml.html.fromstring(
data[2].encode('utf8').decode('string-escape')
)
num_name = re.findall(
Keolis._re_num_name,
fuzzle.xpath('//div[@class="gmap-titre"]/text()')[0]
)[0]
bikes_places_upd = fuzzle.xpath('//strong/text()')
# Will not use
# address = fuzzle.xpath('//div[@class="gmap-adresse"]/text()')[0]
self.latitude = float(data[0])
self.longitude = float(data[1])
self.name = num_name[1]
self.extra = {
'uid': int(num_name[0])
}
if len(bikes_places_upd) > 1:
self.bikes = int(bikes_places_upd[0])
self.free = int(bikes_places_upd[1])
self.extra['status'] = 'online'
else:
self.bikes = 0
self.free = 0
self.extra['status'] = 'offline'
class Keolis_v2(BikeShareSystem):
sync = False
meta = {
'system': 'Keolis',
'company': 'Keolis'
}
_list_url = '/stations/xml-stations.aspx'
_station_url = '/stations/xml-station.aspx?borne={id}'
def __init__(self, tag, feed_url, meta):
super(Keolis_v2, self).__init__(tag, meta)
self.feed_url = feed_url + self._list_url
self.station_url = feed_url + self._station_url
def update(self, scraper = None):
if scraper is None:
scraper = utils.PyBikesScraper()
raw_list = scraper.request(self.feed_url).encode('utf-16')
xml_list = etree.fromstring(raw_list, xml_parser)
stations = []
for index, marker in enumerate(xml_list.iter('marker')):
station = KeolisStation_v2(index, marker, self.station_url)
stations.append(station)
self.stations = stations
class KeolisStation_v2(BikeShareStation):
def __init__(self, index, marker, station_url):
super(KeolisStation_v2, self).__init__(index)
self.name = marker.get('name')
self.latitude = float(marker.get('lat'))
self.longitude = float(marker.get('lng'))
self.extra = {
'uid': int(marker.get('id'))
}
self._station_url = station_url.format(id = self.extra['uid'])
def update(self, scraper = None):
if scraper is None:
scraper = utils.PyBikesScraper()
raw_status = scraper.request(self._station_url).encode('utf-16')
xml_status = etree.fromstring(raw_status, xml_parser)
self.bikes = int(xml_status.find('bikes').text)
self.free = int(xml_status.find('attachs').text)
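        # 'adress' below is kept as-is: it appears to mirror the element name
        # used by the upstream XML feed, not a typo in this client.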
self.extra['address'] = xml_status.find('adress').text.title()
        # TODO: Try to standardize these fields
# 0 means online, 1 means temporarily unavailable
# are there more status?
self.extra['status'] = xml_status.find('status').text
# payment: AVEC_TPE | SANS_TPE
# as in, accepts bank cards or not
self.extra['payment'] = xml_status.find('paiement').text
# Update time as in 47 seconds ago: '47 secondes'
self.extra['lastupd'] = xml_status.find('lastupd').text
| lgpl-3.0 |
seann1/portfolio5 | .meteor/dev_bundle/python/Lib/test/test_wsgiref.py | 25 | 19750 | from __future__ import nested_scopes # Backward compat for 2.1
from unittest import TestCase
from wsgiref.util import setup_testing_defaults
from wsgiref.headers import Headers
from wsgiref.handlers import BaseHandler, BaseCGIHandler
from wsgiref import util
from wsgiref.validate import validator
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app
from wsgiref.simple_server import make_server
from StringIO import StringIO
from SocketServer import BaseServer
import os
import re
import sys
from test import test_support
class MockServer(WSGIServer):
"""Non-socket HTTP server"""
def __init__(self, server_address, RequestHandlerClass):
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.server_bind()
def server_bind(self):
host, port = self.server_address
self.server_name = host
self.server_port = port
self.setup_environ()
class MockHandler(WSGIRequestHandler):
"""Non-socket HTTP handler"""
def setup(self):
self.connection = self.request
self.rfile, self.wfile = self.connection
def finish(self):
pass
def hello_app(environ,start_response):
start_response("200 OK", [
('Content-Type','text/plain'),
('Date','Mon, 05 Jun 2006 18:49:54 GMT')
])
return ["Hello, world!"]
def run_amock(app=hello_app, data="GET / HTTP/1.0\n\n"):
server = make_server("", 80, app, MockServer, MockHandler)
inp, out, err, olderr = StringIO(data), StringIO(), StringIO(), sys.stderr
sys.stderr = err
try:
server.finish_request((inp,out), ("127.0.0.1",8888))
finally:
sys.stderr = olderr
return out.getvalue(), err.getvalue()
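# Typical use (see the tests below): run_amock() drives one request through
# the mock server and returns (response_text, captured_stderr), e.g.
# out, err = run_amock(validator(hello_app)).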
def compare_generic_iter(make_it,match):
"""Utility to compare a generic 2.1/2.2+ iterator with an iterable
If running under Python 2.2+, this tests the iterator using iter()/next(),
as well as __getitem__. 'make_it' must be a function returning a fresh
iterator to be tested (since this may test the iterator twice)."""
it = make_it()
n = 0
for item in match:
if not it[n]==item: raise AssertionError
n+=1
try:
it[n]
except IndexError:
pass
else:
raise AssertionError("Too many items from __getitem__",it)
try:
iter, StopIteration
except NameError:
pass
else:
# Only test iter mode under 2.2+
it = make_it()
if not iter(it) is it: raise AssertionError
for item in match:
if not it.next()==item: raise AssertionError
try:
it.next()
except StopIteration:
pass
else:
raise AssertionError("Too many items from .next()",it)
class IntegrationTests(TestCase):
def check_hello(self, out, has_length=True):
self.assertEqual(out,
"HTTP/1.0 200 OK\r\n"
"Server: WSGIServer/0.1 Python/"+sys.version.split()[0]+"\r\n"
"Content-Type: text/plain\r\n"
"Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n" +
(has_length and "Content-Length: 13\r\n" or "") +
"\r\n"
"Hello, world!"
)
def test_plain_hello(self):
out, err = run_amock()
self.check_hello(out)
def test_request_length(self):
out, err = run_amock(data="GET " + ("x" * 65537) + " HTTP/1.0\n\n")
self.assertEqual(out.splitlines()[0],
"HTTP/1.0 414 Request-URI Too Long")
def test_validated_hello(self):
out, err = run_amock(validator(hello_app))
# the middleware doesn't support len(), so content-length isn't there
self.check_hello(out, has_length=False)
def test_simple_validation_error(self):
def bad_app(environ,start_response):
start_response("200 OK", ('Content-Type','text/plain'))
return ["Hello, world!"]
out, err = run_amock(validator(bad_app))
self.assertTrue(out.endswith(
"A server error occurred. Please contact the administrator."
))
self.assertEqual(
err.splitlines()[-2],
"AssertionError: Headers (('Content-Type', 'text/plain')) must"
" be of type list: <type 'tuple'>"
)
class UtilityTests(TestCase):
def checkShift(self,sn_in,pi_in,part,sn_out,pi_out):
env = {'SCRIPT_NAME':sn_in,'PATH_INFO':pi_in}
util.setup_testing_defaults(env)
self.assertEqual(util.shift_path_info(env),part)
self.assertEqual(env['PATH_INFO'],pi_out)
self.assertEqual(env['SCRIPT_NAME'],sn_out)
return env
def checkDefault(self, key, value, alt=None):
# Check defaulting when empty
env = {}
util.setup_testing_defaults(env)
if isinstance(value, StringIO):
self.assertIsInstance(env[key], StringIO)
else:
self.assertEqual(env[key], value)
# Check existing value
env = {key:alt}
util.setup_testing_defaults(env)
self.assertIs(env[key], alt)
def checkCrossDefault(self,key,value,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(kw[key],value)
def checkAppURI(self,uri,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.application_uri(kw),uri)
def checkReqURI(self,uri,query=1,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.request_uri(kw,query),uri)
def checkFW(self,text,size,match):
def make_it(text=text,size=size):
return util.FileWrapper(StringIO(text),size)
compare_generic_iter(make_it,match)
it = make_it()
self.assertFalse(it.filelike.closed)
for item in it:
pass
self.assertFalse(it.filelike.closed)
it.close()
self.assertTrue(it.filelike.closed)
def testSimpleShifts(self):
self.checkShift('','/', '', '/', '')
self.checkShift('','/x', 'x', '/x', '')
self.checkShift('/','', None, '/', '')
self.checkShift('/a','/x/y', 'x', '/a/x', '/y')
self.checkShift('/a','/x/', 'x', '/a/x', '/')
def testNormalizedShifts(self):
self.checkShift('/a/b', '/../y', '..', '/a', '/y')
self.checkShift('', '/../y', '..', '', '/y')
self.checkShift('/a/b', '//y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '//y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '/./y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '/./y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '///./..//y/.//', '..', '/a', '/y/')
self.checkShift('/a/b', '///', '', '/a/b/', '')
self.checkShift('/a/b', '/.//', '', '/a/b/', '')
self.checkShift('/a/b', '/x//', 'x', '/a/b/x', '/')
self.checkShift('/a/b', '/.', None, '/a/b', '')
def testDefaults(self):
for key, value in [
('SERVER_NAME','127.0.0.1'),
('SERVER_PORT', '80'),
('SERVER_PROTOCOL','HTTP/1.0'),
('HTTP_HOST','127.0.0.1'),
('REQUEST_METHOD','GET'),
('SCRIPT_NAME',''),
('PATH_INFO','/'),
('wsgi.version', (1,0)),
('wsgi.run_once', 0),
('wsgi.multithread', 0),
('wsgi.multiprocess', 0),
('wsgi.input', StringIO("")),
('wsgi.errors', StringIO()),
('wsgi.url_scheme','http'),
]:
self.checkDefault(key,value)
def testCrossDefaults(self):
self.checkCrossDefault('HTTP_HOST',"foo.bar",SERVER_NAME="foo.bar")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="on")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="1")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="yes")
self.checkCrossDefault('wsgi.url_scheme',"http",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"80",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"443",HTTPS="on")
def testGuessScheme(self):
self.assertEqual(util.guess_scheme({}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"foo"}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"on"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"yes"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"1"}), "https")
def testAppURIs(self):
self.checkAppURI("http://127.0.0.1/")
self.checkAppURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkAppURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
self.checkAppURI("http://spam.example.com:2071/",
HTTP_HOST="spam.example.com:2071", SERVER_PORT="2071")
self.checkAppURI("http://spam.example.com/",
SERVER_NAME="spam.example.com")
self.checkAppURI("http://127.0.0.1/",
HTTP_HOST="127.0.0.1", SERVER_NAME="spam.example.com")
self.checkAppURI("https://127.0.0.1/", HTTPS="on")
self.checkAppURI("http://127.0.0.1:8000/", SERVER_PORT="8000",
HTTP_HOST=None)
def testReqURIs(self):
self.checkReqURI("http://127.0.0.1/")
self.checkReqURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkReqURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
self.checkReqURI("http://127.0.0.1/spammity/spam",
SCRIPT_NAME="/spammity", PATH_INFO="/spam")
self.checkReqURI("http://127.0.0.1/spammity/sp%E4m",
SCRIPT_NAME="/spammity", PATH_INFO="/sp\xe4m")
self.checkReqURI("http://127.0.0.1/spammity/spam;ham",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;ham")
self.checkReqURI("http://127.0.0.1/spammity/spam;cookie=1234,5678",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;cookie=1234,5678")
self.checkReqURI("http://127.0.0.1/spammity/spam?say=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam?s%E4y=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="s%E4y=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam", 0,
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
def testFileWrapper(self):
self.checkFW("xyz"*50, 120, ["xyz"*40,"xyz"*10])
def testHopByHop(self):
for hop in (
"Connection Keep-Alive Proxy-Authenticate Proxy-Authorization "
"TE Trailers Transfer-Encoding Upgrade"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertTrue(util.is_hop_by_hop(alt))
# Not comprehensive, just a few random header names
for hop in (
"Accept Cache-Control Date Pragma Trailer Via Warning"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertFalse(util.is_hop_by_hop(alt))
class HeaderTests(TestCase):
def testMappingInterface(self):
test = [('x','y')]
self.assertEqual(len(Headers([])),0)
self.assertEqual(len(Headers(test[:])),1)
self.assertEqual(Headers(test[:]).keys(), ['x'])
self.assertEqual(Headers(test[:]).values(), ['y'])
self.assertEqual(Headers(test[:]).items(), test)
self.assertIsNot(Headers(test).items(), test) # must be copy!
h=Headers([])
del h['foo'] # should not raise an error
h['Foo'] = 'bar'
for m in h.has_key, h.__contains__, h.get, h.get_all, h.__getitem__:
self.assertTrue(m('foo'))
self.assertTrue(m('Foo'))
self.assertTrue(m('FOO'))
self.assertFalse(m('bar'))
self.assertEqual(h['foo'],'bar')
h['foo'] = 'baz'
self.assertEqual(h['FOO'],'baz')
self.assertEqual(h.get_all('foo'),['baz'])
self.assertEqual(h.get("foo","whee"), "baz")
self.assertEqual(h.get("zoo","whee"), "whee")
self.assertEqual(h.setdefault("foo","whee"), "baz")
self.assertEqual(h.setdefault("zoo","whee"), "whee")
self.assertEqual(h["foo"],"baz")
self.assertEqual(h["zoo"],"whee")
def testRequireList(self):
self.assertRaises(TypeError, Headers, "foo")
def testExtras(self):
h = Headers([])
self.assertEqual(str(h),'\r\n')
h.add_header('foo','bar',baz="spam")
self.assertEqual(h['foo'], 'bar; baz="spam"')
self.assertEqual(str(h),'foo: bar; baz="spam"\r\n\r\n')
h.add_header('Foo','bar',cheese=None)
self.assertEqual(h.get_all('foo'),
['bar; baz="spam"', 'bar; cheese'])
self.assertEqual(str(h),
'foo: bar; baz="spam"\r\n'
'Foo: bar; cheese\r\n'
'\r\n'
)
class ErrorHandler(BaseCGIHandler):
"""Simple handler subclass for testing BaseHandler"""
# BaseHandler records the OS environment at import time, but envvars
# might have been changed later by other tests, which trips up
# HandlerTests.testEnviron().
os_environ = dict(os.environ.items())
def __init__(self,**kw):
setup_testing_defaults(kw)
BaseCGIHandler.__init__(
self, StringIO(''), StringIO(), StringIO(), kw,
multithread=True, multiprocess=True
)
class TestHandler(ErrorHandler):
"""Simple handler subclass for testing BaseHandler, w/error passthru"""
def handle_error(self):
raise # for testing, we want to see what's happening
class HandlerTests(TestCase):
def checkEnvironAttrs(self, handler):
env = handler.environ
for attr in [
'version','multithread','multiprocess','run_once','file_wrapper'
]:
if attr=='file_wrapper' and handler.wsgi_file_wrapper is None:
continue
self.assertEqual(getattr(handler,'wsgi_'+attr),env['wsgi.'+attr])
def checkOSEnviron(self,handler):
empty = {}; setup_testing_defaults(empty)
env = handler.environ
from os import environ
for k,v in environ.items():
if k not in empty:
self.assertEqual(env[k],v)
for k,v in empty.items():
self.assertIn(k, env)
def testEnviron(self):
h = TestHandler(X="Y")
h.setup_environ()
self.checkEnvironAttrs(h)
self.checkOSEnviron(h)
self.assertEqual(h.environ["X"],"Y")
def testCGIEnviron(self):
h = BaseCGIHandler(None,None,None,{})
h.setup_environ()
for key in 'wsgi.url_scheme', 'wsgi.input', 'wsgi.errors':
self.assertIn(key, h.environ)
def testScheme(self):
h=TestHandler(HTTPS="on"); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'https')
h=TestHandler(); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'http')
def testAbstractMethods(self):
h = BaseHandler()
for name in [
'_flush','get_stdin','get_stderr','add_cgi_vars'
]:
self.assertRaises(NotImplementedError, getattr(h,name))
self.assertRaises(NotImplementedError, h._write, "test")
def testContentLength(self):
# Demo one reason iteration is better than write()... ;)
def trivial_app1(e,s):
s('200 OK',[])
return [e['wsgi.url_scheme']]
def trivial_app2(e,s):
s('200 OK',[])(e['wsgi.url_scheme'])
return []
def trivial_app4(e,s):
# Simulate a response to a HEAD request
s('200 OK',[('Content-Length', '12345')])
return []
h = TestHandler()
h.run(trivial_app1)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"Content-Length: 4\r\n"
"\r\n"
"http")
h = TestHandler()
h.run(trivial_app2)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"\r\n"
"http")
h = TestHandler()
h.run(trivial_app4)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 12345\r\n'
b'\r\n')
def testBasicErrorOutput(self):
def non_error_app(e,s):
s('200 OK',[])
return []
def error_app(e,s):
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(non_error_app)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"Content-Length: 0\r\n"
"\r\n")
self.assertEqual(h.stderr.getvalue(),"")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
"Status: %s\r\n"
"Content-Type: text/plain\r\n"
"Content-Length: %d\r\n"
"\r\n%s" % (h.error_status,len(h.error_body),h.error_body))
self.assertNotEqual(h.stderr.getvalue().find("AssertionError"), -1)
def testErrorAfterOutput(self):
MSG = "Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"\r\n"+MSG)
self.assertNotEqual(h.stderr.getvalue().find("AssertionError"), -1)
def testHeaderFormats(self):
def non_error_app(e,s):
s('200 OK',[])
return []
stdpat = (
r"HTTP/%s 200 OK\r\n"
r"Date: \w{3}, [ 0123]\d \w{3} \d{4} \d\d:\d\d:\d\d GMT\r\n"
r"%s" r"Content-Length: 0\r\n" r"\r\n"
)
shortpat = (
"Status: 200 OK\r\n" "Content-Length: 0\r\n" "\r\n"
)
for ssw in "FooBar/1.0", None:
sw = ssw and "Server: %s\r\n" % ssw or ""
for version in "1.0", "1.1":
for proto in "HTTP/0.9", "HTTP/1.0", "HTTP/1.1":
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = False
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
self.assertEqual(shortpat,h.stdout.getvalue())
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = True
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
if proto=="HTTP/0.9":
self.assertEqual(h.stdout.getvalue(),"")
else:
self.assertTrue(
re.match(stdpat%(version,sw), h.stdout.getvalue()),
(stdpat%(version,sw), h.stdout.getvalue())
)
def testCloseOnError(self):
side_effects = {'close_called': False}
MSG = b"Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
class CrashyIterable(object):
def __iter__(self):
while True:
yield b'blah'
raise AssertionError("This should be caught by handler")
def close(self):
side_effects['close_called'] = True
return CrashyIterable()
h = ErrorHandler()
h.run(error_app)
self.assertEqual(side_effects['close_called'], True)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
fbradyirl/home-assistant | homeassistant/components/arlo/sensor.py | 7 | 6412 | """Sensor support for Netgear Arlo IP cameras."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_MONITORED_CONDITIONS,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from . import ATTRIBUTION, DATA_ARLO, DEFAULT_BRAND, SIGNAL_UPDATE_ARLO
_LOGGER = logging.getLogger(__name__)
# sensor_type [ description, unit, icon ]
SENSOR_TYPES = {
"last_capture": ["Last", None, "run-fast"],
"total_cameras": ["Arlo Cameras", None, "video"],
"captured_today": ["Captured Today", None, "file-video"],
"battery_level": ["Battery Level", "%", "battery-50"],
"signal_strength": ["Signal Strength", None, "signal"],
"temperature": ["Temperature", TEMP_CELSIUS, "thermometer"],
"humidity": ["Humidity", "%", "water-percent"],
"air_quality": ["Air Quality", "ppm", "biohazard"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
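# Illustrative YAML this schema accepts (sketch; condition keys come from
# SENSOR_TYPES above):
#
#   sensor:
#     - platform: arlo
#       monitored_conditions:
#         - captured_today
#         - battery_level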
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up an Arlo IP sensor."""
arlo = hass.data.get(DATA_ARLO)
if not arlo:
return
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
if sensor_type == "total_cameras":
sensors.append(ArloSensor(SENSOR_TYPES[sensor_type][0], arlo, sensor_type))
else:
for camera in arlo.cameras:
if sensor_type in ("temperature", "humidity", "air_quality"):
continue
name = "{0} {1}".format(SENSOR_TYPES[sensor_type][0], camera.name)
sensors.append(ArloSensor(name, camera, sensor_type))
for base_station in arlo.base_stations:
if (
sensor_type in ("temperature", "humidity", "air_quality")
and base_station.model_id == "ABC1000"
):
name = "{0} {1}".format(
SENSOR_TYPES[sensor_type][0], base_station.name
)
sensors.append(ArloSensor(name, base_station, sensor_type))
add_entities(sensors, True)
class ArloSensor(Entity):
"""An implementation of a Netgear Arlo IP sensor."""
def __init__(self, name, device, sensor_type):
"""Initialize an Arlo sensor."""
_LOGGER.debug("ArloSensor created for %s", name)
self._name = name
self._data = device
self._sensor_type = sensor_type
self._state = None
self._icon = "mdi:{}".format(SENSOR_TYPES.get(self._sensor_type)[2])
@property
def name(self):
"""Return the name of this camera."""
return self._name
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_ARLO, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._sensor_type == "battery_level" and self._state is not None:
return icon_for_battery_level(
battery_level=int(self._state), charging=False
)
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES.get(self._sensor_type)[1]
@property
def device_class(self):
"""Return the device class of the sensor."""
if self._sensor_type == "temperature":
return DEVICE_CLASS_TEMPERATURE
if self._sensor_type == "humidity":
return DEVICE_CLASS_HUMIDITY
return None
def update(self):
"""Get the latest data and updates the state."""
_LOGGER.debug("Updating Arlo sensor %s", self.name)
if self._sensor_type == "total_cameras":
self._state = len(self._data.cameras)
elif self._sensor_type == "captured_today":
self._state = len(self._data.captured_today)
elif self._sensor_type == "last_capture":
try:
video = self._data.last_video
self._state = video.created_at_pretty("%m-%d-%Y %H:%M:%S")
except (AttributeError, IndexError):
error_msg = "Video not found for {0}. Older than {1} days?".format(
self.name, self._data.min_days_vdo_cache
)
_LOGGER.debug(error_msg)
self._state = None
elif self._sensor_type == "battery_level":
try:
self._state = self._data.battery_level
except TypeError:
self._state = None
elif self._sensor_type == "signal_strength":
try:
self._state = self._data.signal_strength
except TypeError:
self._state = None
elif self._sensor_type == "temperature":
try:
self._state = self._data.ambient_temperature
except TypeError:
self._state = None
elif self._sensor_type == "humidity":
try:
self._state = self._data.ambient_humidity
except TypeError:
self._state = None
elif self._sensor_type == "air_quality":
try:
self._state = self._data.ambient_air_quality
except TypeError:
self._state = None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
attrs["brand"] = DEFAULT_BRAND
if self._sensor_type != "total_cameras":
attrs["model"] = self._data.model_id
return attrs
| apache-2.0 |
asaolabs/python-lambda | aws_lambda/aws_lambda.py | 1 | 25699 | # -*- coding: utf-8 -*-
from __future__ import print_function
import hashlib
import json
import logging
import os
import sys
import time
from collections import defaultdict
from imp import load_source
from shutil import copy
from shutil import copyfile
from shutil import copystat
from shutil import copytree
from tempfile import mkdtemp
import boto3
import botocore
import yaml
import subprocess
from .helpers import archive
from .helpers import get_environment_variable_value
from .helpers import mkdir
from .helpers import read
from .helpers import timestamp
from .helpers import LambdaContext
ARN_PREFIXES = {
'cn-north-1': 'aws-cn',
'cn-northwest-1': 'aws-cn',
'us-gov-west-1': 'aws-us-gov',
}
log = logging.getLogger(__name__)
def cleanup_old_versions(
src, keep_last_versions,
config_file='config.yaml', profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
Won't delete $Latest and any aliased version
:param str src:
The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py)).
:param int keep_last_versions:
The number of recent versions to keep and not delete
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
response = client.list_versions_by_function(
FunctionName=cfg.get('function_name'),
)
versions = response.get('Versions')
if len(response.get('Versions')) < keep_last_versions:
print('Nothing to delete. (Too few versions published)')
else:
version_numbers = [elem.get('Version') for elem in
versions[1:-keep_last_versions]]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get('function_name'),
Qualifier=version_number,
)
                except botocore.exceptions.ClientError as e:
                    # ClientError carries no ``.message`` attribute on
                    # Python 3; ``str(e)`` works on both 2 and 3.
                    print('Skipping Version {}: {}'
                          .format(version_number, str(e)))
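# Illustrative sketch (not part of the original module): the version list
# returned by ``list_versions_by_function`` starts with ``$LATEST``, and the
# slice ``versions[1:-keep_last_versions]`` picks the numbered versions to
# delete. Simplified to bare strings for clarity:
#
#     versions = ['$LATEST', '1', '2', '3', '4', '5']
#     keep_last_versions = 2
#     versions[1:-2]  # -> ['1', '2', '3'], so versions 4 and 5 survive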
def deploy(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
preserve_vpc=False
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py)).
    :param str local_package:
        The path to a local package that should be included in the deploy as
        well (and/or is not available on PyPI)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src, config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc)
else:
create_function(cfg, path_to_zip_file)
def deploy_s3(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
preserve_vpc=False
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py)).
    :param str local_package:
        The path to a local package that should be included in the deploy as
        well (and/or is not available on PyPI)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src, config_file=config_file, requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(cfg, path_to_zip_file, existing_config, use_s3=use_s3,
s3_file=s3_file, preserve_vpc=preserve_vpc)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)
def upload(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py)).
    :param str local_package:
        The path to a local package that should be included in the deploy as
        well (and/or is not available on PyPI)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src, config_file=config_file, requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file)
def invoke(
src, event_file='event.json',
config_file='config.yaml', profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py)).
    :param str event_file:
        An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ['AWS_PROFILE'] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get('environment_variables')
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get('handler')
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get('timeout')
if timeout:
context = LambdaContext(cfg.get('function_name'),timeout)
else:
context = LambdaContext(cfg.get('function_name'))
start = time.time()
results = fn(event, context)
end = time.time()
print('{0}'.format(results))
if verbose:
print('\nexecution time: {:.8f}s\nfunction execution '
'timeout: {:2}s'.format(end - start, cfg.get('timeout', 15)))
def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
Minimal possible template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'project_templates',
)
for filename in os.listdir(templates_path):
if (minimal and filename == 'event.json') or filename.endswith('.pyc'):
continue
        # The entry under ``templates_path`` is the *source*; ``src`` is the
        # destination directory.
        path_to_template_file = os.path.join(templates_path, filename)
        if not os.path.isdir(path_to_template_file):
            copy(path_to_template_file, src)
def build(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
"""Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py)).
    :param str local_package:
        The path to a local package that should be included in the deploy as
        well (and/or is not available on PyPI)
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
dist_directory = cfg.get('dist_directory', 'dist')
path_to_dist = os.path.join(src, dist_directory)
mkdir(path_to_dist)
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get('function_name')
output_filename = '{0}-{1}.zip'.format(timestamp(), function_name)
path_to_temp = mkdtemp(prefix='aws-lambda')
pip_install_to_target(
path_to_temp,
requirements=requirements,
local_package=local_package,
)
# Hack for Zope.
if 'zope' in os.listdir(path_to_temp):
print(
'Zope packages detected; fixing Zope package paths to '
'make them importable.',
)
# Touch.
with open(os.path.join(path_to_temp, 'zope/__init__.py'), 'wb'):
pass
# Gracefully handle whether ".zip" was included in the filename or not.
output_filename = (
'{0}.zip'.format(output_filename)
if not output_filename.endswith('.zip')
else output_filename
)
# Allow definition of source code directories we want to build into our
# zipped package.
build_config = defaultdict(**cfg.get('build', {}))
build_source_directories = build_config.get('source_directories', '')
build_source_directories = (
build_source_directories
if build_source_directories is not None
else ''
)
source_directories = [
d.strip() for d in build_source_directories.split(',')
]
files = []
    for filename in os.listdir(src):
        # Test against the full path; a bare ``filename`` would be resolved
        # relative to the current working directory rather than ``src``.
        full_path = os.path.join(src, filename)
        if os.path.isfile(full_path):
            if filename == '.DS_Store':
                continue
            if filename == config_file:
                continue
            print('Bundling: %r' % filename)
            files.append(full_path)
        elif os.path.isdir(full_path) and filename in source_directories:
            print('Bundling directory: %r' % filename)
            files.append(full_path)
# "cd" into `temp_path` directory.
os.chdir(path_to_temp)
for f in files:
if os.path.isfile(f):
_, filename = os.path.split(f)
# Copy handler file into root of the packages folder.
copyfile(f, os.path.join(path_to_temp, filename))
copystat(f, os.path.join(path_to_temp, filename))
elif os.path.isdir(f):
destination_folder = os.path.join(path_to_temp, f[len(src) + 1:])
copytree(f, destination_folder)
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive('./', path_to_dist, output_filename)
return path_to_zip_file
def get_callable_handler_function(src, handler):
    """Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split('.')
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name)
def get_handler_filename(handler):
"""Shortcut to get the filename from the handler string.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
module_name, _ = handler.split('.')
return '{0}.py'.format(module_name)
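# Example of the helper above (sketch):
#
#     >>> get_handler_filename('service.handler')
#     'service.py'
#
# Note that a dotted module path such as 'pkg.service.handler' would raise a
# ValueError in the two-way split.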
def _install_packages(path, packages):
"""Install all packages listed to the target directory.
    Ignores any package that includes Python itself, as well as python-lambda,
    since they are only needed for deploying and not for running the code
:param str path:
Path to copy installed pip packages to.
:param list packages:
A list of packages to be installed via pip.
"""
def _filter_blacklist(package):
blacklist = ['-i', '#', 'Python==', 'python-lambda==']
        return not any(package.startswith(entry) for entry in blacklist)
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith('-e '):
package = package.replace('-e ', '')
print('Installing {package}'.format(package=package))
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-t', path, '--ignore-installed'])
    print('Install directory contents are now: {directory}'.format(directory=os.listdir(path)))
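# Sketch of the blacklist filter above on a hypothetical requirements list:
#
#     packages = ['requests==2.20.0', '# comment', '-i https://pypi.org/simple',
#                 'python-lambda==3.0.0', '-e ./my-local-pkg']
#
# Only 'requests==2.20.0' and '-e ./my-local-pkg' pass the filter, and the
# '-e ' prefix is stripped before the package reaches pip.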
def pip_install_to_target(path, requirements=None, local_package=None):
"""For a given active virtualenv, gather all installed pip packages then
copy (re-install) them to the path provided.
:param str path:
Path to copy installed pip packages to.
:param str requirements:
If set, only the packages in the supplied requirements file are
installed.
If not set then installs all packages found via pip freeze.
:param str local_package:
The path to a local package with should be included in the deploy as
well (and/or is not available on PyPi)
"""
packages = []
if not requirements:
print('Gathering pip packages')
pkgStr = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
packages.extend(pkgStr.decode('utf-8').splitlines())
else:
if os.path.exists(requirements):
print('Gathering requirement packages')
data = read(requirements)
packages.extend(data.splitlines())
if not packages:
print('No dependency packages installed!')
if local_package is not None:
if not isinstance(local_package, (list, tuple)):
local_package = [local_package]
for l_package in local_package:
packages.append(l_package)
_install_packages(path, packages)
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, 'aws')
return 'arn:{0}:iam::{1}:role/{2}'.format(prefix, account_id, role)
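# Example (sketch):
#
#     >>> get_role_name('us-east-1', '123456789012', 'lambda_basic_execution')
#     'arn:aws:iam::123456789012:role/lambda_basic_execution'
#     >>> get_role_name('cn-north-1', '123456789012', 'my-role')
#     'arn:aws-cn:iam::123456789012:role/my-role'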
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key,
region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
'sts', profile_name, aws_access_key_id, aws_secret_access_key,
region,
)
return client.get_caller_identity().get('Account')
def get_client(
client, profile_name, aws_access_key_id, aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client)
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print('Creating your new Lambda function')
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
account_id = get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, cfg.get(
'region',
),
)
role = get_role_name(
cfg.get('region'), account_id,
cfg.get('role', 'lambda_basic_execution'),
)
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
# Do we prefer development variable over config?
buck_name = (
os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
)
func_name = (
os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
)
print('Creating lambda function with name: {}'.format(func_name))
if use_s3:
kwargs = {
'FunctionName': func_name,
'Runtime': cfg.get('runtime', 'python2.7'),
'Role': role,
'Handler': cfg.get('handler'),
'Code': {
'S3Bucket': '{}'.format(buck_name),
'S3Key': '{}'.format(s3_file),
},
'Description': cfg.get('description', ''),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
'VpcConfig': {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
},
'Publish': True,
}
else:
kwargs = {
'FunctionName': func_name,
'Runtime': cfg.get('runtime', 'python2.7'),
'Role': role,
'Handler': cfg.get('handler'),
'Code': {'ZipFile': byte_stream},
'Description': cfg.get('description', ''),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
'VpcConfig': {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
},
'Publish': True,
}
if 'tags' in cfg:
kwargs.update(
Tags={
key: str(value)
for key, value in cfg.get('tags').items()
}
)
if 'environment_variables' in cfg:
kwargs.update(
Environment={
'Variables': {
key: get_environment_variable_value(value)
for key, value
in cfg.get('environment_variables').items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(FunctionName=func_name, ReservedConcurrentExecutions=concurrency)
def update_function(
cfg, path_to_zip_file, existing_cfg, use_s3=False, s3_file=None, preserve_vpc=False
):
"""Updates the code of an existing Lambda function"""
print('Updating your Lambda function')
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
account_id = get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, cfg.get(
'region',
),
)
role = get_role_name(
cfg.get('region'), account_id,
cfg.get('role', 'lambda_basic_execution'),
)
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
# Do we prefer development variable over config?
buck_name = (
os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
)
if use_s3:
client.update_function_code(
FunctionName=cfg.get('function_name'),
S3Bucket='{}'.format(buck_name),
S3Key='{}'.format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get('function_name'),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
'FunctionName': cfg.get('function_name'),
'Role': role,
'Runtime': cfg.get('runtime'),
'Handler': cfg.get('handler'),
'Description': cfg.get('description', ''),
'Timeout': cfg.get('timeout', 15),
'MemorySize': cfg.get('memory_size', 512),
}
if preserve_vpc:
kwargs['VpcConfig'] = existing_cfg.get('Configuration', {}).get('VpcConfig')
if kwargs['VpcConfig'] is None:
kwargs['VpcConfig'] = {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
}
else:
del kwargs['VpcConfig']['VpcId']
else:
kwargs['VpcConfig'] = {
'SubnetIds': cfg.get('subnet_ids', []),
'SecurityGroupIds': cfg.get('security_group_ids', []),
}
if 'environment_variables' in cfg:
kwargs.update(
Environment={
'Variables': {
key: str(get_environment_variable_value(value))
for key, value
in cfg.get('environment_variables').items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(FunctionName=cfg.get('function_name'), ReservedConcurrentExecutions=concurrency)
elif 'Concurrency' in existing_cfg:
client.delete_function_concurrency(FunctionName=cfg.get('function_name'))
if 'tags' in cfg:
tags = {
key: str(value)
for key, value in cfg.get('tags').items()
}
if tags != existing_cfg.get('Tags'):
if existing_cfg.get('Tags'):
client.untag_resource(Resource=ret['FunctionArn'],
TagKeys=list(existing_cfg['Tags'].keys()))
client.tag_resource(Resource=ret['FunctionArn'], Tags=tags)
def upload_s3(cfg, path_to_zip_file, *use_s3):
"""Upload a function to AWS S3."""
print('Uploading your new Lambda function')
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
client = get_client(
's3', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
byte_stream = b''
with open(path_to_zip_file, mode='rb') as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get('s3_key_prefix', '/dist')
checksum = hashlib.new('md5', byte_stream).hexdigest()
timestamp = str(time.time())
filename = '{prefix}{checksum}-{ts}.zip'.format(
prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
)
# Do we prefer development variable over config?
buck_name = (
os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name')
)
func_name = (
os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name')
)
kwargs = {
'Bucket': '{}'.format(buck_name),
'Key': '{}'.format(filename),
'Body': byte_stream,
}
client.put_object(**kwargs)
print('Finished uploading {} to S3 bucket {}'.format(func_name, buck_name))
if use_s3:
return filename
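# Sketch of the S3 key built above (checksum and timestamp are illustrative):
#
#     /dist0f343d0...-1514764800.0.zip
#
# i.e. '{s3_key_prefix}{md5 of the zip}-{unix timestamp}.zip'; the default
# '/dist' prefix is concatenated with no separator. Also note the star
# argument: the key is only returned when a third positional argument is
# passed, as deploy_s3() does.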
def get_function_config(cfg):
"""Check whether a function exists or not and return its config"""
function_name = cfg.get('function_name')
profile_name = cfg.get('profile')
aws_access_key_id = cfg.get('aws_access_key_id')
aws_secret_access_key = cfg.get('aws_secret_access_key')
client = get_client(
'lambda', profile_name, aws_access_key_id, aws_secret_access_key,
cfg.get('region'),
)
try:
return client.get_function(FunctionName=function_name)
except client.exceptions.ResourceNotFoundException as e:
if 'Function not found' in str(e):
return False
def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get('concurrency', 0))
return max(0, concurrency)
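# Example (sketch):
#
#     >>> get_concurrency({'concurrency': '5'})
#     5
#     >>> get_concurrency({'concurrency': -3})
#     0
#     >>> get_concurrency({})
#     0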
def read_cfg(path_to_config_file, profile_name):
cfg = read(path_to_config_file, loader=yaml.load)
if profile_name is not None:
cfg['profile'] = profile_name
elif 'AWS_PROFILE' in os.environ:
cfg['profile'] = os.environ['AWS_PROFILE']
return cfg
| isc |
phobson/pygridtools | pygridtools/tests/test_core.py | 2 | 24947 | import os
import warnings
from pkg_resources import resource_filename
import tempfile
import numpy
from numpy import nan
import pandas
from shapely.geometry import Polygon
import geopandas
import pytest
import numpy.testing as nptest
import pandas.util.testing as pdtest
from pygridtools import core
from pygridgen.tests import raises
from . import utils
BASELINE_IMAGES = 'baseline_files/test_core'
try:
import pygridgen
HASPGG = True
except ImportError:
HASPGG = False
@pytest.fixture
def A():
return numpy.arange(12).reshape(4, 3).astype(float)
@pytest.fixture
def B():
return numpy.arange(8).reshape(2, 4).astype(float)
@pytest.fixture
def C():
return numpy.arange(25).reshape(5, 5).astype(float)
@pytest.mark.parametrize('fxn', [numpy.fliplr, numpy.flipud, numpy.fliplr])
def test_transform(A, fxn):
result = core.transform(A, fxn)
expected = fxn(A)
nptest.assert_array_equal(result, expected)
@pytest.mark.parametrize(('index', 'axis', 'first', 'second'), [
(3, 0, 'top', 'bottom'),
(2, 1, 'left', 'right'),
(5, 0, None, None),
(5, 1, None, None),
])
def test_split_rows(C, index, axis, first, second):
expected = {
'top': numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
]),
'bottom': numpy.array([
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.],
]),
'left': numpy.array([
[ 0., 1.],
[ 5., 6.],
[10., 11.],
[15., 16.],
[20., 21.],
]),
'right': numpy.array([
[ 2., 3., 4.],
[ 7., 8., 9.],
[12., 13., 14.],
[17., 18., 19.],
[22., 23., 24.],
]),
}
if first and second:
a, b = core.split(C, index, axis)
nptest.assert_array_equal(a, expected[first])
nptest.assert_array_equal(b, expected[second])
else:
with raises(ValueError):
left, right = core.split(C, index, axis=axis)
@pytest.mark.parametrize('N', [1, 3, None])
def test__interp_between_vectors(N):
index = numpy.arange(0, 4)
vector1 = -1 * index**2 - 1
vector2 = 2 * index**2 + 2
expected = {
1: numpy.array([
[ -1.0, -2.0, -5.0, -10.0],
[ 0.5, 1.0, 2.5, 5.0],
[ 2.0, 4.0, 10.0, 20.0],
]),
3: numpy.array([
[ -1.00, -2.00, -5.00, -10.00],
[ -0.25, -0.50, -1.25, -2.50],
[ 0.50, 1.00, 2.50, 5.00],
[ 1.25, 2.50, 6.25, 12.50],
[ 2.00, 4.00, 10.00, 20.00],
])
}
if N:
result = core._interp_between_vectors(vector1, vector2, n_nodes=N)
nptest.assert_array_equal(result, expected[N])
else:
with raises(ValueError):
core._interp_between_vectors(vector1, vector2, n_nodes=0)
@pytest.mark.parametrize(('n', 'axis'), [
(1, 0), (4, 0), (1, 1), (3, 1)
])
def test_insert(C, n, axis):
expected = {
(1, 0): numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[ 7.5, 8.5, 9.5, 10.5, 11.5],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]),
(4, 0): numpy.array([
[ 0.0, 1.0, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 7.0, 8.0, 9.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[ 7.0, 8.0, 9.0, 10.0, 11.0],
[ 8.0, 9.0, 10.0, 11.0, 12.0],
[ 9.0, 10.0, 11.0, 12.0, 13.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]),
(1, 1): numpy.array([
[ 0.0, 1.0, 1.5, 2.0, 3.0, 4.0],
[ 5.0, 6.0, 6.5, 7.0, 8.0, 9.0],
[10.0, 11.0, 11.5, 12.0, 13.0, 14.0],
[15.0, 16.0, 16.5, 17.0, 18.0, 19.0],
[20.0, 21.0, 21.5, 22.0, 23.0, 24.0],
]),
(3, 1): numpy.array([
[ 0.00, 1.00, 1.25, 1.50, 1.75, 2.00, 3.00, 4.00],
[ 5.00, 6.00, 6.25, 6.50, 6.75, 7.00, 8.00, 9.00],
[10.00, 11.00, 11.25, 11.50, 11.75, 12.00, 13.00, 14.00],
[15.00, 16.00, 16.25, 16.50, 16.75, 17.00, 18.00, 19.00],
[20.00, 21.00, 21.25, 21.50, 21.75, 22.00, 23.00, 24.00],
])
}
result = core.insert(C, 2, axis=axis, n_nodes=n)
nptest.assert_array_equal(result, expected[(n, axis)])
@pytest.mark.parametrize('how', ['h', 'v'])
@pytest.mark.parametrize('where', ['+', '-'])
@pytest.mark.parametrize('shift', [0, 2, -1])
def test_merge(A, B, how, where, shift):
expected = {
('v', '+', 0): numpy.array([
[0., 1., 2., nan],
[3., 4., 5., nan],
[6., 7., 8., nan],
[9., 10., 11., nan],
[0., 1., 2., 3.],
[4., 5., 6., 7.]
]),
('v', '-', 0): numpy.array([
[0., 1., 2., 3.],
[4., 5., 6., 7.],
[0., 1., 2., nan],
[3., 4., 5., nan],
[6., 7., 8., nan],
[9., 10., 11., nan]
]),
('v', '+', 2): numpy.array([
[ 0., 1., 2., nan, nan, nan],
[ 3., 4., 5., nan, nan, nan],
[ 6., 7., 8., nan, nan, nan],
[ 9., 10., 11., nan, nan, nan],
[nan, nan, 0., 1., 2., 3.],
[nan, nan, 4., 5., 6., 7.]
]),
('v', '-', 2): numpy.array([
[nan, nan, 0., 1., 2., 3.],
[nan, nan, 4., 5., 6., 7.],
[ 0., 1., 2., nan, nan, nan],
[ 3., 4., 5., nan, nan, nan],
[ 6., 7., 8., nan, nan, nan],
[ 9., 10., 11., nan, nan, nan]
]),
('v', '+', -1): numpy.array([
[nan, 0., 1., 2.],
[nan, 3., 4., 5.],
[nan, 6., 7., 8.],
[nan, 9., 10., 11.],
[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]
]),
('v', '-', -1): numpy.array([
[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[nan, 0., 1., 2.],
[nan, 3., 4., 5.],
[nan, 6., 7., 8.],
[nan, 9., 10., 11.]
]),
('h', '+', 0): numpy.array([
[0., 1., 2., 0., 1., 2., 3.],
[3., 4., 5., 4., 5., 6., 7.],
[6., 7., 8., nan, nan, nan, nan],
[9., 10., 11., nan, nan, nan, nan]
]),
('h', '-', 0): numpy.array([
[ 0., 1., 2., 3., 0., 1., 2.],
[ 4., 5., 6., 7., 3., 4., 5.],
[nan, nan, nan, nan, 6., 7., 8.],
[nan, nan, nan, nan, 9., 10., 11.]
]),
('h', '+', 2): numpy.array([
[0., 1., 2., nan, nan, nan, nan],
[3., 4., 5., nan, nan, nan, nan],
[6., 7., 8., 0., 1., 2., 3.],
[9., 10., 11., 4., 5., 6., 7.]
]),
('h', '-', 2): numpy.array([
[nan, nan, nan, nan, 0., 1., 2.],
[nan, nan, nan, nan, 3., 4., 5.],
[ 0., 1., 2., 3., 6., 7., 8.],
[ 4., 5., 6., 7., 9., 10., 11.]
]),
('h', '+', -1): numpy.array([
[nan, nan, nan, 0., 1., 2., 3.],
[ 0., 1., 2., 4., 5., 6., 7.],
[ 3., 4., 5., nan, nan, nan, nan],
[ 6., 7., 8., nan, nan, nan, nan],
[ 9., 10., 11., nan, nan, nan, nan]
]),
('h', '-', -1): numpy.array([
[ 0., 1., 2., 3., nan, nan, nan],
[ 4., 5., 6., 7., 0., 1., 2.],
[nan, nan, nan, nan, 3., 4., 5.],
[nan, nan, nan, nan, 6., 7., 8.],
[nan, nan, nan, nan, 9., 10., 11.]
]),
}
result = core.merge(A, B, how=how, where=where, shift=shift)
nptest.assert_array_equal(result, expected[(how, where, shift)])
@pytest.fixture
def g1(simple_nodes):
xn, yn = simple_nodes
g = core.ModelGrid(xn[:, :3], yn[:, :3])
mask = g.cell_mask
mask[:2, :2] = True
g.cell_mask = mask
return g
@pytest.fixture
def g2(simple_nodes):
xn, yn = simple_nodes
g = core.ModelGrid(xn[2:5, 3:], yn[2:5, 3:])
return g
@pytest.fixture
def polyverts():
return geopandas.GeoSeries(Polygon([(2.4, 0.9), (3.6, 0.9), (3.6, 2.4), (2.4, 2.4)]))
def test_ModelGrid_bad_shapes(simple_cells):
xc, yc = simple_cells
with raises(ValueError):
mg = core.ModelGrid(xc, yc[2:, 2:])
def test_ModelGrid_nodes_and_cells(g1, simple_cells):
xc, yc = simple_cells
assert (isinstance(g1.nodes_x, numpy.ndarray))
assert (isinstance(g1.nodes_y, numpy.ndarray))
assert (isinstance(g1.cells_x, numpy.ndarray))
nptest.assert_array_equal(g1.cells_x, xc[:, :2])
assert (isinstance(g1.cells_y, numpy.ndarray))
nptest.assert_array_equal(g1.cells_y, yc[:, :2])
def test_ModelGrid_counts_and_shapes(g1):
expected_rows = 9
expected_cols = 3
assert (g1.icells == expected_cols - 1)
assert (g1.jcells == expected_rows - 1)
assert (g1.inodes == expected_cols)
assert (g1.jnodes == expected_rows)
assert (g1.shape == (expected_rows, expected_cols))
assert (g1.cell_shape == (expected_rows - 1, expected_cols - 1))
def test_ModelGrid_cell_mask(g1):
expected_mask = numpy.array([
[1, 1], [1, 1], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0],
])
nptest.assert_array_equal(g1.cell_mask, expected_mask)
@pytest.mark.parametrize(('usemask', 'which', 'error'), [
(True, 'nodes', ValueError),
(False, 'nodes', None),
(True, 'cells', None),
])
def test_ModelGrid_to_dataframe(g1, usemask, which, error):
def name_cols(df):
df.columns.names = ['coord', 'ii']
df.index.names = ['jj']
return df
if error:
with raises(ValueError):
g1.to_dataframe(usemask=usemask, which=which)
else:
expected = {
(False, 'nodes'): pandas.DataFrame({
('easting', 0): {
0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0,
5: 1.0, 6: 1.0, 7: 1.0, 8: 1.0
}, ('easting', 1): {
0: 1.5, 1: 1.5, 2: 1.5, 3: 1.5, 4: 1.5,
5: 1.5, 6: 1.5, 7: 1.5, 8: 1.5
}, ('easting', 2): {
0: 2.0, 1: 2.0, 2: 2.0, 3: 2.0, 4: 2.0,
5: 2.0, 6: 2.0, 7: 2.0, 8: 2.0
}, ('northing', 0): {
0: 0.0, 1: 0.5, 2: 1.0, 3: 1.5, 4: 2.0,
5: 2.5, 6: 3.0, 7: 3.5, 8: 4.0
}, ('northing', 1): {
0: 0.0, 1: 0.5, 2: 1.0, 3: 1.5, 4: 2.0,
5: 2.5, 6: 3.0, 7: 3.5, 8: 4.0
}, ('northing', 2): {
0: 0.0, 1: 0.5, 2: 1.0, 3: 1.5, 4: 2.0,
5: 2.5, 6: 3.0, 7: 3.5, 8: 4.0}
}).pipe(name_cols),
(True, 'cells'): pandas.DataFrame({
('easting', 0): {
0: nan, 1: nan, 2: 1.25, 3: 1.25, 4: 1.25,
5: 1.25, 6: 1.25, 7: 1.25
}, ('easting', 1): {
0: nan, 1: nan, 2: 1.75, 3: 1.75, 4: 1.75,
5: 1.75, 6: 1.75, 7: 1.75
}, ('northing', 0): {
0: nan, 1: nan, 2: 1.25, 3: 1.75, 4: 2.25,
5: 2.75, 6: 3.25, 7: 3.75
}, ('northing', 1): {
0: nan, 1: nan, 2: 1.25, 3: 1.75, 4: 2.25,
5: 2.75, 6: 3.25, 7: 3.75
}
}).pipe(name_cols),
}
result = g1.to_dataframe(usemask=usemask, which=which)
pdtest.assert_frame_equal(result, expected[(usemask, which)], check_names=False)
pdtest.assert_index_equal(result.columns, expected[(usemask, which)].columns)
@pytest.mark.parametrize(('usemask', 'which', 'error'), [
(True, 'nodes', ValueError),
(False, 'nodes', None),
(True, 'cells', None),
(False, 'cells', None),
])
def test_ModelGrid_to_coord_pairs(g1, usemask, which, error):
if error:
with raises(error):
g1.to_coord_pairs(usemask=usemask, which=which)
else:
expected = {
('nodes', False): numpy.array([
[1.0, 0.0], [1.5, 0.0], [2.0, 0.0], [1.0, 0.5],
[1.5, 0.5], [2.0, 0.5], [1.0, 1.0], [1.5, 1.0],
[2.0, 1.0], [1.0, 1.5], [1.5, 1.5], [2.0, 1.5],
[1.0, 2.0], [1.5, 2.0], [2.0, 2.0], [1.0, 2.5],
[1.5, 2.5], [2.0, 2.5], [1.0, 3.0], [1.5, 3.0],
[2.0, 3.0], [1.0, 3.5], [1.5, 3.5], [2.0, 3.5],
[1.0, 4.0], [1.5, 4.0], [2.0, 4.0]
]),
('cells', False): numpy.array([
[1.25, 0.25], [1.75, 0.25], [1.25, 0.75], [1.75, 0.75],
[1.25, 1.25], [1.75, 1.25], [1.25, 1.75], [1.75, 1.75],
[1.25, 2.25], [1.75, 2.25], [1.25, 2.75], [1.75, 2.75],
[1.25, 3.25], [1.75, 3.25], [1.25, 3.75], [1.75, 3.75]
]),
('cells', True): numpy.array([
[nan, nan], [nan, nan], [nan, nan], [nan, nan],
[1.25, 1.25], [1.75, 1.25], [1.25, 1.75], [1.75, 1.75],
[1.25, 2.25], [1.75, 2.25], [1.25, 2.75], [1.75, 2.75],
[1.25, 3.25], [1.75, 3.25], [1.25, 3.75], [1.75, 3.75]
])
}
result = g1.to_coord_pairs(usemask=usemask, which=which)
nptest.assert_array_equal(result, expected[which, usemask])
def test_ModelGrid_transform(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transform(lambda x: x * 10)
nptest.assert_array_equal(g.xn, xn * 10)
nptest.assert_array_equal(g.yn, yn * 10)
def test_ModelGrid_transform_x(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transform_x(lambda x: x * 10)
nptest.assert_array_equal(g.xn, xn * 10)
nptest.assert_array_equal(g.yn, yn)
def test_ModelGrid_transform_y(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transform_y(lambda y: y * 10)
nptest.assert_array_equal(g.xn, xn)
nptest.assert_array_equal(g.yn, yn * 10)
def test_ModelGrid_transpose(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.transpose()
nptest.assert_array_equal(g.xn, xn.T)
nptest.assert_array_equal(g.yn, yn.T)
def test_ModelGrid_fliplr(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.fliplr()
nptest.assert_array_equal(g.xn, numpy.fliplr(xn))
nptest.assert_array_equal(g.yn, numpy.fliplr(yn))
def test_ModelGrid_flipud(mg, simple_nodes):
xn, yn = simple_nodes
g = mg.flipud()
nptest.assert_array_equal(g.xn, numpy.flipud(xn))
nptest.assert_array_equal(g.yn, numpy.flipud(yn))
def test_ModelGrid_split_ax0(mg, simple_nodes):
xn, yn = simple_nodes
mgtop, mgbottom = mg.split(3, axis=0)
nptest.assert_array_equal(mgtop.nodes_x, xn[:3, :])
nptest.assert_array_equal(mgtop.nodes_y, yn[:3, :])
nptest.assert_array_equal(mgbottom.nodes_x, xn[3:, :])
nptest.assert_array_equal(mgbottom.nodes_y, yn[3:, :])
def test_ModelGrid_node_mask(simple_nodes):
g = core.ModelGrid(*simple_nodes).update_cell_mask()
expected = numpy.array([
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1]
]).astype(bool)
nptest.assert_array_equal(expected, g.node_mask)
def test_ModelGrid_merge(g1, g2, simple_nodes):
g3 = g1.merge(g2, how='horiz', where='+', shift=2)
g4 = core.ModelGrid(*simple_nodes).update_cell_mask()
nptest.assert_array_equal(g3.xn, g4.xn)
nptest.assert_array_equal(g3.xc, g4.xc)
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=15)
def test_ModelGrid_merge_with_mask(simple_nodes):
mg1 = core.ModelGrid(*simple_nodes).update_cell_mask()
mg2 = (
mg1.transform_x(lambda x: x + 1)
.transform_y(lambda y: y + 5)
.update_cell_mask(mask=mg1.cell_mask)
)
merged = mg1.merge(mg2, where='+', shift=1, min_nodes=1)
expected = numpy.array([
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 1]
]).astype(bool)
nptest.assert_array_equal(merged.cell_mask, expected)
fig, artists = merged.plot_cells()
return fig
def test_ModelGrid_insert_3_ax0(mg):
known_xnodes = numpy.ma.masked_invalid(numpy.array([
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
[1.0, 1.5, 2.0, nan, nan, nan, nan],
]))
known_ynodes = numpy.ma.masked_invalid(numpy.array([
[0.000, 0.000, 0.000, nan, nan, nan, nan],
[0.500, 0.500, 0.500, nan, nan, nan, nan],
[0.625, 0.625, 0.625, nan, nan, nan, nan],
[0.750, 0.750, 0.750, nan, nan, nan, nan],
[0.875, 0.875, 0.875, nan, nan, nan, nan],
[1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000],
[1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500],
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[2.500, 2.500, 2.500, nan, nan, nan, nan],
[3.000, 3.000, 3.000, nan, nan, nan, nan],
[3.500, 3.500, 3.500, nan, nan, nan, nan],
[4.000, 4.000, 4.000, nan, nan, nan, nan],
]))
result = mg.insert(2, axis=0, n_nodes=3)
nptest.assert_array_equal(result.nodes_x, known_xnodes)
nptest.assert_array_equal(result.nodes_y, known_ynodes)
def test_ModelGrid_insert_3_ax1(mg):
known_xnodes = numpy.ma.masked_invalid(numpy.array([
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, 2.500, 3.000, 3.500, 4.000],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, 2.500, 3.000, 3.500, 4.000],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, 2.500, 3.000, 3.500, 4.000],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan],
[1.000, 1.500, 1.625, 1.750, 1.875, 2.000, nan, nan, nan, nan]
]))
known_ynodes = numpy.ma.masked_invalid(numpy.array([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, nan, nan],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, nan, nan, nan, nan],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0],
[2.5, 2.5, 2.5, 2.5, 2.5, 2.5, nan, nan, nan, nan],
[3.0, 3.0, 3.0, 3.0, 3.0, 3.0, nan, nan, nan, nan],
[3.5, 3.5, 3.5, 3.5, 3.5, 3.5, nan, nan, nan, nan],
[4.0, 4.0, 4.0, 4.0, 4.0, 4.0, nan, nan, nan, nan],
]))
result = mg.insert(2, axis=1, n_nodes=3)
nptest.assert_array_equal(result.nodes_x, known_xnodes)
nptest.assert_array_equal(result.nodes_y, known_ynodes)
def test_extract(mg, simple_nodes):
xn, yn = simple_nodes
result = mg.extract(jstart=2, jend=5, istart=3, iend=6)
nptest.assert_array_equal(result.nodes_x, xn[2:5, 3:6])
nptest.assert_array_equal(result.nodes_y, yn[2:5, 3:6])
@pytest.mark.parametrize(('where', 'use_existing'), [
('inside', False),
('inside', True),
('outside', False)
])
def test_ModelGrid_mask_centroids(mg, polyverts, where, use_existing):
expected = {
('inside', False): numpy.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]),
('inside', True): numpy.array([
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]
]),
('outside', False): numpy.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 1],
[1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]
])
}
result = mg.mask_centroids(**{where: polyverts}, use_existing=use_existing)
nptest.assert_array_equal(
result.cell_mask.astype(int),
expected[(where, use_existing)].astype(int)
)
@pytest.mark.parametrize(('kwargs', 'error'), [
[dict(min_nodes=0), ValueError],
[dict(min_nodes=5), ValueError],
[dict(triangles=True), NotImplementedError],
])
def test_ModelGrid_mask_nodes_errors(mg, polyverts, kwargs, error):
with raises(error):
mg.mask_nodes(inside=polyverts, **kwargs)
def test_masks_no_polys(mg):
with raises(ValueError):
mg.mask_nodes()
with raises(ValueError):
mg.mask_centroids()
def test_ModelGrid_to_point_geodataframe(g1):
expectedfile = resource_filename('pygridtools.tests.baseline_files', 'mgshp_nomask_nodes_points.shp')
expected = geopandas.read_file(expectedfile)
result = g1.to_point_geodataframe(which='nodes', usemask=False)
utils.assert_gdfs_equal(expected.drop(columns=['river', 'reach']), result)
@pytest.mark.xfail
@pytest.mark.parametrize('usemask', [True, False])
def test_ModelGrid_to_gis_cells(g1, usemask):
expectedfile = {
True: 'mgshp_mask_cells_polys.shp',
False: 'mgshp_nomask_cells_polys.shp',
}
expectedfile = resource_filename('pygridtools.tests.baseline_files',
expectedfile[usemask])
expected = geopandas.read_file(expectedfile)
result = g1.to_polygon_geodataframe(usemask=usemask)
utils.assert_gdfs_equal(expected.drop(columns=['river', 'reach']), result)
@pytest.mark.parametrize(('which', 'usemask', 'error'), [
('nodes', True, ValueError),
('junk', False, ValueError),
('nodes', False, None),
('cells', False, None),
])
def test_ModelGrid__get_x_y_nodes_and_mask(g1, which, usemask, error):
if error:
with raises(error):
g1._get_x_y(which, usemask=usemask)
else:
x, y = g1._get_x_y(which, usemask=usemask)
nptest.assert_array_equal(x, getattr(g1, 'x' + which[0]))
nptest.assert_array_equal(y, getattr(g1, 'y' + which[0]))
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=15)
def test_ModelGrid_plots_basic(simple_nodes):
mg = core.ModelGrid(*simple_nodes)
mg.cell_mask = numpy.ma.masked_invalid(mg.xc).mask
fig, artists = mg.plot_cells()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_IMAGES, tolerance=15)
def test_ModelGrid_plots_masked(river_grid, river_bathy):
fig, artists = river_grid.plot_cells(cell_kws=dict(colors=river_bathy, cmap='Reds_r'))
return fig
@pytest.mark.parametrize(('otherargs', 'gridtype'), [
(dict(), None),
(dict(verbose=True), None),
(dict(rawgrid=False), core.ModelGrid)
])
@pytest.mark.skipif(not HASPGG, reason='pygridgen unavailabile')
def test_make_grid(simple_boundary_gdf, otherargs, gridtype):
if not gridtype:
gridtype = pygridgen.Gridgen
gridparams = {'nnodes': 12, 'verbose': False, 'ul_idx': 0}
gridparams.update(otherargs)
grid = core.make_grid(9, 7, domain=simple_boundary_gdf, **gridparams)
assert (isinstance(grid, gridtype))
| bsd-3-clause |
bwsblake/lettercounter | django-norel-env/lib/python2.7/site-packages/django/test/testcases.py | 47 | 46941 | from __future__ import unicode_literals
import difflib
import json
import os
import re
import socket
import sys
from copy import copy
from functools import wraps
try:
from urllib.parse import urlsplit, urlunsplit
except ImportError: # Python 2
from urlparse import urlsplit, urlunsplit
import select
import threading
import errno
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core import mail
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.signals import request_started
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.core.urlresolvers import clear_url_caches
from django.core.validators import EMPTY_VALUES
from django.db import (transaction, connection, connections, DEFAULT_DB_ALIAS,
reset_queries)
from django.forms.fields import CharField
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (get_warnings_state, restore_warnings_state,
override_settings, compare_xml, strip_quotes)
from django.test.utils import ContextList
from django.utils import unittest as ut2
from django.utils.encoding import force_text
from django.utils import six
from django.utils.unittest.util import safe_repr
from django.utils.unittest import skipIf
from django.views.static import serve
__all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
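# Example of the two normalizers above (sketch):
#
#     >>> normalize_long_ints('the answer is 42L')
#     'the answer is 42'
#     >>> normalize_decimals("Decimal('5.00')")
#     'Decimal("5.00")'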
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
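# Example (sketch):
#
#     >>> to_list(None)
#     []
#     >>> to_list('x')
#     ['x']
#     >>> to_list(['x', 'y'])
#     ['x', 'y']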
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_managed = transaction.managed
real_abort = transaction.abort
def nop(*args, **kwargs):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
transaction.managed = nop
transaction.abort = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
transaction.managed = real_managed
transaction.abort = real_abort
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
"""
The entry method for doctest output checking. Defers to a sequence of
child checkers
"""
checks = (self.check_output_default,
self.check_output_numeric,
self.check_output_xml,
self.check_output_json)
for check in checks:
if check(want, got, optionflags):
return True
return False
def check_output_default(self, want, got, optionflags):
"""
The default comparator provided by doctest - not perfect, but good for
most purposes
"""
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
"""Doctest does an exact string comparison of output, which means that
some numerically equivalent values aren't equal. This check normalizes
* long integers (22L) so that they equal normal integers. (22)
* Decimals so that they are comparable, regardless of the change
made to __repr__ in Python 2.6.
"""
return doctest.OutputChecker.check_output(self,
normalize_decimals(normalize_long_ints(want)),
normalize_decimals(normalize_long_ints(got)),
optionflags)
def check_output_xml(self, want, got, optionsflags):
try:
return compare_xml(want, got)
except Exception:
return False
def check_output_json(self, want, got, optionsflags):
"""
Tries to compare want and got as if they were JSON-encoded data
"""
want, got = strip_quotes(want, got)
try:
want_json = json.loads(want)
got_json = json.loads(got)
except Exception:
return False
return want_json == got_json
class DocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
def report_unexpected_exception(self, out, test, example, exc_info):
doctest.DocTestRunner.report_unexpected_exception(self, out, test,
example, exc_info)
# Rollback, in case of database errors. Otherwise they'd have
# side effects on other tests.
for conn in connections:
transaction.rollback_unless_managed(using=conn)
class _AssertNumQueriesContext(object):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
self.connection = connection
def __enter__(self):
self.old_debug_cursor = self.connection.use_debug_cursor
self.connection.use_debug_cursor = True
self.starting_queries = len(self.connection.queries)
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.use_debug_cursor = self.old_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
final_queries = len(self.connection.queries)
executed = final_queries - self.starting_queries
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected" % (
executed, self.num
)
)
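# The context manager above backs TestCase.assertNumQueries; a typical use
# (the model name is hypothetical):
#
#     with self.assertNumQueries(1):
#         Article.objects.count()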
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class SimpleTestCase(ut2.TestCase):
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
pass
def _post_teardown(self):
pass
def save_warnings_state(self):
"""
Saves the state of the warnings module
"""
self._warnings_state = get_warnings_state()
def restore_warnings_state(self):
"""
Restores the state of the warnings module to the state
saved by save_warnings_state()
"""
restore_warnings_state(self._warnings_state)
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts
back to the original value when exiting the context.
"""
return override_settings(**kwargs)
def assertRaisesMessage(self, expected_exception, expected_message,
callable_obj=None, *args, **kwargs):
"""
Asserts that the message in a raised exception matches the passed
value.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
return six.assertRaisesRegex(self, expected_exception,
re.escape(expected_message), callable_obj, *args, **kwargs)
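    # Example usage of assertRaisesMessage (sketch); the expected message is
    # matched as an escaped substring via assertRaisesRegex:
    #
    #     with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #         int('abc')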
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in EMPTY_VALUES
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in EMPTY_VALUES:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length':2, 'max_length':20})
self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs),
fieldclass))
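    # Example usage of assertFieldOutput (sketch):
    #
    #     self.assertFieldOutput(
    #         CharField,
    #         valid={'hello': 'hello'},
    #         invalid={},  # e.g. map bad inputs to expected error messages
    #     )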
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
    def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None,
'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None,
'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, needle, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Flushing the database.
* If the Test Case class has a 'fixtures' member, installing the
named fixtures.
* If the Test Case class has a 'urls' member, replace the
ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
self._fixture_setup()
self._urlconf_setup()
mail.outbox = []
def _databases_names(self, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# including mirrors or not. Otherwise, just on the default DB.
if getattr(self, 'multi_db', False):
return [alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST_MIRROR']]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = \
conn.ops.sequence_reset_by_name_sql(no_style(),
conn.introspection.sequence_list())
if sql_list:
try:
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception:
transaction.rollback_unless_managed(using=db_name)
raise
transaction.commit_unless_managed(using=db_name)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name, 'skip_validation': True})
def _urlconf_setup(self):
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def _post_teardown(self):
""" Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
* Force closing the connection, so that the next test gets
a clean cursor.
"""
self._fixture_teardown()
self._urlconf_teardown()
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does rollback, the effect
        # of these statements is lost, which can affect the operation
# of tests (e.g., losing a timezone setting causing objects to
# be created with the wrong time).
# To make sure this doesn't happen, get a clean connection at the
# start of every test.
for conn in connections.all():
conn.close()
def _fixture_teardown(self):
# Roll back any pending transactions in order to avoid a deadlock
# during flush when TEST_MIRROR is used (#18984).
for conn in connections.all():
conn.rollback_unless_managed()
for db in self._databases_names(include_mirrors=False):
call_command('flush', verbosity=0, interactive=False, database=db,
skip_validation=True, reset_sequences=False)
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
the test client to make the request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
# Get the redirection page, using the same client that was used
# to obtain the original response.
redirect_response = response.client.get(path, QueryDict(query))
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response._charset)
content = content.decode(response._charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
"Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None,
"Second argument is not valid HTML:")
real_count = content.count(text)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of %s in response"
" (expected %d)" % (real_count, text_repr, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected), and that
``text`` does not occur in the content of the response.
"""
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response._charset)
content = content.decode(response._charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
'Response\'s content is not valid HTML:')
text = assert_and_parse_html(self, text, None,
'Second argument is not valid HTML:')
self.assertEqual(content.count(text), 0,
msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, ', '.join(template_names)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateNotUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateNotUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(set(items), set(values))
return self.assertEqual(list(items), values)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
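# The assertions above are normally called from a test subclass; a minimal
# usage sketch (the app, URLs, fixture name and query count below are
# hypothetical, not part of this module):
#
#     class ExampleViewTests(TransactionTestCase):
#         fixtures = ['example_data.json']
#         def test_old_index_redirects(self):
#             with self.assertNumQueries(2):
#                 response = self.client.get('/old-index/')
#             self.assertRedirects(response, '/index/')
#             self.assertContains(self.client.get('/index/'), 'Welcome')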
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase, but surrounds every test
with a transaction, monkey-patches the real transaction management routines
to do nothing, and rolls back the test transaction at the end of the test.
You have to use TransactionTestCase if you need transaction management
inside a test.
"""
def _fixture_setup(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
for db_name in self._databases_names():
transaction.enter_transaction_management(using=db_name)
transaction.managed(True, using=db_name)
disable_transaction_methods()
from django.contrib.sites.models import Site
Site.objects.clear_cache()
for db in self._databases_names(include_mirrors=False):
if hasattr(self, 'fixtures'):
call_command('loaddata', *self.fixtures,
**{
'verbosity': 0,
'commit': False,
'database': db,
'skip_validation': True,
})
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
restore_transaction_methods()
for db in self._databases_names():
transaction.rollback(using=db)
transaction.leave_transaction_management(using=db)
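# A minimal sketch of a case that genuinely needs TransactionTestCase
# rather than TestCase (the model import is hypothetical):
#
#     class CommitBehaviorTests(TransactionTestCase):
#         def test_data_visible_after_commit(self):
#             from myapp.models import Entry
#             Entry.objects.create(title='x')
#             transaction.commit()  # under TestCase this would be a no-op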
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise ut2.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
test_item = test_func
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(feature):
"""
Skip a test if a database has the named feature
"""
return _deferredSkip(lambda: getattr(connection.features, feature),
"Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
"""
Skip a test unless a database has the named feature
"""
return _deferredSkip(lambda: not getattr(connection.features, feature),
"Database doesn't support feature %s" % feature)
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log any of the
requests received to standard output, so as not to clutter the test
output.
"""
def log_message(*args):
pass
if sys.version_info >= (3, 3, 0):
_ImprovedEvent = threading.Event
elif sys.version_info >= (2, 7, 0):
_ImprovedEvent = threading._Event
else:
class _ImprovedEvent(threading._Event):
"""
Does the same as `threading.Event` except it overrides the wait() method
with some code borrowed from Python 2.7 to return the set state of the
event (see: http://hg.python.org/cpython/rev/b5aa8aa78c0f/). This makes it
possible to tell whether wait() exited normally or because of the timeout.
This class can be removed when Django supports only Python >= 2.7.
"""
def wait(self, timeout=None):
self._Event__cond.acquire()
try:
if not self._Event__flag:
self._Event__cond.wait(timeout)
return self._Event__flag
finally:
self._Event__cond.release()
class StoppableWSGIServer(WSGIServer):
"""
The code in this class is borrowed from the `SocketServer.BaseServer` class
in Python 2.6. The important functionality here is that the server is non-
blocking and that it can be shut down at any moment. This is made possible
by the server regularly polling the socket and checking if it has been
asked to stop.
Note for the future: Once Django stops supporting Python 2.6, this class
can be removed as `WSGIServer` will have the ability to shut down on
demand and will not require the _ImprovedEvent class, whose code is
borrowed from Python 2.7.
"""
def __init__(self, *args, **kwargs):
super(StoppableWSGIServer, self).__init__(*args, **kwargs)
self.__is_shut_down = _ImprovedEvent()
self.__serving = False
def serve_forever(self, poll_interval=0.5):
"""
Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""
Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__serving = False
if not self.__is_shut_down.wait(2):
raise RuntimeError(
"Failed to shutdown the live test server in 2 seconds. The "
"server might be stuck or generating a slow response.")
def handle_request(self):
"""Handle one request, possibly blocking.
"""
fd_sets = select.select([self], [], [], None)
if not fd_sets[0]:
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""
Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
self.close_request(request)
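# Minimal sketch of driving StoppableWSGIServer from a controlling thread
# (a hypothetical helper; Django itself does this via LiveServerThread below):
def _example_serve_briefly(app, host='localhost', port=8081):
    httpd = StoppableWSGIServer((host, port), QuietWSGIRequestHandler)
    httpd.set_app(app)
    worker = threading.Thread(target=httpd.serve_forever)
    worker.start()
    # ... issue HTTP requests against http://host:port here ...
    httpd.shutdown()  # blocks until serve_forever() has exited
    httpd.server_close()
    worker.join()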
class _MediaFilesHandler(StaticFilesHandler):
"""
Handler for serving the media files. This is a private class that is
meant to be used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
def serve(self, request):
relative_url = request.path[len(self.base_url[2]):]
return serve(request, relative_url, document_root=self.get_base_dir())
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
from django.db import connections
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = StaticFilesHandler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = StoppableWSGIServer(
(self.host, port), QuietWSGIRequestHandler)
except socket.error as e:
if (index + 1 < len(self.possible_ports) and
e.errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
# is something other than "Address already in use", so
# we let the error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def join(self, timeout=None):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
super(LiveServerThread, self).join(timeout)
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium, instead of the built-in dummy client.
Note that it inherits from TransactionTestCase instead of TestCase because
the threads do not share the same transactions (unless using in-memory
sqlite) and each thread needs to commit all of its transactions so that the
other thread can see the changes.
"""
@property
def live_server_url(self):
return 'http://%s:%s' % (
self.server_thread.host, self.server_thread.port)
@classmethod
def setUpClass(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite')
and conn.settings_dict['NAME'] == ':memory:'):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
raise ImproperlyConfigured('Invalid address ("%s") for live '
'server.' % specified_address)
cls.server_thread = LiveServerThread(
host, possible_ports, connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
super(LiveServerTestCase, cls).setUpClass()
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() has, for
# some reason, raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.join()
# Restore sqlite connections' non-sharability
for conn in connections.all():
if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite')
and conn.settings_dict['NAME'] == ':memory:'):
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
super(LiveServerTestCase, cls).tearDownClass()
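# Hypothetical usage sketch (Selenium is an assumed third-party dependency,
# not something this module provides):
#
#     class ExampleSeleniumTests(LiveServerTestCase):
#         @classmethod
#         def setUpClass(cls):
#             super(ExampleSeleniumTests, cls).setUpClass()
#             from selenium.webdriver.firefox.webdriver import WebDriver
#             cls.selenium = WebDriver()
#         @classmethod
#         def tearDownClass(cls):
#             cls.selenium.quit()
#             super(ExampleSeleniumTests, cls).tearDownClass()
#         def test_login_page(self):
#             self.selenium.get(self.live_server_url + '/login/')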
| mit |
sumanau7/Ele_CC_Sumanau | lib/IPython/lib/tests/test_deepreload.py | 9 | 2144 | # -*- coding: utf-8 -*-
"""Test suite for the deepreload module."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.utils.py3compat import builtin_mod_name, PY3
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
from IPython.lib.deepreload import reload as dreload
#-----------------------------------------------------------------------------
# Test functions begin
#-----------------------------------------------------------------------------
@dec.skipif_not_numpy
def test_deepreload_numpy():
"Test that NumPy can be deep reloaded."
import numpy
# TODO: Find a way to exclude all standard library modules from reloading.
exclude = [
# Standard exclusions:
'sys', 'os.path', builtin_mod_name, '__main__',
# Test-related exclusions:
'unittest', 'UserDict', '_collections_abc', 'tokenize',
'collections', 'collections.abc',
'importlib', 'importlib.machinery', '_imp',
'importlib._bootstrap', 'importlib._bootstrap_external',
'_frozen_importlib', '_frozen_importlib_external',
]
dreload(numpy, exclude=exclude)
def test_deepreload():
"Test that dreload does deep reloads and skips excluded modules."
with TemporaryDirectory() as tmpdir:
with prepended_to_syspath(tmpdir):
with open(os.path.join(tmpdir, 'A.py'), 'w') as f:
f.write("class Object(object):\n pass\n")
with open(os.path.join(tmpdir, 'B.py'), 'w') as f:
f.write("import A\n")
import A
import B
# Test that A is not reloaded.
obj = A.Object()
dreload(B, exclude=['A'])
nt.assert_true(isinstance(obj, A.Object))
# Test that A is reloaded.
obj = A.Object()
dreload(B)
nt.assert_false(isinstance(obj, A.Object))
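# Interactive usage sketch (IPython session; the package name is illustrative):
#
#   In [1]: from IPython.lib.deepreload import reload as dreload
#   In [2]: import mypackage
#   In [3]: dreload(mypackage, exclude=['sys', 'os.path', '__main__'])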
| apache-2.0 |
sanghinitin/golismero | thirdparty_libs/django/conf/locale/mk/formats.py | 106 | 1468 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
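# Quick sanity sketch of how the input formats above behave with strptime
# (illustrative only; Django normally consumes them through form fields):
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('25.10.2006.', '%d.%m.%Y.')
#     datetime.datetime(2006, 10, 25, 0, 0)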
| gpl-2.0 |
Rhoana/rh_aligner | old/filter_tiles.py | 1 | 2329 | # Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)
# and a bounding box, and outputs a json file for each tile that is overlapping with the bounding box
import sys
import os
import argparse
import json
from bounding_box import BoundingBox
# common functions
def load_tiles(tiles_spec_fname, bbox):
relevant_tiles = []
with open(tiles_spec_fname, 'r') as data_file:
data = json.load(data_file)
for tile in data:
tile_bbox = BoundingBox.fromList(tile['bbox'])
if bbox.overlap(tile_bbox):
relevant_tiles.append(tile)
return relevant_tiles
def filter_tiles(tiles_fname, out_fname, bbox):
# parse the bounding box arguments
bbox = BoundingBox.fromStr(bbox)
# load all tiles from the tile-spec json file that are relevant to our bounding box
relevant_tiles = load_tiles(tiles_fname, bbox)
# Create a tile-spec file that includes all relevant tiles
with open(out_fname, 'w') as outfile:
json.dump(relevant_tiles, outfile, sort_keys=True, indent=4)
def main():
# Command line parser
parser = argparse.ArgumentParser(description='Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)\
 and a bounding box, and outputs a json file containing only the tiles that overlap the bounding box')
parser.add_argument('tiles_fname', metavar='tiles_json', type=str,
help='a tile_spec file that contains all the images to be aligned in json format')
parser.add_argument('-o', '--output_file', type=str,
help='an output tile_spec file, that will include only the relevant tiles (default: ./filtered.json)',
default='./filtered.json')
# the default bounding box is as big as the image can be
parser.add_argument('-b', '--bounding_box', type=str,
help='the bounding box of the part of image that needs to be aligned format: "from_x to_x from_y to_y" (default: all tiles)',
default='{0} {1} {2} {3}'.format((-sys.maxint - 1), sys.maxint, (-sys.maxint - 1), sys.maxint))
args = parser.parse_args()
#print args
filter_tiles(args.tiles_fname, args.output_file, args.bounding_box)
if __name__ == '__main__':
main()
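# Example invocation (file names and bounding box are hypothetical):
#   python filter_tiles.py tiles.json -o filtered.json -b "0 2048 0 2048"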
| mit |
StevenBlack/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py | 124 | 21459 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BuilderTest(unittest.TestCase):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def _install_fetch_build(self, failure):
def _mock_fetch_build(build_number):
build = Build(
builder=self.builder,
build_number=build_number,
revision=build_number + 1000,
is_green=build_number < 4
)
results = [self._mock_test_result(testname) for testname in failure(build_number)]
layout_test_results = LayoutTestResults(results)
def mock_layout_test_results():
return layout_test_results
build.layout_test_results = mock_layout_test_results
return build
self.builder._fetch_build = _mock_fetch_build
def setUp(self):
self.buildbot = BuildBot()
self.builder = Builder(u"Test Builder \u2661", self.buildbot)
self._install_fetch_build(lambda build_number: ["test1", "test2"])
def test_latest_layout_test_results(self):
self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults([self._mock_test_result(testname) for testname in ["test1", "test2"]])
self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
self.assertTrue(self.builder.latest_layout_test_results())
def test_find_regression_window(self):
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1003)
self.assertEqual(regression_window.failing_build().revision(), 1004)
regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2)
self.assertIsNone(regression_window.build_before_failure())
self.assertEqual(regression_window.failing_build().revision(), 1008)
def test_none_build(self):
self.builder._fetch_build = lambda build_number: None
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertIsNone(regression_window.build_before_failure())
self.assertIsNone(regression_window.failing_build())
def test_flaky_tests(self):
self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1009)
self.assertEqual(regression_window.failing_build().revision(), 1010)
def test_failure_and_flaky(self):
self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1003)
self.assertEqual(regression_window.failing_build().revision(), 1004)
def test_no_results(self):
self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1003)
self.assertEqual(regression_window.failing_build().revision(), 1004)
def test_failure_after_flaky(self):
self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number > 6 else ["test3"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1006)
self.assertEqual(regression_window.failing_build().revision(), 1007)
def test_find_blameworthy_regression_window(self):
self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004])
self.assertIsNone(self.builder.find_blameworthy_regression_window(10, look_back_limit=2))
# Flakey test avoidance requires at least 2 red builds:
self.assertIsNone(self.builder.find_blameworthy_regression_window(4))
self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004])
# Green builder:
self.assertIsNone(self.builder.find_blameworthy_regression_window(3))
def test_build_caching(self):
self.assertEqual(self.builder.build(10), self.builder.build(10))
def test_build_and_revision_for_filename(self):
expectations = {
"r47483 (1)/" : (47483, 1),
"r47483 (1).zip" : (47483, 1),
"random junk": None,
}
for filename, revision_and_build in expectations.items():
self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
def test_file_info_list_to_revision_to_build_list(self):
file_info_list = [
{"filename": "r47483 (1)/"},
{"filename": "r47483 (1).zip"},
{"filename": "random junk"},
]
builds_and_revisions_list = [(47483, 1), (47483, 1)]
self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)
def test_fetch_build(self):
buildbot = BuildBot()
builder = Builder(u"Test Builder \u2661", buildbot)
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision": None, # revision=None means a trunk build started from the force-build button on the builder page.
},
"number": int(build_number),
# Intentionally missing the 'results' key, meaning it's a "pass" build.
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
self.assertIsNotNone(builder._fetch_build(1))
class BuildTest(unittest.TestCase):
def test_layout_test_results(self):
buildbot = BuildBot()
builder = Builder(u"Foo Builder (test)", buildbot)
builder._fetch_file_from_results = lambda results_url, file_name: None
build = Build(builder, None, None, None)
# Test that layout_test_results() returns None if the fetch fails.
self.assertIsNone(build.layout_test_results())
class BuildBotTest(unittest.TestCase):
_example_one_box_status = '''
<table>
<tr>
<td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
<td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
<tr>
<td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
<td class="LastBuild box" >no build</td>
<td align="center" class="Activity building">building<br />< 1 min</td>
<tr>
<td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
<td align="center" class="Activity idle">idle<br />3 pending</td>
<tr>
<td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
</table>
'''
_expected_example_one_box_parsings = [
{
'is_green': True,
'build_number' : 3693,
'name': u'Windows Debug (Tests)',
'built_revision': 47380,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : None,
'name': u'SnowLeopard Intel Release',
'built_revision': None,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : 654,
'name': u'Qt Linux Release',
'built_revision': 47383,
'activity': 'idle',
'pending_builds': 3,
},
{
'is_green': True,
'build_number' : 2090,
'name': u'Qt Windows 32-bit Debug',
'built_revision': 60563,
'activity': 'building',
'pending_builds': 0,
},
]
def test_status_parsing(self):
buildbot = BuildBot()
soup = BeautifulSoup(self._example_one_box_status)
status_table = soup.find("table")
input_rows = status_table.findAll('tr')
for x in range(len(input_rows)):
status_row = input_rows[x]
expected_parsing = self._expected_example_one_box_parsings[x]
builder = buildbot._parse_builder_status_from_row(status_row)
# Make sure we aren't parsing more or less than we expect
self.assertEqual(builder.keys(), expected_parsing.keys())
for key, expected_value in expected_parsing.items():
self.assertEqual(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
def test_builder_with_name(self):
buildbot = BuildBot()
builder = buildbot.builder_with_name("Test Builder")
self.assertEqual(builder.name(), "Test Builder")
self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder")
self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder")
# Override _fetch_build_dictionary function to not touch the network.
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision" : 2 * build_number,
},
"number" : int(build_number),
"results" : build_number % 2, # 0 means pass
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
build = builder.build(10)
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
self.assertEqual(build.revision(), 20)
self.assertTrue(build.is_green())
build = build.previous_build()
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
self.assertEqual(build.revision(), 18)
self.assertFalse(build.is_green())
self.assertIsNone(builder.build(None))
_example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
<table>
<tr class="alt">
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
<tr class="directory ">
<td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
<td><b></b></td>
<td><b>[Directory]</b></td>
<td><b></b></td>
</tr>
<tr class="file alt">
<td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
<td>89K</td>
<td>[application/zip]</td>
<td></td>
</tr>
'''
_expected_files = [
{
"filename" : "r47483 (1)/",
"size" : "",
"type" : "[Directory]",
"encoding" : "",
},
{
"filename" : "r47484 (2).zip",
"size" : "89K",
"type" : "[application/zip]",
"encoding" : "",
},
]
def test_parse_build_to_revision_map(self):
buildbot = BuildBot()
files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
self.assertEqual(self._expected_files, files)
_fake_builder_page = '''
<body>
<div class="content">
<h1>Some Builder</h1>
<p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
<div class="column">
<h2>Recent Builds:</h2>
<table class="info">
<tr>
<th>Time</th>
<th>Revision</th>
<th>Result</th> <th>Build #</th>
<th>Info</th>
</tr>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td> <td><a href=".../37604">#37604</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td> <td><a href=".../37603">#37603</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">success</td> <td><a href=".../37602">#37602</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td> <td><a href=".../37601">#37601</a></td>
<td class="left">Failed compile-webkit</td>
</tr>
</table>
</body>'''
_fake_builder_page_without_success = '''
<body>
<table>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 11:58</td>
<td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
<td class="retry">retry</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td>
</tr>
</table>
</body>'''
def test_revisions_for_builder(self):
buildbot = BuildBot()
buildbot._fetch_builder_page = lambda builder: builder.page
builder_with_success = Builder('Some builder', None)
builder_with_success.page = self._fake_builder_page
self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])
builder_without_success = Builder('Some builder', None)
builder_without_success.page = self._fake_builder_page_without_success
self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])
def test_find_green_revision(self):
buildbot = BuildBot()
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, True), (3, False)],
'Builder 3': [(1, True), (3, True)],
}), 1)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (3, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, False), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, True), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 2)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (2, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
'Builder 3': [(2, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (4, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (3, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [],
'Builder 3': [(1, True), (2, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
}), 7)
def _fetch_build(self, build_number):
if build_number == 5:
return "correct build"
return "wrong build"
def _fetch_revision_to_build_map(self):
return {'r5': 5, 'r2': 2, 'r3': 3}
def test_latest_cached_build(self):
b = Builder('builder', BuildBot())
b._fetch_build = self._fetch_build
b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
self.assertEqual("correct build", b.latest_cached_build())
def results_url(self):
return "some-url"
def test_results_zip_url(self):
b = Build(None, 123, 123, False)
b.results_url = self.results_url
self.assertEqual("some-url.zip", b.results_zip_url())
| bsd-3-clause |
EvanK/ansible | lib/ansible/modules/web_infrastructure/supervisorctl.py | 73 | 9341 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: supervisorctl
short_description: Manage the state of a program or group of programs running via supervisord
description:
- Manage the state of a program or group of programs running via supervisord
version_added: "0.7"
options:
name:
description:
- The name of the supervisord program or group to manage.
- The name will be taken as group name when it ends with a colon I(:)
- Group support is only available in Ansible version 1.6 or later.
required: true
config:
description:
- The supervisor configuration file path
version_added: "1.3"
server_url:
description:
- URL on which supervisord server is listening
version_added: "1.3"
username:
description:
- username to use for authentication
version_added: "1.3"
password:
description:
- password to use for authentication
version_added: "1.3"
state:
description:
- The desired state of program/group.
required: true
choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
signal:
description:
- The signal to send to the program/group, when combined with the 'signalled' state. Required when l(state=signalled).
version_added: "2.8"
supervisorctl_path:
description:
- path to supervisorctl executable
version_added: "1.4"
notes:
- When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
- When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
- When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
requirements: [ "supervisorctl" ]
author:
- "Matt Wright (@mattupstate)"
- "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
'''
EXAMPLES = '''
# Manage the state of program to be in 'started' state.
- supervisorctl:
name: my_app
state: started
# Manage the state of program group to be in 'started' state.
- supervisorctl:
name: 'my_apps:'
state: started
# Restart my_app, reading supervisorctl configuration from a specified file.
- supervisorctl:
name: my_app
state: restarted
config: /var/opt/my_project/supervisord.conf
# Restart my_app, connecting to supervisord with credentials and server URL.
- supervisorctl:
name: my_app
state: restarted
username: test
password: testpass
server_url: http://localhost:9001
# Send a signal to my_app via supervisorctl
- supervisorctl:
name: my_app
state: signalled
signal: USR1
'''
import os
from ansible.module_utils.basic import AnsibleModule, is_executable
def main():
arg_spec = dict(
name=dict(required=True),
config=dict(required=False, type='path'),
server_url=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
supervisorctl_path=dict(required=False, type='path'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
signal=dict(required=False)
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
is_group = False
if name.endswith(':'):
is_group = True
name = name.rstrip(':')
state = module.params['state']
config = module.params.get('config')
server_url = module.params.get('server_url')
username = module.params.get('username')
password = module.params.get('password')
supervisorctl_path = module.params.get('supervisorctl_path')
signal = module.params.get('signal')
# we check error message for a pattern, so we need to make sure that's in C locale
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
if supervisorctl_path:
if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
supervisorctl_args = [supervisorctl_path]
else:
module.fail_json(
msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
else:
supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
if config:
supervisorctl_args.extend(['-c', config])
if server_url:
supervisorctl_args.extend(['-s', server_url])
if username:
supervisorctl_args.extend(['-u', username])
if password:
supervisorctl_args.extend(['-p', password])
if state == 'signalled' and not signal:
module.fail_json(msg="State 'signalled' requires a 'signal' value")
def run_supervisorctl(cmd, name=None, **kwargs):
args = list(supervisorctl_args) # copy the master args
args.append(cmd)
if name:
args.append(name)
return module.run_command(args, **kwargs)
def get_matched_processes():
matched = []
rc, out, err = run_supervisorctl('status')
for line in out.splitlines():
# One status line may look like one of these two:
# process not in group:
# echo_date_lonely RUNNING pid 7680, uptime 13:22:18
# process in group:
# echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
fields = [field for field in line.split(' ') if field != '']
process_name = fields[0]
status = fields[1]
if is_group:
# If there is ':', this process must be in a group.
if ':' in process_name:
group = process_name.split(':')[0]
if group != name:
continue
else:
continue
else:
if process_name != name:
continue
matched.append((process_name, status))
return matched
def take_action_on_processes(processes, status_filter, action, expected_result):
to_take_action_on = []
for process_name, status in processes:
if status_filter(status):
to_take_action_on.append(process_name)
if len(to_take_action_on) == 0:
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
for process_name in to_take_action_on:
rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
if '%s: %s' % (process_name, expected_result) not in out:
module.fail_json(msg=out)
module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
if state == 'restarted':
rc, out, err = run_supervisorctl('update', check_rc=True)
processes = get_matched_processes()
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: True, 'restart', 'started')
processes = get_matched_processes()
if state == 'absent':
if len(processes) == 0:
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
run_supervisorctl('reread', check_rc=True)
rc, out, err = run_supervisorctl('remove', name)
if '%s: removed process group' % name in out:
module.exit_json(changed=True, name=name, state=state)
else:
module.fail_json(msg=out, name=name, state=state)
if state == 'present':
if len(processes) > 0:
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
run_supervisorctl('reread', check_rc=True)
rc, out, err = run_supervisorctl('add', name)
if '%s: added process group' % name in out:
module.exit_json(changed=True, name=name, state=state)
else:
module.fail_json(msg=out, name=name, state=state)
if state == 'started':
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
if state == 'stopped':
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
if state == 'signalled':
if len(processes) == 0:
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
if __name__ == '__main__':
main()
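# The module ultimately shells out to supervisorctl; e.g. state=restarted is
# roughly equivalent to running (config path and program name illustrative):
#   supervisorctl -c /etc/supervisord.conf update
#   supervisorctl -c /etc/supervisord.conf restart my_app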
| gpl-3.0 |
Sciumo/GeographicLib | python/geographiclib/polygonarea.py | 3 | 8833 | """polygonarea.py: transcription of GeographicLib::PolygonArea class."""
# polygonarea.py
#
# This is a rather literal translation of the GeographicLib::PolygonArea class
# to python. See the documentation for the C++ class for more information at
#
# http://geographiclib.sourceforge.net/html/annotated.html
#
# The algorithms are derived in
#
# Charles F. F. Karney,
# Algorithms for geodesics, J. Geodesy 87, 43-55 (2013),
# https://dx.doi.org/10.1007/s00190-012-0578-z
# Addenda: http://geographiclib.sf.net/geod-addenda.html
#
# Copyright (c) Charles Karney (2011-2014) <charles@karney.com> and licensed
# under the MIT/X11 License. For more information, see
# http://geographiclib.sourceforge.net/
######################################################################
import math
from geographiclib.geomath import Math
from geographiclib.accumulator import Accumulator
class PolygonArea(object):
"""Area of a geodesic polygon"""
def transit(lon1, lon2):
"""Count crossings of prime meridian for AddPoint."""
# Return 1 or -1 if crossing prime meridian in east or west direction.
# Otherwise return zero.
# Compute lon12 the same way as Geodesic::Inverse.
lon1 = Math.AngNormalize(lon1)
lon2 = Math.AngNormalize(lon2)
lon12 = Math.AngDiff(lon1, lon2)
cross = (1 if lon1 < 0 and lon2 >= 0 and lon12 > 0
else (-1 if lon2 < 0 and lon1 >= 0 and lon12 < 0 else 0))
return cross
transit = staticmethod(transit)
def transitdirect(lon1, lon2):
"""Count crossings of prime meridian for AddEdge."""
# We want to compute exactly
# int(floor(lon2 / 360)) - int(floor(lon1 / 360))
# Since we only need the parity of the result we can use std::remquo but
# this is buggy with g++ 4.8.3 and requires C++11. So instead we do
lon1 = math.fmod(lon1, 720.0); lon2 = math.fmod(lon2, 720.0)
return ( (0 if ((lon2 >= 0 and lon2 < 360) or lon2 < -360) else 1) -
(0 if ((lon1 >= 0 and lon1 < 360) or lon1 < -360) else 1) )
transitdirect = staticmethod(transitdirect)
def __init__(self, earth, polyline = False):
from geographiclib.geodesic import Geodesic
self._earth = earth
self._area0 = 4 * math.pi * earth._c2
self._polyline = polyline
self._mask = (Geodesic.LATITUDE | Geodesic.LONGITUDE |
Geodesic.DISTANCE |
(Geodesic.EMPTY if self._polyline else
Geodesic.AREA | Geodesic.LONG_UNROLL))
if not self._polyline: self._areasum = Accumulator()
self._perimetersum = Accumulator()
self.Clear()
def Clear(self):
"""Reset to empty polygon."""
self._num = 0
self._crossings = 0
if not self._polyline: self._areasum.Set(0)
self._perimetersum.Set(0)
self._lat0 = self._lon0 = self._lat1 = self._lon1 = Math.nan
def AddPoint(self, lat, lon):
"""Add a vertex to the polygon."""
if self._num == 0:
self._lat0 = self._lat1 = lat
self._lon0 = self._lon1 = lon
else:
_, s12, _, _, _, _, _, S12 = self._earth.GenInverse(
self._lat1, self._lon1, lat, lon, self._mask)
self._perimetersum.Add(s12)
if not self._polyline:
self._areasum.Add(S12)
self._crossings += PolygonArea.transit(self._lon1, lon)
self._lat1 = lat
self._lon1 = lon
self._num += 1
def AddEdge(self, azi, s):
"""Add an edge to the polygon."""
if self._num != 0:
_, lat, lon, _, _, _, _, _, S12 = self._earth.GenDirect(
self._lat1, self._lon1, azi, False, s, self._mask)
self._perimetersum.Add(s)
if not self._polyline:
self._areasum.Add(S12)
self._crossings += PolygonArea.transitdirect(self._lon1, lon)
self._lat1 = lat
self._lon1 = lon
self._num += 1
# return number, perimeter, area
def Compute(self, reverse, sign):
"""Return the number, perimeter, and area for the polygon."""
if self._polyline: area = Math.nan
if self._num < 2:
perimeter = 0
if not self._polyline: area = 0
return self._num, perimeter, area
if self._polyline:
perimeter = self._perimetersum.Sum()
return self._num, perimeter, area
_, s12, _, _, _, _, _, S12 = self._earth.GenInverse(
self._lat1, self._lon1, self._lat0, self._lon0, self._mask)
perimeter = self._perimetersum.Sum(s12)
tempsum = Accumulator(self._areasum)
tempsum.Add(S12)
crossings = self._crossings + PolygonArea.transit(self._lon1, self._lon0)
if crossings & 1:
tempsum.Add( (1 if tempsum.Sum() < 0 else -1) * self._area0/2 )
# area is with the clockwise sense. If !reverse convert to
# counter-clockwise convention.
if not reverse: tempsum.Negate()
# If sign put area in (-area0/2, area0/2], else put area in [0, area0)
if sign:
if tempsum.Sum() > self._area0/2:
tempsum.Add( -self._area0 )
elif tempsum.Sum() <= -self._area0/2:
tempsum.Add( self._area0 )
else:
if tempsum.Sum() >= self._area0:
tempsum.Add( -self._area0 )
elif tempsum.Sum() < 0:
tempsum.Add( self._area0 )
area = 0 + tempsum.Sum()
return self._num, perimeter, area
# return number, perimeter, area
def TestPoint(self, lat, lon, reverse, sign):
"""Return the results for a tentative additional vertex."""
if self._polyline: area = Math.nan
if self._num == 0:
perimeter = 0
if not self._polyline: area = 0
return 1, perimeter, area
perimeter = self._perimetersum.Sum()
tempsum = 0 if self._polyline else self._areasum.Sum()
crossings = self._crossings; num = self._num + 1
for i in ([0] if self._polyline else [0, 1]):
_, s12, _, _, _, _, _, S12 = self._earth.GenInverse(
self._lat1 if i == 0 else lat, self._lon1 if i == 0 else lon,
self._lat0 if i != 0 else lat, self._lon0 if i != 0 else lon,
self._mask)
perimeter += s12
if not self._polyline:
tempsum += S12
crossings += PolygonArea.transit(self._lon1 if i == 0 else lon,
self._lon0 if i != 0 else lon)
if self._polyline:
return num, perimeter, area
if crossings & 1:
tempsum += (1 if tempsum < 0 else -1) * self._area0/2
# area is with the clockwise sense. If !reverse convert to
# counter-clockwise convention.
if not reverse: tempsum *= -1
# If sign put area in (-area0/2, area0/2], else put area in [0, area0)
if sign:
if tempsum > self._area0/2:
tempsum -= self._area0
elif tempsum <= -self._area0/2:
tempsum += self._area0
else:
if tempsum >= self._area0:
tempsum -= self._area0
elif tempsum < 0:
tempsum += self._area0
area = 0 + tempsum
return num, perimeter, area
# return number, perimeter, area (for backward compatibility)
def TestCompute(self, lat, lon, reverse, sign):
return self.TestPoint(lat, lon, reverse, sign)
# return num, perimeter, area
def TestEdge(self, azi, s, reverse, sign):
"""Return the results for a tentative additional edge."""
if self._num == 0: # we don't have a starting point!
return 0, Math.nan, Math.nan
num = self._num + 1
perimeter = self._perimetersum.Sum() + s
if self._polyline:
return num, perimeter, Math.nan
tempsum = self._areasum.Sum()
crossings = self._crossings
_, lat, lon, _, _, _, _, _, S12 = self._earth.GenDirect(
self._lat1, self._lon1, azi, False, s, self._mask)
tempsum += S12
crossings += PolygonArea.transitdirect(self._lon1, lon)
_, s12, _, _, _, _, _, S12 = self._earth.GenInverse(
lat, lon, self._lat0, self._lon0, self._mask)
perimeter += s12
tempsum += S12
crossings += PolygonArea.transit(lon, self._lon0)
if crossings & 1:
tempsum += (1 if tempsum < 0 else -1) * self._area0/2
# area is with the clockwise sense. If !reverse convert to
# counter-clockwise convention.
if not reverse: tempsum *= -1
# If sign put area in (-area0/2, area0/2], else put area in [0, area0)
if sign:
if tempsum > self._area0/2:
tempsum -= self._area0
elif tempsum <= -self._area0/2:
tempsum += self._area0
else:
if tempsum >= self._area0:
tempsum -= self._area0
elif tempsum < 0:
tempsum += self._area0
area = 0 + tempsum
return num, perimeter, area
def CurrentPoint(self):
"""Return the current point as a lat, lon tuple."""
return self._lat1, self._lon1
def Area(earth, points, polyline):
"""Return the number, perimeter, and area for a set of vertices."""
poly = PolygonArea(earth, polyline)
for p in points:
poly.AddPoint(p['lat'], p['lon'])
return poly.Compute(False, True)
Area = staticmethod(Area)
| mit |
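A quick usage sketch for the `PolygonArea` API above — assuming the `geographiclib` package, where `Geodesic.WGS84.Polygon()` returns a `PolygonArea` instance; the vertices and the printed summary are illustrative:

```python
# Hedged sketch: perimeter and area of a geodesic triangle covering one
# octant of the WGS84 ellipsoid (assumes the geographiclib package).
from geographiclib.geodesic import Geodesic

poly = Geodesic.WGS84.Polygon()            # a PolygonArea instance
for lat, lon in [(0, 0), (0, 90), (90, 0)]:
    poly.AddPoint(lat, lon)

# Compute(reverse, sign) -> (number of vertices, perimeter in m, area in m^2)
num, perimeter, area = poly.Compute(False, True)
print(num, perimeter, area)
```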
containerleaks/site | pelican-plugins/liquid_tags/mdx_liquid_tags.py | 281 | 3447 | """
Markdown Extension for Liquid-style Tags
----------------------------------------
A markdown extension to allow user-defined tags of the form::
{% tag arg1 arg2 ... argn %}
Where "tag" is associated with some user-defined extension.
These result in a preprocess step within markdown that produces
either markdown or html.
"""
import warnings
import markdown
import itertools
import re
import os
from functools import wraps
# Define some regular expressions
LIQUID_TAG = re.compile(r'\{%.*?%\}', re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r'(?:\s*)(\S+)(?:\s*)')
LT_CONFIG = { 'CODE_DIR': 'code',
'NOTEBOOK_DIR': 'notebooks',
'FLICKR_API_KEY': 'flickr',
'GIPHY_API_KEY': 'giphy'
}
LT_HELP = { 'CODE_DIR' : 'Code directory for include_code subplugin',
'NOTEBOOK_DIR' : 'Notebook directory for notebook subplugin',
'FLICKR_API_KEY': 'Flickr key for accessing the API',
'GIPHY_API_KEY': 'Giphy key for accessing the API'
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = '\n'.join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub('', markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append('')
# reconstruct string
page = ''.join(itertools.chain(*zip(LIQUID_TAG.split(page),
liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key,value in LT_CONFIG.items():
self.config[key] = [value,LT_HELP[key]]
super(LiquidTags,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key,value in LT_CONFIG.items():
config[key] = [config[key],LT_HELP[key]]
super(LiquidTags,self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add('mdincludes',
_LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs or {})
| gpl-3.0 |
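A hedged sketch of how a tag of the form `{% tag arg1 ... %}` might be wired up with the `register` decorator above; the `shout` tag name and the import path are hypothetical, and since `extendMarkdown` takes `md_globals` this assumes a 2.x `markdown` package:

```python
# Hypothetical example: register and use a custom {% shout ... %} tag.
import markdown
from mdx_liquid_tags import LiquidTags  # assumed importable module path

@LiquidTags.register('shout')
def shout(preprocessor, tag, markup):
    # `markup` is the stripped text between the tag name and %}
    return '<strong>%s</strong>' % markup.upper()

md = markdown.Markdown(extensions=[LiquidTags({})])
# roughly '<p><strong>HELLO WORLD</strong></p>'
print(md.convert('{% shout hello world %}'))
```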
favll/pogom | pogom/pgoapi/protos/POGOProtos/Settings/Master/Item/FortModifierAttributes_pb2.py | 16 | 2825 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/Item/FortModifierAttributes.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/Item/FortModifierAttributes.proto',
package='POGOProtos.Settings.Master.Item',
syntax='proto3',
serialized_pb=_b('\n<POGOProtos/Settings/Master/Item/FortModifierAttributes.proto\x12\x1fPOGOProtos.Settings.Master.Item\"b\n\x16\x46ortModifierAttributes\x12!\n\x19modifier_lifetime_seconds\x18\x01 \x01(\x05\x12%\n\x1dtroy_disk_num_pokemon_spawned\x18\x02 \x01(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FORTMODIFIERATTRIBUTES = _descriptor.Descriptor(
name='FortModifierAttributes',
full_name='POGOProtos.Settings.Master.Item.FortModifierAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='modifier_lifetime_seconds', full_name='POGOProtos.Settings.Master.Item.FortModifierAttributes.modifier_lifetime_seconds', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='troy_disk_num_pokemon_spawned', full_name='POGOProtos.Settings.Master.Item.FortModifierAttributes.troy_disk_num_pokemon_spawned', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=97,
serialized_end=195,
)
DESCRIPTOR.message_types_by_name['FortModifierAttributes'] = _FORTMODIFIERATTRIBUTES
FortModifierAttributes = _reflection.GeneratedProtocolMessageType('FortModifierAttributes', (_message.Message,), dict(
DESCRIPTOR = _FORTMODIFIERATTRIBUTES,
__module__ = 'POGOProtos.Settings.Master.Item.FortModifierAttributes_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.Item.FortModifierAttributes)
))
_sym_db.RegisterMessage(FortModifierAttributes)
# @@protoc_insertion_point(module_scope)
| mit |
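Generated `_pb2` modules like this one are used by instantiating the message class and round-tripping it through the wire format; a small sketch, with the import path taken from the file's package declaration and the field values invented:

```python
# Illustrative round-trip through the protobuf wire format.
from POGOProtos.Settings.Master.Item import FortModifierAttributes_pb2

attrs = FortModifierAttributes_pb2.FortModifierAttributes()
attrs.modifier_lifetime_seconds = 1800        # example value
attrs.troy_disk_num_pokemon_spawned = 5       # example value

data = attrs.SerializeToString()              # bytes in wire format

decoded = FortModifierAttributes_pb2.FortModifierAttributes()
decoded.ParseFromString(data)
assert decoded.modifier_lifetime_seconds == 1800
```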
invesalius/invesalius3 | invesalius/gui/widgets/slice_menu.py | 4 | 11800 | # -*- coding: UTF-8 -*-
#--------------------------------------------------------------------------
# Software: InVesalius - software for 3D reconstruction of medical images
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
import sys
try:
from collections import OrderedDict
except(ImportError):
from ordereddict import OrderedDict
import wx
from pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.data.slice_ as sl
import invesalius.presets as presets
from invesalius.gui.dialogs import ClutImagedataDialog
PROJECTIONS_ID = OrderedDict(((_('Normal'), const.PROJECTION_NORMAL),
(_('MaxIP'), const.PROJECTION_MaxIP),
(_('MinIP'), const.PROJECTION_MinIP),
(_('MeanIP'), const.PROJECTION_MeanIP),
(_('MIDA'), const.PROJECTION_MIDA),
(_('Contour MaxIP'), const.PROJECTION_CONTOUR_MIP),
(_('Contour MIDA'), const.PROJECTION_CONTOUR_MIDA),) )
class SliceMenu(wx.Menu):
def __init__(self):
wx.Menu.__init__(self)
self.ID_TO_TOOL_ITEM = {}
self.cdialog = None
#------------ Sub menu of the window and level ----------
submenu_wl = wx.Menu()
self._gen_event = True
#Window and level from DICOM
new_id = self.id_wl_first = wx.NewId()
wl_item = wx.MenuItem(submenu_wl, new_id,\
_('Default'), kind=wx.ITEM_RADIO)
submenu_wl.Append(wl_item)
self.ID_TO_TOOL_ITEM[new_id] = wl_item
#Case the user change window and level
new_id = self.other_wl_id = wx.NewId()
wl_item = wx.MenuItem(submenu_wl, new_id,\
_('Manual'), kind=wx.ITEM_RADIO)
submenu_wl.Append(wl_item)
self.ID_TO_TOOL_ITEM[new_id] = wl_item
for name in const.WINDOW_LEVEL:
if not(name == _('Default') or name == _('Manual')):
new_id = wx.NewId()
wl_item = wx.MenuItem(submenu_wl, new_id,\
name, kind=wx.ITEM_RADIO)
submenu_wl.Append(wl_item)
self.ID_TO_TOOL_ITEM[new_id] = wl_item
#----------- Sub menu of the save and load options ---------
#submenu_wl.AppendSeparator()
#options = [_("Save current values"),
# _("Save current values as..."),_("Load values")]
#for name in options:
# new_id = wx.NewId()
# wl_item = wx.MenuItem(submenu_wl, new_id,\
# name)
# submenu_wl.Append(wl_item)
# self.ID_TO_TOOL_ITEM[new_id] = wl_item
#------------ Sub menu of the pseudo colors ----------------
if sys.platform.startswith('linux'):
mkind = wx.ITEM_CHECK
else:
mkind = wx.ITEM_RADIO
self.pseudo_color_items = {}
submenu_pseudo_colours = wx.Menu()
self.pseudo_color_items = {}
new_id = self.id_pseudo_first = wx.NewId()
color_item = wx.MenuItem(submenu_pseudo_colours, new_id,\
_("Default "), kind=mkind)
submenu_pseudo_colours.Append(color_item)
color_item.Check(1)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
for name in sorted(const.SLICE_COLOR_TABLE):
if not(name == _("Default ")):
new_id = wx.NewId()
color_item = wx.MenuItem(submenu_wl, new_id,\
name, kind=mkind)
submenu_pseudo_colours.Append(color_item)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
self.plist_presets = presets.get_wwwl_presets()
for name in sorted(self.plist_presets):
new_id = wx.NewId()
color_item = wx.MenuItem(submenu_wl, new_id, name,
kind=mkind)
submenu_pseudo_colours.Append(color_item)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
new_id = wx.NewId()
color_item = wx.MenuItem(submenu_wl, new_id, _('Custom'),
kind=mkind)
submenu_pseudo_colours.Append(color_item)
self.ID_TO_TOOL_ITEM[new_id] = color_item
self.pseudo_color_items[new_id] = color_item
# --------------- Sub menu of the projection type ---------------------
self.projection_items = {}
submenu_projection = wx.Menu()
for name in PROJECTIONS_ID:
new_id = wx.NewId()
projection_item = wx.MenuItem(submenu_projection, new_id, name,
kind=wx.ITEM_RADIO)
submenu_projection.Append(projection_item)
self.ID_TO_TOOL_ITEM[new_id] = projection_item
self.projection_items[PROJECTIONS_ID[name]] = projection_item
flag_tiling = False
#------------ Sub menu of the image tiling ---------------
submenu_image_tiling = wx.Menu()
for name in sorted(const.IMAGE_TILING):
new_id = wx.NewId()
image_tiling_item = wx.MenuItem(submenu_image_tiling, new_id,\
name, kind=wx.ITEM_RADIO)
submenu_image_tiling.Append(image_tiling_item)
self.ID_TO_TOOL_ITEM[new_id] = image_tiling_item
#Save first id item
if not(flag_tiling):
self.id_tiling_first = new_id
flag_tiling = True
# Add sub itens in the menu
self.Append(-1, _("Window width and level"), submenu_wl)
self.Append(-1, _("Pseudo color"), submenu_pseudo_colours)
self.Append(-1, _("Projection type"), submenu_projection)
###self.Append(-1, _("Image Tiling"), submenu_image_tiling)
# This top-level bind doesn't work on Linux
self.Bind(wx.EVT_MENU, self.OnPopup)
# On Linux the bind must be put on each submenu
if sys.platform.startswith('linux') or sys.platform == 'darwin':
submenu_wl.Bind(wx.EVT_MENU, self.OnPopup)
submenu_pseudo_colours.Bind(wx.EVT_MENU, self.OnPopup)
submenu_image_tiling.Bind(wx.EVT_MENU, self.OnPopup)
submenu_projection.Bind(wx.EVT_MENU, self.OnPopup)
self.__bind_events()
def __bind_events(self):
Publisher.subscribe(self.CheckWindowLevelOther, 'Check window and level other')
Publisher.subscribe(self.FirstItemSelect, 'Select first item from slice menu')
Publisher.subscribe(self._close, 'Close project data')
Publisher.subscribe(self._check_projection_menu, 'Check projection menu')
def FirstItemSelect(self):
item = self.ID_TO_TOOL_ITEM[self.id_wl_first]
item.Check(True)
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
if it.IsChecked():
it.Check(False)
item = self.ID_TO_TOOL_ITEM[self.id_pseudo_first]
item.Check(True)
# item = self.ID_TO_TOOL_ITEM[self.id_tiling_first]
# item.Check(True)
def CheckWindowLevelOther(self):
item = self.ID_TO_TOOL_ITEM[self.other_wl_id]
item.Check()
def _check_projection_menu(self, projection_id):
item = self.projection_items[projection_id]
item.Check()
def OnPopup(self, evt):
id = evt.GetId()
item = self.ID_TO_TOOL_ITEM[evt.GetId()]
key = item.GetItemLabelText()
if(key in const.WINDOW_LEVEL.keys()):
window, level = const.WINDOW_LEVEL[key]
Publisher.sendMessage('Bright and contrast adjustment image',
window=window, level=level)
Publisher.sendMessage('Update window level value',
window=window,
level=level)
# Publisher.sendMessage('Update window and level text',
# "WL: %d WW: %d"%(level, window))
Publisher.sendMessage('Update slice viewer')
# Necessary to update the slice plane in case a volume exists
Publisher.sendMessage('Render volume viewer')
elif(key in const.SLICE_COLOR_TABLE.keys()):
values = const.SLICE_COLOR_TABLE[key]
Publisher.sendMessage('Change colour table from background image', values=values)
Publisher.sendMessage('Update slice viewer')
if sys.platform.startswith('linux'):
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
it.Check(False)
item.Check()
self.HideClutDialog()
self._gen_event = True
elif key in self.plist_presets:
values = presets.get_wwwl_preset_colours(self.plist_presets[key])
Publisher.sendMessage('Change colour table from background image from plist', values=values)
Publisher.sendMessage('Update slice viewer')
if sys.platform.startswith('linux'):
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
it.Check(False)
item.Check()
self.HideClutDialog()
self._gen_event = True
elif(key in const.IMAGE_TILING.keys()):
values = const.IMAGE_TILING[key]
Publisher.sendMessage('Set slice viewer layout', layout=values)
Publisher.sendMessage('Update slice viewer')
elif key in PROJECTIONS_ID:
pid = PROJECTIONS_ID[key]
Publisher.sendMessage('Set projection type', projection_id=pid)
Publisher.sendMessage('Reload actual slice')
elif key == _('Custom'):
if self.cdialog is None:
slc = sl.Slice()
histogram = slc.histogram
init = int(slc.matrix.min())
end = int(slc.matrix.max())
nodes = slc.nodes
self.cdialog = ClutImagedataDialog(histogram, init, end, nodes)
self.cdialog.Show()
else:
self.cdialog.Show(self._gen_event)
if sys.platform.startswith('linux'):
for i in self.pseudo_color_items:
it = self.pseudo_color_items[i]
it.Check(False)
item.Check()
item = self.ID_TO_TOOL_ITEM[evt.GetId()]
item.Check(True)
self._gen_event = False
evt.Skip()
def HideClutDialog(self):
if self.cdialog:
self.cdialog.Hide()
def _close(self):
if self.cdialog:
self.cdialog.Destroy()
self.cdialog = None
| gpl-2.0 |
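SliceMenu's `__bind_events`/`OnPopup` pair communicates entirely through pypubsub topics; a minimal standalone sketch of that pattern, with a hypothetical listener (the topic name is one actually used above):

```python
# Minimal pypubsub sketch mirroring the subscribe/sendMessage calls above.
from pubsub import pub as Publisher

def on_window_level(window, level):
    # hypothetical listener; keyword names must match sendMessage kwargs
    print("WL: %d WW: %d" % (level, window))

Publisher.subscribe(on_window_level, 'Update window level value')
Publisher.sendMessage('Update window level value', window=400, level=40)
```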
open-synergy/stock-logistics-warehouse | stock_cycle_count/models/stock_cycle_count_rule.py | 2 | 9715 | # -*- coding: utf-8 -*-
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models, _
from openerp.exceptions import ValidationError
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from datetime import timedelta, datetime
class StockCycleCountRule(models.Model):
_name = 'stock.cycle.count.rule'
_description = "Stock Cycle Counts Rules"
@api.one
def _compute_currency(self):
self.currency_id = self.env.user.company_id.currency_id
@api.model
def _selection_rule_types(self):
return [
('periodic', _('Periodic')),
('turnover', _('Value Turnover')),
('accuracy', _('Minimum Accuracy')),
('zero', _('Zero Confirmation'))]
@api.one
@api.constrains('rule_type', 'warehouse_ids')
def _check_zero_rule(self):
if self.rule_type == 'zero' and len(self.warehouse_ids) > 1:
raise ValidationError(
_('Zero confirmation rules can only have one warehouse '
'assigned.')
)
if self.rule_type == 'zero':
zero_rule = self.search([
('rule_type', '=', 'zero'),
('warehouse_ids', '=', self.warehouse_ids.id)])
if len(zero_rule) > 1:
raise ValidationError(
_('You can only have one zero confirmation rule per '
'warehouse.')
)
@api.onchange('rule_type')
def _get_rule_description(self):
if self.rule_type == 'periodic':
self.rule_description = _('Ensures that at least a defined number '
'of counts in a given period will '
'be run.')
elif self.rule_type == 'turnover':
self.rule_description = _('Schedules a count every time the total '
'turnover of a location exceeds the '
'threshold. This considers every '
'product going into/out of the location')
elif self.rule_type == 'accuracy':
self.rule_description = _('Schedules a count every time the '
'accuracy of a location goes under a '
'given threshold.')
elif self.rule_type == 'zero':
self.rule_description = _('Perform an Inventory Adjustment every '
'time a location in the warehouse runs '
'out of stock in order to confirm it is '
'truly empty.')
else:
self.rule_description = _('(No description provided.)')
@api.constrains('periodic_qty_per_period', 'periodic_count_period')
def _check_negative_periodic(self):
if self.periodic_qty_per_period < 1:
raise ValidationError(
_('You cannot define a negative or null number of counts per '
'period.')
)
if self.periodic_count_period < 0:
raise ValidationError(
_('You cannot define a negative period.')
)
@api.onchange('location_ids')
def _get_warehouses(self):
"""Get the warehouses for the selected locations."""
wh_ids = []
for loc in self.location_ids:
wh_ids.append(loc.get_warehouse(loc))
wh_ids = list(set(wh_ids))
self.warehouse_ids = self.env['stock.warehouse'].browse(wh_ids)
name = fields.Char('Name', required=True)
rule_type = fields.Selection(selection="_selection_rule_types",
string='Type of rule',
required=True)
rule_description = fields.Char(string='Rule Description',
compute=_get_rule_description)
active = fields.Boolean(string='Active', default=True)
periodic_qty_per_period = fields.Integer(string='Counts per period',
default=1)
periodic_count_period = fields.Integer(string='Period in days')
turnover_inventory_value_threshold = fields.Float(
string='Turnover Inventory Value Threshold')
currency_id = fields.Many2one(comodel_name='res.currency',
string='Currency',
compute=_compute_currency)
accuracy_threshold = fields.Float(string='Minimum Accuracy Threshold',
digits=(3, 2))
apply_in = fields.Selection(
string='Apply this rule in:',
selection=[('warehouse', 'Selected warehouses'),
('location', 'Selected Location Zones.')],
default='warehouse')
warehouse_ids = fields.Many2many(
comodel_name='stock.warehouse',
relation='warehouse_cycle_count_rule_rel', column1='rule_id',
column2='warehouse_id', string='Warehouses where applied')
location_ids = fields.Many2many(
comodel_name='stock.location',
relation='location_cycle_count_rule_rel', column1='rule_id',
column2='location_id', string='Zones where applied')
def compute_rule(self, locs):
if self.rule_type == 'periodic':
proposed_cycle_counts = self._compute_rule_periodic(locs)
elif self.rule_type == 'turnover':
proposed_cycle_counts = self._compute_rule_turnover(locs)
elif self.rule_type == 'accuracy':
proposed_cycle_counts = self._compute_rule_accuracy(locs)
else:
# 'zero' rules are event-driven, so the scheduler proposes nothing.
proposed_cycle_counts = []
return proposed_cycle_counts
@api.model
def _propose_cycle_count(self, date, location):
cycle_count = {
'date': date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'location': location,
'rule_type': self
}
return cycle_count
@api.model
def _compute_rule_periodic(self, locs):
cycle_counts = []
for loc in locs:
last_inventories = self.env['stock.inventory'].search([
('location_id', '=', loc.id),
('state', 'in', ['confirm', 'done', 'draft'])]).mapped('date')
if last_inventories:
latest_inventory = sorted(last_inventories, reverse=True)[0]
try:
period = self.periodic_count_period / \
self.periodic_qty_per_period
next_date = datetime.strptime(
latest_inventory,
DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(
days=period)
if next_date < datetime.today():
next_date = datetime.today()
except Exception as e:
raise ValidationError(
_('Error found determining the frequency of periodic '
'cycle count rule. %s') % e.message)
else:
next_date = datetime.today()
cycle_count = self._propose_cycle_count(next_date, loc)
cycle_counts.append(cycle_count)
return cycle_counts
@api.model
def _get_turnover_moves(self, location, date):
moves = self.env['stock.move'].search([
'|', ('location_id', '=', location.id),
('location_dest_id', '=', location.id),
('date', '>', date),
('state', '=', 'done')])
return moves
@api.model
def _compute_turnover(self, move):
price = move.get_price_unit(move)
turnover = move.product_uom_qty * price
return turnover
@api.model
def _compute_rule_turnover(self, locs):
cycle_counts = []
for loc in locs:
last_inventories = self.env['stock.inventory'].search([
('location_id', '=', loc.id),
('state', 'in', ['confirm', 'done', 'draft'])]).mapped('date')
if last_inventories:
latest_inventory = sorted(last_inventories, reverse=True)[0]
moves = self._get_turnover_moves(loc, latest_inventory)
if moves:
total_turnover = 0.0
for m in moves:
turnover = self._compute_turnover(m)
total_turnover += turnover
try:
if total_turnover > \
self.turnover_inventory_value_threshold:
next_date = datetime.today()
cycle_count = self._propose_cycle_count(next_date,
loc)
cycle_counts.append(cycle_count)
except Exception as e:
raise ValidationError(_(
'Error found when comparing turnover with the '
'rule threshold. %s') % e.message)
else:
next_date = datetime.today()
cycle_count = self._propose_cycle_count(next_date, loc)
cycle_counts.append(cycle_count)
return cycle_counts
@api.model
def _compute_rule_accuracy(self, locs):
cycle_counts = []
for loc in locs:
if loc.loc_accuracy < self.accuracy_threshold:
next_date = datetime.today()
cycle_count = self._propose_cycle_count(next_date, loc)
cycle_counts.append(cycle_count)
return cycle_counts
| agpl-3.0 |
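The scheduling arithmetic in `_compute_rule_periodic` reduces to "space counts evenly across the period, never scheduling in the past"; a standalone sketch with invented numbers:

```python
# Standalone sketch of the periodic-rule date computation (values invented).
from datetime import datetime, timedelta

periodic_count_period = 90     # length of the period, in days
periodic_qty_per_period = 3    # counts wanted within that period
latest_inventory = datetime(2017, 1, 1)

period = periodic_count_period / periodic_qty_per_period  # one count every 30 days
next_date = latest_inventory + timedelta(days=period)
if next_date < datetime.today():
    next_date = datetime.today()  # overdue counts are scheduled immediately
```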
rohitwaghchaure/erpnext_smart | erpnext/support/doctype/maintenance_visit/maintenance_visit.py | 32 | 3160 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.utilities.transaction_base import TransactionBase
class MaintenanceVisit(TransactionBase):
def get_item_details(self, item_code):
return frappe.db.get_value("Item", item_code, ["item_name", "description"], as_dict=1)
def validate_serial_no(self):
for d in self.get('maintenance_visit_details'):
if d.serial_no and not frappe.db.exists("Serial No", d.serial_no):
frappe.throw(_("Serial No {0} does not exist").format(d.serial_no))
def validate(self):
self.validate_serial_no()
def update_customer_issue(self, flag):
for d in self.get('maintenance_visit_details'):
if d.prevdoc_docname and d.prevdoc_doctype == 'Customer Issue' :
if flag==1:
mntc_date = self.mntc_date
service_person = d.service_person
work_done = d.work_done
if self.completion_status == 'Fully Completed':
status = 'Closed'
elif self.completion_status == 'Partially Completed':
status = 'Work In Progress'
else:
nm = frappe.db.sql("select t1.name, t1.mntc_date, t2.service_person, t2.work_done from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.completion_status = 'Partially Completed' and t2.prevdoc_docname = %s and t1.name!=%s and t1.docstatus = 1 order by t1.name desc limit 1", (d.prevdoc_docname, self.name))
if nm:
status = 'Work In Progress'
mntc_date = nm and nm[0][1] or ''
service_person = nm and nm[0][2] or ''
work_done = nm and nm[0][3] or ''
else:
status = 'Open'
mntc_date = ''
service_person = ''
work_done = ''
frappe.db.sql("update `tabCustomer Issue` set resolution_date=%s, resolved_by=%s, resolution_details=%s, status=%s where name =%s",(mntc_date,service_person,work_done,status,d.prevdoc_docname))
def check_if_last_visit(self):
"""check if last maintenance visit against same sales order/ customer issue"""
check_for_docname = None
for d in self.get('maintenance_visit_details'):
if d.prevdoc_docname:
check_for_docname = d.prevdoc_docname
#check_for_doctype = d.prevdoc_doctype
if check_for_docname:
check = frappe.db.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.name!=%s and t2.prevdoc_docname=%s and t1.docstatus = 1 and (t1.mntc_date > %s or (t1.mntc_date = %s and t1.mntc_time > %s))", (self.name, check_for_docname, self.mntc_date, self.mntc_date, self.mntc_time))
if check:
check_lst = [x[0] for x in check]
check_lst =','.join(check_lst)
frappe.throw(_("Cancel Material Visits {0} before cancelling this Maintenance Visit").format(check_lst))
raise Exception
else:
self.update_customer_issue(0)
def on_submit(self):
self.update_customer_issue(1)
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_if_last_visit()
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
| agpl-3.0 |
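The `frappe.db.get_value` call in `get_item_details` is Frappe's standard single-row fetch; a hedged sketch of the same pattern (requires a Frappe site context, and the item code is invented):

```python
# Sketch of the frappe.db.get_value pattern used by get_item_details.
import frappe

item = frappe.db.get_value("Item", "ITEM-0001",  # hypothetical item code
                           ["item_name", "description"], as_dict=1)
if item:
    print(item.item_name, item.description)
```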
shaunbrady/boto | tests/integration/ec2containerservice/test_ec2containerservice.py | 99 | 1749 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.ec2containerservice.exceptions import ClientException
from tests.compat import unittest
class TestEC2ContainerService(unittest.TestCase):
def setUp(self):
self.ecs = boto.connect_ec2containerservice()
def test_list_clusters(self):
response = self.ecs.list_clusters()
self.assertIn('clusterArns',
response['ListClustersResponse']['ListClustersResult'])
def test_handle_not_found_exception(self):
with self.assertRaises(ClientException):
# Try to stop a task with an invalid arn.
self.ecs.stop_task(task='foo')
| mit |