repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
rjfellman/molecule | molecule/command/login.py | 1 | 4218 | # Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import pexpect
import signal
import subprocess
from molecule import util
from molecule.command import base
LOG = util.get_logger(__name__)
class Login(base.Base):
    """
    Initiates an interactive ssh session with the given host.

    Usage:
        login [<host>]
    """

    def execute(self):
        """Resolve the target host and attach an interactive login session.

        Exits the process via util.sysexit() when no host can be
        unambiguously selected.
        """
        # get list of running hosts from state
        if self.molecule._state.hosts:
            # NOTE: iteritems() — this module targets Python 2.
            hosts = [k for k, v in self.molecule._state.hosts.iteritems()]
        else:
            hosts = []
        try:
            # Nowhere to login to if there is no running host.
            if len(hosts) == 0:
                raise base.InvalidHost("There are no running hosts.")

            # Check whether a host was specified.
            if self.molecule._args['<host>'] is None:
                # One running host is perfect. Login to it.
                if len(hosts) == 1:
                    hostname = hosts[0]
                # But too many hosts is trouble as well.
                else:
                    raise base.InvalidHost(
                        "There are {} running hosts. You can only login to one at a time.\n\n"
                        "Available hosts:\n{}".format(
                            len(hosts), "\n".join(hosts)))
            else:
                # If the host was specified, try to use it.
                hostname = self.molecule._args['<host>']
                # Prefix match: allow abbreviated host names.
                match = [x for x in hosts if x.startswith(hostname)]
                if len(match) == 0:
                    # Raised so the generic "Unknown host" handler below
                    # reports it.
                    raise subprocess.CalledProcessError(1, None)
                elif len(match) != 1:
                    # If there are multiple matches, but one of them is an
                    # exact string match, assume this is the one they're
                    # looking for and use it
                    if hostname in match:
                        match = [hostname, ]
                    else:
                        raise base.InvalidHost(
                            "There are {} hosts that match '{}'. You can only login to one at a time.\n\n"
                            "Available hosts:\n{}".format(
                                len(match), hostname, "\n".join(hosts)))
                hostname = match[0]
            login_cmd = self.molecule._driver.login_cmd(hostname)
            login_args = self.molecule._driver.login_args(hostname)
        except subprocess.CalledProcessError:
            msg = "Unknown host '{}'.\n\nAvailable hosts:\n{}"
            LOG.error(msg.format(self.molecule._args['<host>'], "\n".join(
                hosts)))
            util.sysexit()
        except base.InvalidHost as e:
            LOG.error(e.message)
            util.sysexit()

        # Size the pseudo-terminal to match the current terminal.
        lines, columns = os.popen('stty size', 'r').read().split()
        dimensions = (int(lines), int(columns))
        self.molecule._pt = pexpect.spawn(
            '/usr/bin/env ' + login_cmd.format(*login_args),
            dimensions=dimensions)
        # Forward terminal-resize events to the spawned session.
        signal.signal(signal.SIGWINCH, self.molecule._sigwinch_passthrough)
        self.molecule._pt.interact()
        return None, None
| mit |
eezee-it/addons-yelizariev | sale_report_ru/models.py | 15 | 3669 | # -*- coding: utf-8 -*-
from openerp.osv import osv,fields
try:
from pytils import numeral
except ImportError:
pass
class res_partner(osv.Model):
    # Extends res.partner with fields used by the Russian sale reports.
    _inherit = "res.partner"

    def _get_default_bank_id(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field getter: first bank account of the partner, or None."""
        res = {}
        for row in self.browse(cr, uid, ids, context):
            res[row.id] = row.bank_ids and row.bank_ids[0].id or None
        return res

    _columns = {
        # Free-form "represented by ..." line printed on contracts.
        'represented_by': fields.char('Represented by', size=256, help='String for contracts'),
        'default_bank_id': fields.function(_get_default_bank_id, type='many2one', obj='res.partner.bank')
    }
# Russian plural variants (one / few / many) per currency code, in the
# 3-tuple form expected by pytils.numeral.choose_plural.
CURRENCY_RU = {
    'USD': (u"доллар", u"доллара", u"долларов"),
    'KZT': (u"тенге", u"тенге", u"тенге"),
    'RUB': (u"рубль", u"рубля", u"рублей"),
}
# Plural variants for the fractional unit; currencies missing here
# (e.g. KZT) are rendered without a cents part.
CURRENCY_CENTS_RU = {
    'USD': (u"цент", u"цента", u"центов"),
    'RUB': (u"копейка", u"копейки", u"копеек"),
}
def money_to_words(amount, code):
    """Spell out *amount* in Russian words for currency *code*.

    The whole units are written in words, the fractional part as a
    two-digit number followed by the cents word ("... 45 копеек").
    Currencies without an entry in CURRENCY_CENTS_RU get no cents part.
    """
    whole = int(amount)
    rubles_num_in_words = numeral.in_words(whole)
    rubles = numeral.choose_plural(whole, CURRENCY_RU[code])
    # BUG FIX: was round(amount - amount), which is always 0, so the
    # kopeck amount never reflected the real fractional part.
    copek_num = int(round((amount - whole) * 100))
    copek = numeral.choose_plural(copek_num, CURRENCY_CENTS_RU[code]) if code in CURRENCY_CENTS_RU else ''
    if copek:
        return ("%s %s %02d %s") % (rubles_num_in_words, rubles, copek_num, copek)
    else:
        return ("%s %s") % (rubles_num_in_words, rubles)
def _get_amount_in_words(self, cr, uid, ids, field_name, arg, context=None):
    """Functional-field getter shared by sale.order and account.invoice:
    returns amount_total spelled out in Russian words.

    Currencies not listed in CURRENCY_RU fall back to RUB word forms.
    """
    res = {}
    for row in self.browse(cr, uid, ids, context):
        code = row.currency_id.name
        if code not in CURRENCY_RU:
            code = 'RUB'
        res[row.id] = money_to_words(row.amount_total, code)
    return res
class sale_order(osv.Model):
    _inherit = 'sale.order'

    _columns = {
        'date_acceptance': fields.date('Acceptance act date', required=False, readonly=False),
        # Uses the module-level _get_amount_in_words getter above.
        'amount_total_in_words': fields.function(_get_amount_in_words, string='Amount in words', type='char'),
    }

    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        """Propagate the order date to the generated invoice as date_origin."""
        invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order, lines, context)
        invoice_vals['date_origin'] = order.date_order
        return invoice_vals
class account_invoice(osv.Model):
    _inherit = 'account.invoice'

    def _get_partner_bank_id(self, cr, uid, context=None):
        """Default for partner_bank_id: the default bank account of the
        current company's partner, or None when no company is found."""
        company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=context)
        company_obj = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        if not company_obj:
            return None
        return company_obj.partner_id.default_bank_id.id

    _columns = {
        # Same getter as on sale.order (module-level _get_amount_in_words).
        'amount_total_in_words': fields.function(_get_amount_in_words, string='Amount in words', type='char'),
        'date_origin': fields.date('Origin date', required=False, readonly=True),
    }
    _defaults = {
        'partner_bank_id': _get_partner_bank_id,
    }
#class account_invoice_line(osv.Model):
# _inherit = 'account.invoice.line'
# def _get_synonym(self, cr, uid, ids, field_name, arg, context):
# res = {}
# for row in self.browse(cr, uid, ids, context):
# res[row.id] = {}
# res[row.id]['product_uom_qty'] = row.quantity
# res[row.id]['product_uom'] = row.uos_id
# return res
#
# _columns = {
# 'product_uom_qty':fields.function(_get_synonym, multi='synonym', type='float'),
# 'product_uom':fields.function(_get_synonym, multi='synonym', type='many2one'),
# }
| lgpl-3.0 |
silenci/neutron | neutron/db/migration/alembic_migrations/versions/20c469a5f920_add_index_for_port.py | 47 | 1143 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add index for port
Revision ID: 20c469a5f920
Revises: 28a09af858a8
Create Date: 2015-04-01 04:12:49.898443
"""
# revision identifiers, used by Alembic.
revision = '20c469a5f920'
down_revision = '28a09af858a8'
from alembic import op
def upgrade():
    """Add the two composite lookup indexes on the ports table."""
    for second_column in ('device_owner', 'mac_address'):
        op.create_index(op.f('ix_ports_network_id_%s' % second_column),
                        'ports', ['network_id', second_column], unique=False)
| apache-2.0 |
PaulWay/spacewalk | client/solaris/smartpm/smart/channels/rpm_rhn.py | 2 | 3306 | #
# Copyright (c) 2005--2013 Red Hat, Inc.
#
# Written by Joel Martin <jmartin@redhat.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart import *
from smart.const import SUCCEEDED, FAILED, NEVER, ALWAYS
from smart.channel import PackageChannel
from smart.backends.rpm.header import RPMHeaderLoader
from rhn.client import rpcServer
from rhn.client import rhnAuth
class RPMRHNLoader(RPMHeaderLoader):
def __init__(self, channel):
RPMHeaderLoader.__init__(self)
self.s = rpcServer.getServer()
self.li = rhnAuth.getLoginInfo()
self.channels = self.li['X-RHN-Auth-Channels']
self.channel = channel
print "i:", "RPMRHNLoader.__init__"
# print "d:", "RPMRHNLoader.li", self.li
def loadFileProvides(self, fndict):
# do the call to get the package list
chn = []
foo = self.s.listPackages(self.channel[0], self.channel[1])
print foo
# package_list, type = self.s.listPackages(
print "m:", "RPMRHNLoader.loadFileProvides"
def reset(self):
print "m:", "RPMRHNLoader.reset"
RPMHeaderLoad.reset(self)
def getInfo(self, pkg):
print "m:", "RPMRHNLoader.getInfo"
def getSize(self,pkg):
print "m:", "RPMRHNLoader.getSize"
def getHeader(self, pkg):
print "m:", "RPMRHNLoader.getHeader"
def getCache(self):
print "m:", "RPMRHNLoader.getCache"
return self._cache
def getDigest(self):
print "m:", "RPMRHNLoader.getDigest"
return 123
class RPMRHNChannel(PackageChannel):
    """Smart package channel backed by an RHN server (Python 2 module)."""

    def __init__(self, baseurl, channel_info, *args):
        print "i:", "RPMRHNChannel"
        super(RPMRHNChannel, self).__init__(*args)
        self.channel_info = channel_info
        print "args", args

    def fetch(self, fetcher, progress):
        """Replace any existing loaders with a fresh RPMRHNLoader."""
        print "called fetch"
        self.removeLoaders()
        loader = RPMRHNLoader(self.channel_info)
        loader.setChannel(self)
        self._loaders.append(loader)
        # get package list
        # get obsolete list
        # setup a loader
        # loader probably based on RPMHeaderLoader
        # the loader will need to be able to do
        # get header across the net
        return True
def create(alias, data):
    """Channel factory entry point called by Smart's channel framework."""
    return RPMRHNChannel(data['baseurl'],
                         data['channel_info'],
                         alias,
                         data['name'],
                         data['manual'],
                         data['removable'],
                         data['priority'])
| gpl-2.0 |
ziotom78/nimcfitsio | docs/conf.py | 1 | 8197 | # -*- coding: utf-8 -*-
#
# NimCfitsio documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 15 15:56:38 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('./exts'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['nim-domain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NimCfitsio'
copyright = u'2014, Maurizio Tomasi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NimCfitsiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NimCfitsio.tex', u'NimCfitsio Documentation',
u'Maurizio Tomasi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nimcfitsio', u'NimCfitsio Documentation',
[u'Maurizio Tomasi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NimCfitsio', u'NimCfitsio Documentation',
u'Maurizio Tomasi', 'NimCfitsio', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
mcella/django | tests/auth_tests/test_signals.py | 312 | 4005 | import datetime
from django.contrib.auth import signals
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
@override_settings(USE_TZ=False,
                   PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='auth_tests.urls')
class SignalTestCase(TestCase):
    """Tests for the user_logged_in / user_logged_out / user_login_failed
    signals emitted by django.contrib.auth."""

    @classmethod
    def setUpTestData(cls):
        # Two fixture users sharing the same SHA1-hashed password
        # ('password'): a regular client (u1) and a staff member (u3).
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )

    # Signal receivers: each records the payload it received on the
    # test instance so individual tests can inspect it.
    def listener_login(self, user, **kwargs):
        self.logged_in.append(user)

    def listener_logout(self, user, **kwargs):
        self.logged_out.append(user)

    def listener_login_failed(self, sender, credentials, **kwargs):
        self.login_failed.append(credentials)

    def setUp(self):
        """Set up the listeners and reset the logged in/logged out counters"""
        self.logged_in = []
        self.logged_out = []
        self.login_failed = []
        signals.user_logged_in.connect(self.listener_login)
        signals.user_logged_out.connect(self.listener_logout)
        signals.user_login_failed.connect(self.listener_login_failed)

    def tearDown(self):
        """Disconnect the listeners"""
        signals.user_logged_in.disconnect(self.listener_login)
        signals.user_logged_out.disconnect(self.listener_logout)
        signals.user_login_failed.disconnect(self.listener_login_failed)

    def test_login(self):
        """A failed login fires only user_login_failed (with the password
        cleansed); a successful one fires only user_logged_in."""
        # Only a successful login will trigger the success signal.
        self.client.login(username='testclient', password='bad')
        self.assertEqual(len(self.logged_in), 0)
        self.assertEqual(len(self.login_failed), 1)
        self.assertEqual(self.login_failed[0]['username'], 'testclient')
        # verify the password is cleansed
        self.assertIn('***', self.login_failed[0]['password'])

        # Like this:
        self.client.login(username='testclient', password='password')
        self.assertEqual(len(self.logged_in), 1)
        self.assertEqual(self.logged_in[0].username, 'testclient')

        # Ensure there were no more failures.
        self.assertEqual(len(self.login_failed), 1)

    def test_logout_anonymous(self):
        # The log_out function will still trigger the signal for anonymous
        # users.
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0], None)

    def test_logout(self):
        """Logging out an authenticated user sends user_logged_out with
        that user."""
        self.client.login(username='testclient', password='password')
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0].username, 'testclient')

    def test_update_last_login(self):
        """Ensure that only `last_login` is updated in `update_last_login`"""
        user = self.u3
        old_last_login = user.last_login

        # A dirty, unsaved username must NOT be persisted by the
        # update_last_login receiver.
        user.username = "This username shouldn't get saved"
        request = RequestFactory().get('/login')
        signals.user_logged_in.send(sender=user.__class__, request=request,
                                    user=user)
        user = User.objects.get(pk=self.u3.pk)
        self.assertEqual(user.username, 'staff')
        self.assertNotEqual(user.last_login, old_last_login)
| bsd-3-clause |
cameronobrien/BroadsideBot | app/constants.py | 1 | 3104 | with open('data/bot_key.txt') as f:
CLIENT_ID = f.read().strip()
KHALED_CHOICES = ["They don't want you to eat!", "Bless Up", "All Praise to the most high",
"Some people can't handle success...heh...I can!", "Follow me on the pathway to more success",
"Asahd let's hit the studio", "Asahd send me this video!", "Honey, did the Drake vocals come in yet?!?",
"Everything is top secret.", "Always have faith, always have hope.", "The key is to make it",
"Smh, they mad when you have joy...", "Key to more success is a clean heart and a clean face.",
"Baby, you smart! You loyal! You a genius!", "They'll try to close the door on you... Just open it.",
"Another one. No, another two!", "Another one.", "Cocoa Butter is the Key.",
"Congratulations, you played yourself.", "Don't ever play yourself.", "They don't want you to jetski, so we on the jetski",
"Miami finga' lickin", "Big up!","All I do is WIN, WIN, WIN no matter what","I remember when I ain’t have a jacuzzi","Watch your back, but more importantly when you get out the shower, dry your back. It’s a cold world out there.","Be a star. Be a Superstar","Almond milk + cinnamon crunch = major key to success.","The key is to enjoy life, because they don’t want you to enjoy life.","To succeed, you must believe. When you believe, you will succeed.","In life everyone has a choice. The key is: make a right choice.","We have to get money. We have no choice. It cost money to eat.","The key is: never fold."]
ZOLTAR_CHOICES = ["As I see it, yes", "It is certain", "It is decidedly so", "Most likely",
"Outlook good", "Signs point to yes", "Without a doubt", "Yes", "Definitely",
"You may rely on it", "Reply hazy, try again", "Ask again later",
"Better not tell you now", "Cannot predict now'", "My reply is no",
"Don't count on it", "Very doubtful"]
IMPLANT_TYPES = ["alpha", "beta", "gamma", "delta", "epsilon", "omega"]
# BUG FIX: this previously read `YES_NO["yes","no"]` — an indexing
# expression on an undefined name, raising NameError at import time —
# instead of an assignment.
YES_NO = ["yes", "no"]
QUOTE_LIST = []
# Used for validating in add_quote
class ValidationError(Exception):
    """Raised by add_quote when a submitted quote is malformed."""
# Quotes #
# Open quotes.txt and read the contents into quote_list
def update_quotes():
    """Reload QUOTE_LIST from quotes.txt.

    BUG FIX: the old code called line.rstrip('\n') and discarded the
    result, so quotes were stored with their trailing newline.  It also
    never cleared QUOTE_LIST, so every call (add_quote calls this after
    each append) duplicated the entire file's contents.
    """
    del QUOTE_LIST[:]
    with open('quotes.txt', 'r') as f:
        for line in f:
            QUOTE_LIST.append(line.rstrip('\n'))
def add_quote(msg):
    """Parse a message like `!add "some quote" - author` and append it
    to quotes.txt, then refresh QUOTE_LIST.

    Returns True on success.  Raises ValidationError when the message
    lacks quotation marks or a trailing `- author` part.
    """
    # Validate that the given quote has quotation marks and an author
    if '"' in msg:
        newMsg = msg.split('"')
        if '-' in newMsg[len(newMsg)-1]:
            # Write the quote to quotes.txt and strip the !add prefix
            # (split element 0 is everything before the first quote mark).
            with open('quotes.txt', 'a') as f:
                f.write('\n')
                for i in range(1, len(newMsg)):
                    f.write(newMsg[i] + " ")
            update_quotes()
            return True
        else:
            raise ValidationError('Please add an author at the end of your quote.')
    else:
        raise ValidationError('Please use quotation marks when adding a quote.')
| mit |
natbraun/biggraphite | tests/test_graphite.py | 2 | 5274 | #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific lanbg_guage governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import unittest
from biggraphite import test_utils as bg_test_utils
from biggraphite import accessor as bg_accessor
# This needs to run before we import the plugin.
bg_test_utils.prepare_graphite()
from biggraphite.plugins import graphite as bg_graphite # noqa
from graphite import readers # noqa
_METRIC_NAME = "test_metric"
# TODO: Move this to test_utils.
def _make_easily_queryable_points(start, end, period):
    """Return points that aggregate easily.

    Each period holds the values -1, +1 and (period_index * 3), so:
      - averaging over each period gives range((end-start)/period),
      - taking last or max for each period gives [x*3 for x in range((end-start)/period)],
      - taking min for each period gives [-1] * ((end-start)/period).
    """
    assert period % 4 == 0
    assert start % period == 0
    res = []
    # xrange: this test module still targets Python 2.
    for t in xrange(start, end, 4):
        current_period = (t - start) // period
        # A fourth of points are -1
        res.append((t+0, -1))
        # A fourth of points are +1
        res.append((t+1, +1))
        # A fourth of points carry three times the period index
        res.append((t+2, current_period * 3))
        # A fourth of points are missing
    return res
class TestReader(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests for the Graphite Reader plugin, backed by a fake accessor."""

    # One hour of points starting ten days in, with a 1440x60s retention.
    _POINTS_START = 3600 * 24 * 10
    _POINTS_END = _POINTS_START + 3600
    _RETENTION = bg_accessor.Retention.from_string("1440*60s")
    _POINTS = _make_easily_queryable_points(
        start=_POINTS_START, end=_POINTS_END, period=_RETENTION[0].precision,
    )
    _METRIC = bg_test_utils.make_metric(_METRIC_NAME, retention=_RETENTION)

    def setUp(self):
        super(TestReader, self).setUp()
        self.accessor.connect()
        self.accessor.create_metric(self._METRIC)
        self.accessor.insert_points(self._METRIC, self._POINTS)
        self.reader = bg_graphite.Reader(self.accessor, self.metadata_cache, _METRIC_NAME)

    def fetch(self, *args, **kwargs):
        """Call self.reader.fetch, resolving deferred results if needed."""
        result = self.reader.fetch(*args, **kwargs)
        # Readers can return a list or an object.
        if isinstance(result, readers.FetchInProgress):
            result = result.waitForResults()
        return result

    def test_fresh_read(self):
        """Averaged points come back, with start/end rounded to precision."""
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START+3,
            end_time=self._POINTS_END-3,
            now=self._POINTS_END+10,
        )
        self.assertEqual(self._RETENTION[0].precision, step)
        # We expect these to have been rounded to match precision.
        self.assertEqual(self._POINTS_START, start)
        self.assertEqual(self._POINTS_END, end)

        # Per-period average is the period index (Python 2: range() is a list).
        expected_points = range((end-start)//step)
        self.assertEqual(expected_points, points)

    def test_get_intervals(self):
        # start and end are the expected results, aligned on the precision
        now_rounded = 10000000 * self._RETENTION[0].precision
        now = now_rounded - 3
        res = self.reader.get_intervals(now=now)

        self.assertEqual(self._RETENTION.duration, res.size)
        self.assertEqual(1, len(res.intervals))
        self.assertEqual(now_rounded, res.intervals[0].end)
class FakeFindQuery(object):
    """Minimal stand-in for Graphite's FindQuery.

    The real Query class pulls in too much of Django, so tests use this
    bare holder exposing only the ``pattern`` attribute.
    """

    def __init__(self, pattern):
        self.pattern = pattern
class TestFinder(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests glob resolution in the Graphite Finder plugin."""

    def setUp(self):
        super(TestFinder, self).setUp()
        # Seed a small metric tree: a, a.a, a.b.c, x.y.
        for metric_name in "a", "a.a", "a.b.c", "x.y":
            metric = bg_test_utils.make_metric(metric_name)
            self.accessor.create_metric(metric)
        self.finder = bg_graphite.Finder(
            accessor=self.accessor,
            metadata_cache=self.metadata_cache,
        )

    def find_nodes(self, pattern):
        """Run the finder on a glob pattern wrapped in a FakeFindQuery."""
        return self.finder.find_nodes(FakeFindQuery(pattern))

    def assertMatch(self, glob, branches, leaves):
        """Assert glob resolves to exactly the given branch and leaf paths
        (order-insensitive) and that every path is a str."""
        found = list(self.find_nodes(glob))
        found_branches = [node.path for node in found if not node.is_leaf]
        found_leaves = [node.path for node in found if node.is_leaf]

        for path in found_branches + found_leaves:
            self.assertIsInstance(path, str)
        self.assertItemsEqual(found_branches, branches)
        self.assertItemsEqual(found_leaves, leaves)

    def test_find_nodes(self):
        self.assertMatch("a", ["a"], ["a"])
        self.assertMatch("a.*", ["a.b"], ["a.a"])
        self.assertMatch("*.{a,b,c,y,z}", ["a.b"], ["a.a", "x.y"])
        self.assertMatch("?.[a-c]", ["a.b"], ["a.a"])
        self.assertMatch("?.[a-z]", ["a.b"], ["a.a", "x.y"])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
masayukig/tempest | tempest/lib/services/network/ports_client.py | 3 | 3063 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.network import base
class PortsClient(base.BaseNetworkClient):
    """REST client for the Neutron v2 ``ports`` resource."""

    def create_port(self, **kwargs):
        """Create a port on a network.

        Available parameters are documented in the official API reference:
        https://docs.openstack.org/api-ref/network/v2/index.html#create-port
        """
        return self.create_resource('/ports', {'port': kwargs})

    def update_port(self, port_id, **kwargs):
        """Update an existing port.

        Available parameters are documented in the official API reference:
        https://docs.openstack.org/api-ref/network/v2/index.html#update-port
        """
        return self.update_resource('/ports/%s' % port_id, {'port': kwargs})

    def show_port(self, port_id, **fields):
        """Show details of a single port.

        Available parameters are documented in the official API reference:
        https://docs.openstack.org/api-ref/network/v2/index.html#show-port-details
        """
        return self.show_resource('/ports/%s' % port_id, **fields)

    def delete_port(self, port_id):
        """Delete a port.

        Available parameters are documented in the official API reference:
        https://docs.openstack.org/api-ref/network/v2/index.html#delete-port
        """
        return self.delete_resource('/ports/%s' % port_id)

    def list_ports(self, **filters):
        """List the ports visible to the tenant.

        Available parameters are documented in the official API reference:
        https://docs.openstack.org/api-ref/network/v2/index.html#list-ports
        """
        return self.list_resources('/ports', **filters)

    def create_bulk_ports(self, **kwargs):
        """Create several ports in one request.

        Available parameters are documented in the official API reference:
        https://docs.openstack.org/api-ref/network/v2/index.html#bulk-create-ports
        """
        return self.create_resource('/ports', kwargs)

    def is_resource_deleted(self, id):
        """Return True once the port no longer exists."""
        try:
            self.show_port(id)
            return False
        except lib_exc.NotFound:
            return True
| apache-2.0 |
sysadminmatmoz/OCB | addons/l10n_fr_hr_payroll/__openerp__.py | 24 | 1219 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'French Payroll',
'category': 'Localization/Payroll',
'author': 'Yannick Buron (SYNERPGY)',
'depends': ['hr_payroll', 'l10n_fr'],
'version': '1.0',
'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
- All main contributions rules for French payslip, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
- Continue to integrate the contribution. Only the main contribution are
currently implemented
- Remake the report under webkit
- The payslip.line with appears_in_payslip = False should appears in the
payslip interface, but not in the payslip report
""",
'active': False,
'data': [
'l10n_fr_hr_payroll_view.xml',
'l10n_fr_hr_payroll_data.xml',
'views/report_l10nfrfichepaye.xml',
'l10n_fr_hr_payroll_reports.xml',
],
'installable': True
}
| agpl-3.0 |
foobarbazblarg/stayclean | stayclean-2020-december/venv/lib/python3.8/site-packages/setuptools/_distutils/command/bdist_wininst.py | 11 | 16030 | """distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
import os
import sys
import warnings
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst(Command):
    """Create a self-extracting Windows .exe installer for a distribution.

    NOTE: deprecated since Python 3.8 in favour of bdist_wheel (see the
    DeprecationWarning emitted from __init__); kept for backward
    compatibility only.
    """

    description = "create an executable installer for MS Windows"

    user_options = [('bdist-dir=', None,
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('target-version=', None,
                     "require a specific python version" +
                     " on the target system"),
                    ('no-target-compile', 'c',
                     "do not compile .py to .pyc on the target system"),
                    ('no-target-optimize', 'o',
                     "do not compile .py to .pyo (optimized) "
                     "on the target system"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('bitmap=', 'b',
                     "bitmap to use for the installer instead of python-powered logo"),
                    ('title=', 't',
                     "title to display on the installer background instead of default"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('install-script=', None,
                     "basename of installation script to be run after "
                     "installation or before deinstallation"),
                    ('pre-install-script=', None,
                     "Fully qualified filename of a script to be run before "
                     "any files are installed. This script need not be in the "
                     "distribution"),
                    ('user-access-control=', None,
                     "specify Vista's UAC handling - 'none'/default=no "
                     "handling, 'auto'=use UAC if target Python installed for "
                     "all users, 'force'=always use UAC"),
                    ]

    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
                       'skip-build']

    # bpo-10945: bdist_wininst requires mbcs encoding only available on Windows
    _unsupported = (sys.platform != "win32")

    def __init__(self, *args, **kw):
        """Initialize the command and emit the deprecation warning."""
        super().__init__(*args, **kw)
        warnings.warn("bdist_wininst command is deprecated since Python 3.8, "
                      "use bdist_wheel (wheel packages) instead",
                      DeprecationWarning, 2)

    def initialize_options(self):
        """Set every option to its 'undefined' default."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.no_target_compile = 0
        self.no_target_optimize = 0
        self.target_version = None
        self.dist_dir = None
        self.bitmap = None
        self.title = None
        self.skip_build = None
        self.install_script = None
        self.pre_install_script = None
        self.user_access_control = None

    def finalize_options(self):
        """Resolve option defaults and validate cross-option constraints."""
        self.set_undefined_options('bdist', ('skip_build', 'skip_build'))

        if self.bdist_dir is None:
            if self.skip_build and self.plat_name:
                # If build is skipped and plat_name is overridden, bdist will
                # not see the correct 'plat_name' - so set that up manually.
                bdist = self.distribution.get_command_obj('bdist')
                bdist.plat_name = self.plat_name
                # next the command will be initialized using that name
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wininst')

        if not self.target_version:
            self.target_version = ""

        # When extensions are being built, the installer can only target the
        # interpreter version that is doing the build.
        if not self.skip_build and self.distribution.has_ext_modules():
            short_version = get_python_version()
            if self.target_version and self.target_version != short_version:
                raise DistutilsOptionError(
                    "target version can only be %s, or the '--skip-build'" \
                    " option must be specified" % (short_version,))
            self.target_version = short_version

        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'),
                                   )

        if self.install_script:
            # install_script must name one of the distribution's scripts.
            for script in self.distribution.scripts:
                if self.install_script == os.path.basename(script):
                    break
            else:
                raise DistutilsOptionError(
                    "install_script '%s' not found in scripts"
                    % self.install_script)

    def run(self):
        """Build, pseudo-install, zip the tree, and wrap it in an .exe."""
        if (sys.platform != "win32" and
            (self.distribution.has_ext_modules() or
             self.distribution.has_c_libraries())):
            raise DistutilsPlatformError \
                  ("distribution contains extensions and/or C libraries; "
                   "must be compiled on a Windows 32 platform")

        if not self.skip_build:
            self.run_command('build')

        # Install into a scratch tree rooted at bdist_dir.
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0
        install.plat_name = self.plat_name

        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0

        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = '%d.%d' % sys.version_info[:2]
            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)

        # Use a custom scheme for the zip-file, because we have to decide
        # at installation time which scheme to use.
        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
            value = key.upper()
            if key == 'headers':
                value = value + '/Include/$dist_name'
            setattr(install,
                    'install_' + key,
                    value)

        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()

        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))

        install.run()

        del sys.path[0]

        # And make an archive relative to the root of the
        # pseudo-installation tree.
        from tempfile import mktemp

        archive_basename = mktemp()
        fullname = self.distribution.get_fullname()
        arcname = self.make_archive(archive_basename, "zip",
                                    root_dir=self.bdist_dir)
        # create an exe containing the zip-file
        self.create_exe(arcname, fullname, self.bitmap)
        if self.distribution.has_ext_modules():
            pyversion = get_python_version()
        else:
            pyversion = 'any'
        self.distribution.dist_files.append(('bdist_wininst', pyversion,
                                             self.get_installer_filename(fullname)))
        # remove the zip-file again
        log.debug("removing temporary file '%s'", arcname)
        os.remove(arcname)

        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)

    def get_inidata(self):
        """Return the INI-format configuration text embedded in the exe."""
        # Return data describing the installation.
        lines = []
        metadata = self.distribution.metadata

        # Write the [metadata] section.
        lines.append("[metadata]")

        # 'info' will be displayed in the installer's dialog box,
        # describing the items to be installed.
        info = (metadata.long_description or '') + '\n'

        # Escape newline characters
        def escape(s):
            return s.replace("\n", "\\n")

        for name in ["author", "author_email", "description", "maintainer",
                     "maintainer_email", "name", "url", "version"]:
            data = getattr(metadata, name, "")
            if data:
                info = info + ("\n %s: %s" % \
                               (name.capitalize(), escape(data)))
                lines.append("%s=%s" % (name, escape(data)))

        # The [setup] section contains entries controlling
        # the installer runtime.
        lines.append("\n[Setup]")
        if self.install_script:
            lines.append("install_script=%s" % self.install_script)
        lines.append("info=%s" % escape(info))
        lines.append("target_compile=%d" % (not self.no_target_compile))
        lines.append("target_optimize=%d" % (not self.no_target_optimize))
        if self.target_version:
            lines.append("target_version=%s" % self.target_version)
        if self.user_access_control:
            lines.append("user_access_control=%s" % self.user_access_control)

        title = self.title or self.distribution.get_fullname()
        lines.append("title=%s" % escape(title))
        import time
        import distutils
        build_info = "Built %s with distutils-%s" % \
                     (time.ctime(time.time()), distutils.__version__)
        lines.append("build_info=%s" % build_info)
        return "\n".join(lines)

    def create_exe(self, arcname, fullname, bitmap=None):
        """Concatenate stub exe + optional bitmap + config + zip archive.

        The binary layout (and the trailing struct header) must match what
        the wininst stub expects; see the magic-number comment below.
        """
        import struct

        self.mkpath(self.dist_dir)

        cfgdata = self.get_inidata()

        installer_name = self.get_installer_filename(fullname)
        self.announce("creating %s" % installer_name)

        if bitmap:
            with open(bitmap, "rb") as f:
                bitmapdata = f.read()
            bitmaplen = len(bitmapdata)
        else:
            bitmaplen = 0

        with open(installer_name, "wb") as file:
            file.write(self.get_exe_bytes())
            if bitmap:
                file.write(bitmapdata)

            # Convert cfgdata from unicode to ascii, mbcs encoded
            if isinstance(cfgdata, str):
                cfgdata = cfgdata.encode("mbcs")

            # Append the pre-install script
            cfgdata = cfgdata + b"\0"
            if self.pre_install_script:
                # We need to normalize newlines, so we open in text mode and
                # convert back to bytes. "latin-1" simply avoids any possible
                # failures.
                with open(self.pre_install_script, "r",
                          encoding="latin-1") as script:
                    script_data = script.read().encode("latin-1")
                cfgdata = cfgdata + script_data + b"\n\0"
            else:
                # empty pre-install script
                cfgdata = cfgdata + b"\0"
            file.write(cfgdata)

            # The 'magic number' 0x1234567B is used to make sure that the
            # binary layout of 'cfgdata' is what the wininst.exe binary
            # expects. If the layout changes, increment that number, make
            # the corresponding changes to the wininst.exe sources, and
            # recompile them.
            header = struct.pack("<iii",
                                 0x1234567B,  # tag
                                 len(cfgdata),  # length
                                 bitmaplen,  # number of bytes in bitmap
                                 )
            file.write(header)
            with open(arcname, "rb") as f:
                file.write(f.read())

    def get_installer_filename(self, fullname):
        """Return the output .exe path (optionally version-qualified)."""
        # Factored out to allow overriding in subclasses
        if self.target_version:
            # if we create an installer for a specific python version,
            # it's better to include this in the name
            installer_name = os.path.join(self.dist_dir,
                                          "%s.%s-py%s.exe" %
                                          (fullname, self.plat_name, self.target_version))
        else:
            installer_name = os.path.join(self.dist_dir,
                                          "%s.%s.exe" % (fullname, self.plat_name))
        return installer_name

    def get_exe_bytes(self):
        """Return the bytes of the wininst stub matching the target's CRT."""
        # If a target-version other than the current version has been
        # specified, then using the MSVC version from *this* build is no good.
        # Without actually finding and executing the target version and parsing
        # its sys.version, we just hard-code our knowledge of old versions.
        # NOTE: Possible alternative is to allow "--target-version" to
        # specify a Python executable rather than a simple version string.
        # We can then execute this program to obtain any info we need, such
        # as the real sys.version string for the build.
        cur_version = get_python_version()

        # If the target version is *later* than us, then we assume they
        # use what we use
        # string compares seem wrong, but are what sysconfig.py itself uses
        if self.target_version and self.target_version < cur_version:
            if self.target_version < "2.4":
                bv = '6.0'
            elif self.target_version == "2.4":
                bv = '7.1'
            elif self.target_version == "2.5":
                bv = '8.0'
            elif self.target_version <= "3.2":
                bv = '9.0'
            elif self.target_version <= "3.4":
                bv = '10.0'
            else:
                bv = '14.0'
        else:
            # for current version - use authoritative check.
            try:
                from msvcrt import CRT_ASSEMBLY_VERSION
            except ImportError:
                # cross-building, so assume the latest version
                bv = '14.0'
            else:
                # as far as we know, CRT is binary compatible based on
                # the first field, so assume 'x.0' until proven otherwise
                major = CRT_ASSEMBLY_VERSION.partition('.')[0]
                bv = major + '.0'

        # wininst-x.y.exe is in the same directory as this file
        directory = os.path.dirname(__file__)
        # we must use a wininst-x.y.exe built with the same C compiler
        # used for python. XXX What about mingw, borland, and so on?

        # if plat_name starts with "win" but is not "win32"
        # we want to strip "win" and leave the rest (e.g. -amd64)
        # for all other cases, we don't want any suffix
        if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
            sfix = self.plat_name[3:]
        else:
            sfix = ''

        filename = os.path.join(directory, "wininst-%s%s.exe" % (bv, sfix))
        f = open(filename, "rb")

        try:
            return f.read()
        finally:
            f.close()
| mit |
jinghaomiao/apollo | modules/tools/mapshow/libs/planning.py | 3 | 20994 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
import numpy as np
from modules.planning.proto import planning_internal_pb2
class Planning:
    """Extracts plot-ready data series from Apollo planning debug protobufs.

    Each ``compute_*`` method parses one aspect of the most recent planning
    message into plain Python containers; each ``replot_*`` method pushes
    those containers into the matplotlib artists supplied by the caller.
    Per-aspect locks keep the message-processing thread and the plotting
    thread from reading/writing the same series concurrently.
    """

    def __init__(self, planning_pb=None):
        """Initialize empty data series.

        :param planning_pb: optional initial planning protobuf message.
        """
        self.data_lock = threading.Lock()
        self.init_point_lock = threading.Lock()
        self.planning_pb = planning_pb

        # Path (x, y) series keyed by path name.
        self.path_data_lock = threading.Lock()
        self.path_data_x = {}
        self.path_data_y = {}

        # Speed (t, v) series keyed by speed-plan name.
        self.speed_data_lock = threading.Lock()
        self.speed_data_time = {}
        self.speed_data_val = {}

        # Bounded histories of the last N published trajectories.
        self.traj_data_lock = threading.Lock()
        self.traj_speed_history_len = 30
        self.traj_speed_t_history = []
        self.traj_speed_v_history = []
        self.traj_acc_history_len = 30
        self.traj_acc_t_history = []
        self.traj_acc_a_history = []
        self.traj_path_history_len = 30
        self.traj_path_x_history = []
        self.traj_path_y_history = []

        # ST-graph series, keyed by st-graph name.
        self.st_data_lock = threading.Lock()
        self.st_curve_s = {}
        self.st_curve_t = {}
        self.st_curve_v = {}
        self.st_data_boundary_s = {}
        self.st_data_boundary_t = {}
        self.st_data_boundary_type = {}
        self.st_speed_limit_s = {}
        self.st_speed_limit_v = {}
        self.st_speed_constraint_s = {}
        self.st_speed_constraint_lower = {}
        self.st_speed_constraint_upper = {}

        # SL-frame series (single frame, flat lists).
        self.sl_data_lock = threading.Lock()
        self.sl_sampled_s = []
        self.sl_static_obstacle_lower_boundary = []
        self.sl_static_obstacle_upper_boundary = []
        self.sl_dynamic_obstacle_lower_boundary = []
        self.sl_dynamic_obstacle_upper_boundary = []
        self.sl_map_lower_boundary = []
        self.sl_map_upper_boundary = []
        self.sl_path_s = []
        self.sl_path_l = []
        self.sl_aggregated_boundary_low_l = []
        self.sl_aggregated_boundary_high_l = []
        self.sl_aggregated_boundary_s = []

        # DP/QP kernel reference lines, keyed by st-graph name.
        self.kernel_cruise_t = {}
        self.kernel_cruise_s = {}
        self.kernel_follow_t = {}
        self.kernel_follow_s = {}

        self.init_point_x = []
        self.init_point_y = []

    def update_planning_pb(self, planning_pb):
        """Replace the cached planning protobuf with a newer message."""
        self.planning_pb = planning_pb

    def compute_init_point(self):
        """Cache the planning init point (x, y) under its lock."""
        self.init_point_lock.acquire()
        init_point = self.planning_pb.debug.planning_data.init_point
        self.init_point_x = [init_point.path_point.x]
        self.init_point_y = [init_point.path_point.y]
        self.init_point_lock.release()

    def compute_sl_data(self):
        """Parse SL-frame boundaries and the SL path from the protobuf."""
        sl_sampled_s = []
        sl_map_lower_boundary = []
        sl_map_upper_boundary = []
        sl_static_obstacle_lower_boundary = []
        sl_static_obstacle_upper_boundary = []
        sl_dynamic_obstacle_lower_boundary = []
        sl_dynamic_obstacle_upper_boundary = []
        sl_path_s = []
        sl_path_l = []
        sl_aggregated_boundary_low_l = []
        sl_aggregated_boundary_high_l = []
        sl_aggregated_boundary_s = []

        for sl_frame in self.planning_pb.debug.planning_data.sl_frame:
            for s in sl_frame.sampled_s:
                sl_sampled_s.append(s)
            # Map bounds outside [-10, 10] are clamped to +/-100 so they read
            # as an off-scale sentinel instead of distorting the plot.
            for l in sl_frame.map_lower_bound:
                if (l > 10 or l < -10):
                    sl_map_lower_boundary.append(100 * l // abs(l))
                else:
                    sl_map_lower_boundary.append(l)
            for l in sl_frame.map_upper_bound:
                if (l > 10 or l < -10):
                    sl_map_upper_boundary.append(100 * l // abs(l))
                else:
                    sl_map_upper_boundary.append(l)
            for l in sl_frame.static_obstacle_lower_bound:
                sl_static_obstacle_lower_boundary.append(l)
            for l in sl_frame.static_obstacle_upper_bound:
                sl_static_obstacle_upper_boundary.append(l)
            for l in sl_frame.dynamic_obstacle_lower_bound:
                sl_dynamic_obstacle_lower_boundary.append(l)
            for l in sl_frame.dynamic_obstacle_upper_bound:
                sl_dynamic_obstacle_upper_boundary.append(l)
            for slpoint in sl_frame.sl_path:
                sl_path_s.append(slpoint.s)
                sl_path_l.append(slpoint.l)
            for l in sl_frame.aggregated_boundary_low:
                sl_aggregated_boundary_low_l.append(l)
            for l in sl_frame.aggregated_boundary_high:
                sl_aggregated_boundary_high_l.append(l)
            for s in sl_frame.aggregated_boundary_s:
                sl_aggregated_boundary_s.append(s)

        # Publish all series atomically under the SL lock.
        self.sl_data_lock.acquire()
        self.sl_sampled_s = sl_sampled_s
        self.sl_map_upper_boundary = sl_map_upper_boundary
        self.sl_map_lower_boundary = sl_map_lower_boundary
        self.sl_static_obstacle_lower_boundary = sl_static_obstacle_lower_boundary
        self.sl_static_obstacle_upper_boundary = sl_static_obstacle_upper_boundary
        self.sl_dynamic_obstacle_lower_boundary = sl_dynamic_obstacle_lower_boundary
        self.sl_dynamic_obstacle_upper_boundary = sl_dynamic_obstacle_upper_boundary
        self.sl_path_s = sl_path_s
        self.sl_path_l = sl_path_l
        self.sl_aggregated_boundary_low_l = sl_aggregated_boundary_low_l
        self.sl_aggregated_boundary_high_l = sl_aggregated_boundary_high_l
        self.sl_aggregated_boundary_s = sl_aggregated_boundary_s
        self.sl_data_lock.release()

    def compute_st_data(self):
        """Parse ST graphs: boundaries, speed profile/limits, kernel refs."""
        st_data_boundary_s = {}
        st_data_boundary_t = {}
        st_curve_s = {}
        st_curve_t = {}
        st_curve_v = {}
        st_data_boundary_type = {}
        st_speed_limit_s = {}
        st_speed_limit_v = {}
        st_speed_constraint_s = {}
        st_speed_constraint_lower = {}
        st_speed_constraint_upper = {}
        kernel_cruise_t = {}
        kernel_cruise_s = {}
        kernel_follow_t = {}
        kernel_follow_s = {}

        for st_graph in self.planning_pb.debug.planning_data.st_graph:
            st_data_boundary_s[st_graph.name] = {}
            st_data_boundary_t[st_graph.name] = {}
            st_data_boundary_type[st_graph.name] = {}
            for boundary in st_graph.boundary:
                st_data_boundary_type[st_graph.name][boundary.name] \
                    = planning_internal_pb2.StGraphBoundaryDebug.StBoundaryType.Name(
                        boundary.type)
                st_data_boundary_s[st_graph.name][boundary.name] = []
                st_data_boundary_t[st_graph.name][boundary.name] = []
                for point in boundary.point:
                    st_data_boundary_s[st_graph.name][boundary.name] \
                        .append(point.s)
                    st_data_boundary_t[st_graph.name][boundary.name] \
                        .append(point.t)
                # Repeat the first vertex so the boundary polygon closes.
                st_data_boundary_s[st_graph.name][boundary.name].append(
                    st_data_boundary_s[st_graph.name][boundary.name][0])
                st_data_boundary_t[st_graph.name][boundary.name].append(
                    st_data_boundary_t[st_graph.name][boundary.name][0])

            st_curve_s[st_graph.name] = []
            st_curve_t[st_graph.name] = []
            st_curve_v[st_graph.name] = []
            for point in st_graph.speed_profile:
                st_curve_s[st_graph.name].append(point.s)
                st_curve_t[st_graph.name].append(point.t)
                st_curve_v[st_graph.name].append(point.v)

            st_speed_limit_s[st_graph.name] = []
            st_speed_limit_v[st_graph.name] = []
            for point in st_graph.speed_limit:
                st_speed_limit_s[st_graph.name].append(point.s)
                st_speed_limit_v[st_graph.name].append(point.v)

            st_speed_constraint_s[st_graph.name] = []
            st_speed_constraint_lower[st_graph.name] = []
            st_speed_constraint_upper[st_graph.name] = []
            speed_constraint = st_graph.speed_constraint
            # Constraints are given over t; interpolate onto s using the
            # speed profile so they can be plotted in the s domain.
            interp_s_set = []
            for t in speed_constraint.t:
                interp_s = np.interp(t, st_curve_t[st_graph.name],
                                     st_curve_s[st_graph.name])
                interp_s_set.append(interp_s)
            st_speed_constraint_s[st_graph.name].extend(interp_s_set)
            st_speed_constraint_lower[st_graph.name].extend(
                speed_constraint.lower_bound)
            st_speed_constraint_upper[st_graph.name].extend(
                speed_constraint.upper_bound)

            kernel_cruise_t[st_graph.name] = []
            kernel_cruise_s[st_graph.name] = []
            kernel_cruise = st_graph.kernel_cruise_ref
            kernel_cruise_t[st_graph.name].append(kernel_cruise.t)
            kernel_cruise_s[st_graph.name].append(kernel_cruise.cruise_line_s)

            kernel_follow_t[st_graph.name] = []
            kernel_follow_s[st_graph.name] = []
            kernel_follow = st_graph.kernel_follow_ref
            kernel_follow_t[st_graph.name].append(kernel_follow.t)
            kernel_follow_s[st_graph.name].append(kernel_follow.follow_line_s)

        # Publish all series atomically under the ST lock.
        self.st_data_lock.acquire()
        self.st_data_boundary_s = st_data_boundary_s
        self.st_data_boundary_t = st_data_boundary_t
        self.st_curve_s = st_curve_s
        self.st_curve_t = st_curve_t
        self.st_curve_v = st_curve_v
        self.st_speed_limit_v = st_speed_limit_v
        self.st_speed_limit_s = st_speed_limit_s
        self.st_data_boundary_type = st_data_boundary_type
        self.st_speed_constraint_s = st_speed_constraint_s
        self.st_speed_constraint_lower = st_speed_constraint_lower
        self.st_speed_constraint_upper = st_speed_constraint_upper
        self.kernel_cruise_t = kernel_cruise_t
        self.kernel_cruise_s = kernel_cruise_s
        self.kernel_follow_t = kernel_follow_t
        self.kernel_follow_s = kernel_follow_s
        self.st_data_lock.release()

    def compute_traj_data(self):
        """Append the latest trajectory to the bounded speed/acc/path history."""
        traj_speed_t = []
        traj_speed_v = []
        traj_acc_t = []
        traj_acc_a = []
        traj_path_x = []
        traj_path_y = []
        base_time = self.planning_pb.header.timestamp_sec
        for trajectory_point in self.planning_pb.trajectory_point:
            traj_acc_t.append(base_time + trajectory_point.relative_time)
            traj_acc_a.append(trajectory_point.a)
            traj_speed_t.append(base_time + trajectory_point.relative_time)
            traj_speed_v.append(trajectory_point.v)
            traj_path_x.append(trajectory_point.path_point.x)
            traj_path_y.append(trajectory_point.path_point.y)

        self.traj_data_lock.acquire()
        # Each history keeps at most *_history_len entries; older entries
        # are dropped from the front.
        self.traj_speed_t_history.append(traj_speed_t)
        self.traj_speed_v_history.append(traj_speed_v)
        if len(self.traj_speed_t_history) > self.traj_speed_history_len:
            self.traj_speed_t_history = \
                self.traj_speed_t_history[len(self.traj_speed_t_history)
                                          - self.traj_speed_history_len:]
            self.traj_speed_v_history = \
                self.traj_speed_v_history[len(self.traj_speed_v_history)
                                          - self.traj_speed_history_len:]

        self.traj_acc_t_history.append(traj_acc_t)
        self.traj_acc_a_history.append(traj_acc_a)
        if len(self.traj_acc_t_history) > self.traj_acc_history_len:
            self.traj_acc_t_history = \
                self.traj_acc_t_history[len(self.traj_acc_t_history)
                                        - self.traj_acc_history_len:]
            self.traj_acc_a_history = \
                self.traj_acc_a_history[len(self.traj_acc_a_history)
                                        - self.traj_acc_history_len:]

        self.traj_path_x_history.append(traj_path_x)
        self.traj_path_y_history.append(traj_path_y)
        if len(self.traj_path_x_history) > self.traj_path_history_len:
            self.traj_path_x_history = \
                self.traj_path_x_history[len(self.traj_path_x_history)
                                         - self.traj_path_history_len:]
            self.traj_path_y_history = \
                self.traj_path_y_history[len(self.traj_path_y_history)
                                         - self.traj_path_history_len:]
        self.traj_data_lock.release()

    def replot_sl_data(self,
                       sl_static_obstacle_lower_boundary,
                       sl_static_obstacle_upper_boundary,
                       sl_dynamic_obstacle_lower_boundary,
                       sl_dynamic_obstacle_upper_boundary,
                       sl_map_lower_boundary,
                       sl_map_upper_boundary, sl_path,
                       sl_aggregated_boundary_low_line,
                       sl_aggregated_boundary_high_line):
        """Push the cached SL series into the given matplotlib line artists."""
        self.sl_data_lock.acquire()
        sl_static_obstacle_lower_boundary.set_visible(True)
        sl_static_obstacle_upper_boundary.set_visible(True)
        sl_dynamic_obstacle_lower_boundary.set_visible(True)
        sl_dynamic_obstacle_upper_boundary.set_visible(True)
        sl_map_lower_boundary.set_visible(True)
        sl_map_upper_boundary.set_visible(True)
        sl_path.set_visible(True)
        sl_aggregated_boundary_low_line.set_visible(True)
        sl_aggregated_boundary_high_line.set_visible(True)

        # Duplicate each s sample and interleave an off-scale value (+/-11)
        # so the boundary lines render as vertical "comb" segments.
        new_sampled_s = []
        for s in self.sl_sampled_s:
            new_sampled_s.append(s)
            new_sampled_s.append(s)
        new_map_lower = []
        for l in self.sl_map_lower_boundary:
            new_map_lower.append(l)
            new_map_lower.append(-11)
        new_map_upper = []
        for l in self.sl_map_upper_boundary:
            new_map_upper.append(l)
            new_map_upper.append(11)

        sl_map_lower_boundary.set_xdata(new_sampled_s)
        sl_map_lower_boundary.set_ydata(new_map_lower)

        sl_map_upper_boundary.set_xdata(new_sampled_s)
        sl_map_upper_boundary.set_ydata(new_map_upper)

        sl_dynamic_obstacle_lower_boundary.set_xdata(self.sl_sampled_s)
        sl_dynamic_obstacle_lower_boundary.set_ydata(
            self.sl_dynamic_obstacle_lower_boundary)

        sl_dynamic_obstacle_upper_boundary.set_xdata(self.sl_sampled_s)
        sl_dynamic_obstacle_upper_boundary.set_ydata(
            self.sl_dynamic_obstacle_upper_boundary)

        new_static_lower = []
        for l in self.sl_static_obstacle_lower_boundary:
            new_static_lower.append(l)
            new_static_lower.append(-11)
        new_static_upper = []
        for l in self.sl_static_obstacle_upper_boundary:
            new_static_upper.append(l)
            new_static_upper.append(11)

        sl_static_obstacle_lower_boundary.set_xdata(new_sampled_s)
        sl_static_obstacle_lower_boundary.set_ydata(new_static_lower)

        sl_static_obstacle_upper_boundary.set_xdata(new_sampled_s)
        sl_static_obstacle_upper_boundary.set_ydata(new_static_upper)

        sl_path.set_xdata(self.sl_path_s)
        sl_path.set_ydata(self.sl_path_l)

        sl_aggregated_boundary_low_line.set_xdata(
            self.sl_aggregated_boundary_s)
        sl_aggregated_boundary_low_line.set_ydata(
            self.sl_aggregated_boundary_low_l)
        sl_aggregated_boundary_high_line.set_xdata(
            self.sl_aggregated_boundary_s)
        sl_aggregated_boundary_high_line.set_ydata(
            self.sl_aggregated_boundary_high_l)
        self.sl_data_lock.release()

    def replot_st_data(self, boundaries_pool, st_line,
                       obstacle_annotation_pool, st_graph_name):
        """Redraw one ST graph: boundary polygons, annotations, speed line."""
        if st_graph_name not in self.st_data_boundary_s:
            return
        if st_graph_name not in self.st_curve_s:
            return

        cnt = 0

        self.st_data_lock.acquire()
        st_graph_boundary_s = self.st_data_boundary_s[st_graph_name]
        st_graph_boundary_t = self.st_data_boundary_t[st_graph_name]
        st_boundary_type = self.st_data_boundary_type[st_graph_name]
        for boundary_name in st_graph_boundary_s.keys():
            if cnt >= len(boundaries_pool):
                # BUG FIX: was str + int concatenation, which raised
                # TypeError the moment the warning triggered.
                print("WARNING: number of st boundaries is more than %d"
                      % len(boundaries_pool))
                continue
            boundary = boundaries_pool[cnt]
            boundary.set_visible(True)
            boundary.set_xdata(st_graph_boundary_t[boundary_name])
            boundary.set_ydata(st_graph_boundary_s[boundary_name])

            # Annotate at the centroid of the polygon's distinct vertices
            # (the last point duplicates the first, hence the -1).
            center_t = 0
            center_s = 0
            for i in range(len(st_graph_boundary_t[boundary_name]) - 1):
                center_s += st_graph_boundary_s[boundary_name][i]
                center_t += st_graph_boundary_t[boundary_name][i]
            center_s /= float(len(st_graph_boundary_s[boundary_name]) - 1)
            center_t /= float(len(st_graph_boundary_t[boundary_name]) - 1)

            annotation = obstacle_annotation_pool[cnt]
            annotation.set_visible(True)
            annotation.set_text(boundary_name + "_"
                                + st_boundary_type[boundary_name]
                                .replace("ST_BOUNDARY_TYPE_", ""))
            annotation.set_x(center_t)
            annotation.set_y(center_s)

            cnt += 1

        st_line.set_visible(True)
        st_line.set_xdata(self.st_curve_t[st_graph_name])
        st_line.set_ydata(self.st_curve_s[st_graph_name])
        st_line.set_label(st_graph_name[0:5])
        self.st_data_lock.release()

    def compute_path_data(self):
        """Parse the debug path candidates into (x, y) series by name."""
        path_data_x = {}
        path_data_y = {}
        for path_debug in self.planning_pb.debug.planning_data.path:
            name = path_debug.name
            path_data_x[name] = []
            path_data_y[name] = []
            for path_point in path_debug.path_point:
                path_data_x[name].append(path_point.x)
                path_data_y[name].append(path_point.y)
        self.path_data_lock.acquire()
        self.path_data_x = path_data_x
        self.path_data_y = path_data_y
        self.path_data_lock.release()

    def replot_path_data(self, path_lines):
        """Push cached path series into the pooled matplotlib lines."""
        cnt = 0
        self.path_data_lock.acquire()
        for name in self.path_data_x.keys():
            if cnt >= len(path_lines):
                # BUG FIX: was str + int concatenation (TypeError).
                print("WARNING: number of path lines is more than %d"
                      % len(path_lines))
                continue
            if len(self.path_data_x[name]) <= 1:
                continue
            line = path_lines[cnt]
            line.set_visible(True)
            line.set_xdata(self.path_data_x[name])
            line.set_ydata(self.path_data_y[name])
            line.set_label(name[0:5])
            cnt += 1
        self.path_data_lock.release()

    def compute_speed_data(self):
        """Parse speed plans plus the final trajectory speed into series."""
        speed_data_time = {}
        speed_data_val = {}
        for speed_plan in self.planning_pb.debug.planning_data.speed_plan:
            name = speed_plan.name
            speed_data_time[name] = []
            speed_data_val[name] = []
            for speed_point in speed_plan.speed_point:
                speed_data_time[name].append(speed_point.t)
                speed_data_val[name].append(speed_point.v)
        # Synthesize a series for the published trajectory's speed.
        name = "final_speed_output"
        speed_data_time[name] = []
        speed_data_val[name] = []
        for traj_point in self.planning_pb.trajectory_point:
            speed_data_time[name].append(traj_point.relative_time)
            speed_data_val[name].append(traj_point.v)

        self.speed_data_lock.acquire()
        self.speed_data_time = speed_data_time
        self.speed_data_val = speed_data_val
        self.speed_data_lock.release()

    def replot_speed_data(self, speed_lines):
        """Push cached speed series into the pooled matplotlib lines."""
        cnt = 0
        self.speed_data_lock.acquire()
        for name in self.speed_data_time.keys():
            if cnt >= len(speed_lines):
                # BUG FIX: was str + int concatenation (TypeError).
                print("WARNING: number of speed lines is more than %d"
                      % len(speed_lines))
                continue
            if len(self.speed_data_time[name]) <= 1:
                continue
            line = speed_lines[cnt]
            line.set_visible(True)
            line.set_xdata(self.speed_data_time[name])
            line.set_ydata(self.speed_data_val[name])
            line.set_label(name[0:5])
            cnt += 1
        self.speed_data_lock.release()
| apache-2.0 |
petrutlucian94/nova | nova/api/openstack/compute/plugins/v3/baremetal_nodes.py | 12 | 6358 | # Copyright (c) 2013 NTT DOCOMO, INC.
# Copyright 2014 IBM Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The bare-metal admin extension."""
from oslo_config import cfg
from oslo_utils import importutils
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
# ironicclient is an optional dependency: both names are None when the
# library is not importable (checked via _check_ironic_client_enabled).
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')

CONF = cfg.CONF

# Extension alias used for policy authorization and URL routing.
ALIAS = "os-baremetal-nodes"
authorize = extensions.os_compute_authorizer(ALIAS)

# Node attributes copied into API responses (see _node_dict).
node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
               'pm_user', 'service_host', 'terminal_port', 'instance_uuid']

node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']

interface_fields = ['id', 'address', 'datapath_id', 'port_no']

# Pull the Ironic connection options registered by the virt driver so they
# are readable here as CONF.ironic.*.
CONF.import_opt('api_version',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('api_endpoint',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('admin_username',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('admin_password',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('admin_tenant_name',
                'nova.virt.ironic.driver',
                group='ironic')
CONF.import_opt('compute_driver', 'nova.virt.driver')
def _check_ironic_client_enabled():
    """Raise 501 when the optional ironicclient library is unavailable."""
    if ironic_client is not None:
        return
    raise webob.exc.HTTPNotImplemented(
        explanation=_("Ironic client unavailable, cannot access Ironic."))
def _get_ironic_client():
    """return an Ironic client."""
    # TODO(NobodyCam): Fix insecure setting
    # NOTE(review): CONF.ironic.admin_url is read below, but only
    # api_version/api_endpoint/admin_username/admin_password/
    # admin_tenant_name are registered via CONF.import_opt above.
    # Presumably admin_url is registered by the same driver module --
    # confirm, or add a matching import_opt.
    kwargs = {'os_username': CONF.ironic.admin_username,
              'os_password': CONF.ironic.admin_password,
              'os_auth_url': CONF.ironic.admin_url,
              'os_tenant_name': CONF.ironic.admin_tenant_name,
              'os_service_type': 'baremetal',
              'os_endpoint_type': 'public',
              'insecure': 'true',
              'ironic_url': CONF.ironic.api_endpoint}
    icli = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
    return icli
def _no_ironic_proxy(cmd):
    """Reject the request with a 400 telling the caller to use Ironic."""
    msg = _("Command Not supported. Please use Ironic "
            "command %(cmd)s to perform this "
            "action.") % {'cmd': cmd}
    raise webob.exc.HTTPBadRequest(explanation=msg)
class BareMetalNodeController(wsgi.Controller):
    """The Bare-Metal Node API controller for the OpenStack API.

    Read operations (index/show) are proxied to Ironic; every mutating
    operation is rejected with a 400 that names the equivalent Ironic
    command.
    """

    def _node_dict(self, node_ref):
        """Copy the standard and extended node fields out of *node_ref*."""
        d = {}
        for f in node_fields:
            d[f] = node_ref.get(f)
        for f in node_ext_fields:
            d[f] = node_ref.get(f)
        return d

    @extensions.expected_errors((404, 501))
    def index(self, req):
        """List all nodes known to Ironic with summary properties."""
        context = req.environ['nova.context']
        authorize(context)
        nodes = []
        # proxy command to Ironic
        _check_ironic_client_enabled()
        icli = _get_ironic_client()
        ironic_nodes = icli.node.list(detail=True)
        for inode in ironic_nodes:
            node = {'id': inode.uuid,
                    'interfaces': [],
                    'host': 'IRONIC MANAGED',
                    'task_state': inode.provision_state,
                    'cpus': inode.properties.get('cpus', 0),
                    'memory_mb': inode.properties.get('memory_mb', 0),
                    'disk_gb': inode.properties.get('local_gb', 0)}
            nodes.append(node)
        return {'nodes': nodes}

    @extensions.expected_errors((404, 501))
    def show(self, req, id):
        """Show one node, including the addresses of its ports.

        Raises webob.exc.HTTPNotFound when Ironic does not know the id.
        """
        context = req.environ['nova.context']
        authorize(context)
        # proxy command to Ironic
        _check_ironic_client_enabled()
        icli = _get_ironic_client()
        try:
            inode = icli.node.get(id)
        except ironic_exc.NotFound:
            msg = _("Node %s could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        iports = icli.node.list_ports(id)
        node = {'id': inode.uuid,
                'interfaces': [],
                'host': 'IRONIC MANAGED',
                'task_state': inode.provision_state,
                'cpus': inode.properties.get('cpus', 0),
                'memory_mb': inode.properties.get('memory_mb', 0),
                'disk_gb': inode.properties.get('local_gb', 0),
                'instance_uuid': inode.instance_uuid}
        for port in iports:
            node['interfaces'].append({'address': port.address})
        return {'node': node}

    @extensions.expected_errors(400)
    def create(self, req, body):
        _no_ironic_proxy("port-create")

    @extensions.expected_errors(400)
    def delete(self, req, id):
        # Bug fix: this previously hinted "port-create"; the delete-side
        # Ironic command (as used by _remove_interface below) is
        # port-delete.
        _no_ironic_proxy("port-delete")

    @wsgi.action('add_interface')
    @extensions.expected_errors(400)
    def _add_interface(self, req, id, body):
        _no_ironic_proxy("port-create")

    @wsgi.action('remove_interface')
    @extensions.expected_errors(400)
    def _remove_interface(self, req, id, body):
        _no_ironic_proxy("port-delete")
class BareMetalNodes(extensions.V3APIExtensionBase):
    """Admin-only bare-metal node administration."""

    name = "BareMetalNodes"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the os-baremetal-nodes resource with a POST 'action'."""
        controller = BareMetalNodeController()
        return [extensions.ResourceExtension(
            ALIAS, controller, member_actions={"action": "POST"})]

    def get_controller_extensions(self):
        """Required by the abstract V3APIExtensionBase interface; this
        extension contributes no controller extensions.
        """
        return []
| apache-2.0 |
# Transliteration table for Unicode code points U+B800..U+B8FF (Hangul
# syllables).  The original file spelled out all 256 romanizations as
# literals, and its opening line was corrupted; Hangul syllables are laid
# out algorithmically in Unicode (28 consecutive code points per
# initial+vowel, one per possible final consonant), so the table is
# regenerated from the jamo romanizations instead.
#
# This block covers: the last 8 "reo" syllables (series starts in x0b7),
# eight complete series (re, ryeo, rye, ro, rwa, rwae, roe, ryo), and the
# first 24 "ru" syllables (series continues in x0b9).

# Romanizations of the 28 possible finals, in code-point order.
_FINALS = ('', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm', 'lb',
           'ls', 'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng', 'j', 'c',
           'k', 't', 'p', 'h')

# 0x00-0x07: tail of the "reo" series (finals ss..h).
_syllables = ['reo' + f for f in _FINALS[20:]]
# 0x08-0xe7: eight full 28-entry series.
for _vowel in ('e', 'yeo', 'ye', 'o', 'wa', 'wae', 'oe', 'yo'):
    _syllables.extend('r' + _vowel + f for f in _FINALS)
# 0xe8-0xff: head of the "ru" series (finals ''..c).
_syllables.extend('ru' + f for f in _FINALS[:24])

data = tuple(_syllables)
| gpl-3.0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the APPFS exercise 8 documentation
# (originally generated by sphinx-quickstart on Sat Jul 8 01:49:58 2017).
#
# Only the values that matter for this project are set here; every other
# option keeps its Sphinx default.  See the Sphinx build-configuration
# reference for the full list of available settings.

import os
import sys

# Make local modules importable by autodoc.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration --------------------------------------------------

extensions = [
    'sphinx.ext.autodoc',
]

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# Project identity.
project = 'APPFS-Ex8'
copyright = '2017, Tam'
author = 'Tam'

version = '1.0'       # short X.Y version
release = '1.0.0'     # full version, including alpha/beta/rc tags

language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False

# -- Options for HTML output ------------------------------------------------

html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'APPFS-Ex8doc'

# -- Options for LaTeX output -----------------------------------------------

# All LaTeX knobs keep their defaults.
latex_elements = {
}

# (source start file, target name, title, author,
#  documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'APPFS-Ex8.tex', 'APPFS-Ex8 Documentation',
     'Tam', 'manual'),
]

# -- Options for manual page output -----------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'appfs-ex8', 'APPFS-Ex8 Documentation',
     [author], 1)
]

# -- Options for Texinfo output ---------------------------------------------

# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'APPFS-Ex8', 'APPFS-Ex8 Documentation',
     author, 'APPFS-Ex8', 'One line description of project.',
     'Miscellaneous'),
]
| mit |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import topi
import unittest
from tvm.contrib.nvcc import have_fp16, have_int8
from tvm.contrib import nvcc
import tvm.testing
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_add():
    """Elementwise +1 over vector-lane dtypes (e.g. float32x4) on CUDA."""
    num_thread = 8
    def check_cuda(dtype, n, lanes):
        # Skip dtypes this GPU cannot execute.
        if dtype == "float16" and not have_fp16(tvm.gpu(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        if dtype == "int8" and not have_int8(tvm.gpu(0).compute_version):
            print("skip because gpu does not support int8")
            return
        A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
        B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
        s[B].bind(xo, bx)
        s[B].bind(xi, tx)
        fun = tvm.build(s, [A, B], "cuda")
        ctx = tvm.gpu(0)
        # Host side is (n, lanes); the device dtype packs the lane axis.
        a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(np.random.uniform(size=(n, lanes)))
        c = tvm.nd.empty((n,), B.dtype, ctx)
        fun(a, c)
        tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
    check_cuda("float32", 64, 2)
    check_cuda("float32", 64, 3)
    check_cuda("float32", 64, 4)
    check_cuda("int8", 64, 2)
    check_cuda("int8", 64, 3)
    check_cuda("int8", 64, 4)
    check_cuda("uint8", 64, 2)
    check_cuda("uint8", 64, 3)
    check_cuda("uint8", 64, 4)
    check_cuda("float16", 64, 2)
    check_cuda("float16", 64, 4)
    check_cuda("float16", 64, 6)
    check_cuda("float16", 64, 8)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_multiply_add():
    """int8x4 dot-product-plus-accumulator via the CUDA __dp4a extern."""
    num_thread = 8
    def check_cuda(dtype, n, lanes):
        if dtype == "int8" and not have_int8(tvm.gpu(0).compute_version):
            print("skip because gpu does not support int8")
            return
        A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
        B = te.placeholder((n,), name="B", dtype="%sx%d" % (dtype, lanes))
        C = te.placeholder((n,), name="C", dtype="int32")
        # D[i] = dot(A[i], B[i]) + C[i], cf. the numpy reference below.
        D = te.compute(
            (n,), lambda i: tvm.tir.call_pure_extern("int32", "__dp4a", A[i], B[i], C[i]), name="D"
        )
        s = te.create_schedule(D.op)
        xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
        s[D].bind(xo, bx)
        s[D].bind(xi, tx)
        fun = tvm.build(s, [A, B, C, D], "cuda")
        np_a = np.random.randint(low=-128, high=127, size=(n, lanes))
        np_b = np.random.randint(low=-128, high=127, size=(n, lanes))
        np_c = np.random.randint(low=0, high=127, size=(n,))
        # Reference: per-row lane dot product plus the accumulator term.
        np_d = [sum(x * y) + z for x, y, z in zip(np_a, np_b, np_c)]
        ctx = tvm.gpu(0)
        a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(np_a)
        b = tvm.nd.empty((n,), B.dtype, ctx).copyfrom(np_b)
        c = tvm.nd.empty((n,), C.dtype, ctx).copyfrom(np_c)
        d = tvm.nd.empty((n,), D.dtype, ctx)
        fun(a, b, c, d)
        tvm.testing.assert_allclose(d.asnumpy(), np_d)
    check_cuda("int8", 64, 4)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_load():
    """Identity copy through vectorized int8 loads of several lane widths."""
    num_thread = 8
    def check_cuda(dtype, n, lanes):
        ctx = tvm.gpu(0)
        A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
        B = te.compute((n,), lambda i: A[i], name="B")
        s = te.create_schedule(B.op)
        block, thread = s[B].split(B.op.axis[0], factor=num_thread)
        s[B].bind(block, bx)
        s[B].bind(thread, tx)
        fun = tvm.build(s, [A, B], "cuda", name="vector_load")
        np_a = np.random.randint(low=-128, high=127, size=(n, lanes))
        a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(np_a)
        b = tvm.nd.empty((n,), B.dtype, ctx)
        fun(a, b)
        # Output must equal the input byte-for-byte.
        tvm.testing.assert_allclose(a.asnumpy(), b.asnumpy())
    check_cuda("int8", 64, 2)
    check_cuda("int8", 64, 3)
    check_cuda("int8", 64, 4)
    check_cuda("int8", 64, 8)
    check_cuda("int8", 64, 16)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_make_int8():
    """Broadcast an int8 constant into 2/3/4-lane vectors on CUDA."""

    def check_cuda(n, value, lanes):
        dtype = "int8"
        ctx = tvm.gpu(0)
        # Constant-filled (n, lanes) tensor; vectorizing the inner axis
        # forces codegen to emit a packed int8 vector constant.
        A = te.compute((n, lanes), lambda i, j: tvm.tir.const(value, dtype=dtype))
        s = te.create_schedule(A.op)
        y, x = s[A].op.axis
        s[A].vectorize(x)
        s[A].bind(y, bx)
        # Name the kernel after the actual lane count (previously it was
        # hard-coded to "make_int8x4" even for the 2- and 3-lane cases).
        fun = tvm.build(s, [A], "cuda", name="make_int8x%d" % lanes)
        np_a = np.full((n, lanes), value, dtype=dtype)
        a = tvm.nd.empty(np_a.shape, dtype, ctx)
        fun(a)
        np.testing.assert_equal(a.asnumpy(), np_a)

    # 0xAB exercises the high bit, 0 the zero path, -3 sign extension.
    check_cuda(64, 0xAB, 4)
    check_cuda(64, 0, 4)
    check_cuda(64, -3, 4)
    check_cuda(64, 0xAB, 3)
    check_cuda(64, 0, 3)
    check_cuda(64, -3, 3)
    check_cuda(64, 0xAB, 2)
    check_cuda(64, 0, 2)
    check_cuda(64, -3, 2)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_inf_nan():
    """+/-inf and nan float constants must compile and run on CUDA."""
    target = "cuda"
    def check_inf_nan(ctx, n, value, dtype):
        A = te.placeholder((n,), name="A", dtype=dtype)
        inf_value = tvm.tir.const(value, dtype=dtype)
        C = te.compute((n,), lambda i: inf_value, name="C")
        s = te.create_schedule(C.op)
        s[C].bind(s[C].op.axis[0], tx)
        fun = tvm.build(s, [A, C], target)
        a = tvm.nd.empty((n,), A.dtype, ctx)
        c = tvm.nd.empty((n,), A.dtype, ctx)
        # Only need to test compiling here; no output comparison (nan
        # would not compare equal anyway).
        fun(a, c)
    ctx = tvm.context(target, 0)
    check_inf_nan(ctx, 1, -float("inf"), "float32")
    check_inf_nan(ctx, 1, -float("inf"), "float64")
    check_inf_nan(ctx, 1, float("inf"), "float32")
    check_inf_nan(ctx, 1, float("inf"), "float64")
    check_inf_nan(ctx, 1, float("nan"), "float32")
    check_inf_nan(ctx, 1, float("nan"), "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_shuffle():
    """Custom lowering pass that rewrites the vectorized inner loop into a
    vector load of `a` plus four scalar loads of `b` recombined with
    tir.Shuffle (lane ids reversed), then checks the kernel result.
    """
    idxm = tvm.tir.indexmod
    a = te.placeholder((64,), "int32")
    b = te.placeholder((64,), "int32")
    # x - x%4 + (3 - x%4) reverses the index inside each aligned 4-group.
    c = te.compute((64,), lambda x: a[x] + b[x - idxm(x, 4) + (3 - idxm(x, 4))])
    sch = te.create_schedule(c.op)
    x = c.op.axis[0]
    xo, xi = sch[c].split(x, 4)
    thrx = te.thread_axis("threadIdx.x")
    sch[c].bind(xo, thrx)
    sch[c].vectorize(xi)
    def MyVectorize():
        def vectorizer(op):
            # Only rewrite the loop marked Vectorized by the schedule.
            if op.for_type == tvm.tir.For.Vectorized:
                four = tvm.tir.const(4, "int32")
                idx = tvm.tir.Ramp(thrx.var * four, tvm.tir.const(1, "int32"), 4)
                all_ones = tvm.tir.const(1, "int32x4")
                store = op.body
                value = store.value
                # Vector load of `a` over the thread's 4-element slice.
                new_a = tvm.tir.Load("int32x4", value.a.buffer_var, idx, all_ones)
                bs, ids = [], []
                for i in range(4):
                    bs.append(
                        tvm.tir.Load(
                            "int32", value.b.buffer_var, thrx.var * four + tvm.tir.const(i, "int32")
                        )
                    )
                    # Lane order [3, 2, 1, 0] reproduces the reversed index.
                    ids.append(tvm.tir.const(3 - i, "int32"))
                new_b = tvm.tir.Shuffle(bs, ids)
                return tvm.tir.Store(store.buffer_var, new_a + new_b, idx, all_ones)
            return None
        def _transform(f, *_):
            return f.with_body(
                tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ["tir.For"])
            )
        return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="MyVectorize")
    with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, MyVectorize())]}):
        module = tvm.build(sch, [a, b, c], target="cuda")
    a_ = np.array(list(range(64)), dtype="int32")
    b_ = np.array((list(range(4))[::-1]) * 16, dtype="int32")
    c_ = np.zeros((64,), dtype="int32")
    # b_ holds 3,2,1,0 repeated; reversing each 4-group yields 0,1,2,3.
    ref = a_ + np.array((list(range(4))) * 16, dtype="int32")
    nda, ndb, ndc = [tvm.nd.array(i, tvm.gpu(0)) for i in [a_, b_, c_]]
    module(nda, ndb, ndc)
    tvm.testing.assert_allclose(ndc.asnumpy(), ref)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_crossthread_reduction1(target, ctx):
    """Cross-thread sum over the inner axis of a dynamic (n, m) tensor."""
    n = te.var("n")
    m = te.var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "m")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")

    def sched(nthd):
        # One output row per block; the reduction is split across `nthd`
        # threads in the block.
        s = te.create_schedule(B.op)
        ko, _ = s[B].split(B.op.reduce_axis[0], nparts=nthd)
        s[B].bind(ko, te.thread_axis("threadIdx.x"))
        s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
        func = tvm.build(s, [A, B], target)
        return func

    def verify(nthd):
        func = sched(nthd)
        nn = 3
        # Reduction lengths just below, at, and above the thread count hit
        # both the exact and the predicated code paths.  (The original
        # iterated over a pointless copy: `[x for x in vals]`.)
        for kk in (nthd - 1, nthd, nthd + 1):
            size = (nn, kk)
            a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), ctx)
            b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), ctx)
            func(a, b)
            tvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-3)

    verify(16)
    verify(32)
    verify(64)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_crossthread_reduction2(target, ctx):
    """Cross-thread sum over two reduce axes using a 2-D thread block."""
    n = te.var("n")
    k0 = te.var("k0")
    k1 = te.var("k1")
    A = te.placeholder((n, k0, k1), name="A")
    # NOTE: k0/k1 are rebound from size vars to the reduce axes here.
    k0 = te.reduce_axis((0, k0), "k0")
    k1 = te.reduce_axis((0, k1), "k1")
    B = te.compute((n,), lambda i: te.sum(A[i, k0, k1], axis=(k0, k1)), name="B")
    def sched(nthdx, nthdy):
        # One output element per block; the two reductions are split
        # across an nthdx x nthdy thread block.
        s = te.create_schedule(B.op)
        k0o, _ = s[B].split(B.op.reduce_axis[0], nparts=nthdx)
        k1o, _ = s[B].split(B.op.reduce_axis[1], nparts=nthdy)
        s[B].bind(k0o, te.thread_axis("threadIdx.x"))
        s[B].bind(k1o, te.thread_axis("threadIdx.y"))
        s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
        func = tvm.build(s, [A, B], target)
        return func
    def verify(nthdx, nthdy):
        func = sched(nthdx, nthdy)
        nn = 3
        # checks three typical cases per dimension: sizes just below, at,
        # and above the thread count (exact vs. predicated code paths).
        vx = [nthdx - 1, nthdx, nthdx + 1]
        vy = [nthdy - 1, nthdy, nthdy + 1]
        for kk0, kk1 in [(x, y) for x in vx for y in vy]:
            size = (nn, kk0, kk1)
            a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), ctx)
            b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), ctx)
            func(a, b)
            tvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=(1, 2)), rtol=1e-3)
    verify(16, 16)
    verify(32, 32)
    verify(16, 32)
    verify(32, 16)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_reduction_binding():
    """Compile-only: binding a spatial axis that was reordered behind the
    reduction axis must still yield a valid CUDA kernel."""
    reduce_k = te.reduce_axis((0, 32), "k")
    inp = te.placeholder((96, 32), name="A")
    out = te.compute((96,), lambda m: te.sum(inp[m, reduce_k], axis=reduce_k), name="B")
    sch = te.create_schedule(out.op)
    # Move the reduction loop outside the spatial loop before splitting.
    sch[out].reorder(out.op.reduce_axis[0], out.op.axis[0])
    outer, _inner = sch[out].split(out.op.axis[0], 32)
    sch[out].bind(outer, te.thread_axis("blockIdx.x"))
    tvm.build(sch, [inp, out], "cuda")
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_rfactor_predicates(target, ctx):
    """Compile-only: nested rfactor with a store predicate.  The extent
    129 is not divisible by the split factors, so the factored loops need
    predicates."""
    n = te.reduce_axis((0, 129), "n")
    A = te.placeholder((129,), name="A")
    B = te.compute((1,), lambda b: te.sum(A[n], axis=n), name="B")
    s = te.create_schedule(B.op)
    _, ni = s[B].split(s[B].op.reduce_axis[0], factor=8)
    # First rfactor over the inner split axis.
    BF = s.rfactor(B, ni, 0)
    # Only thread 0 writes the final result.
    s[B].set_store_predicate(tx.var.equal(0))
    s[B].bind(s[B].op.reduce_axis[0], tx)
    s[B].bind(s[B].op.axis[0], bx)
    s[BF].compute_at(s[B], s[B].op.axis[0])
    _, noi = s[BF].split(s[BF].op.reduce_axis[0], factor=2)
    # Second rfactor, applied to the intermediate produced above.
    BF2 = s.rfactor(BF, noi, 0)
    s[BF].bind(s[BF].op.axis[0], tx)
    s[BF2].compute_at(s[BF], s[BF].op.axis[1])
    fcuda = tvm.build(s, [A, B], target)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_const_float_to_half():
    """Compare an fp16 tensor against an fp16 scalar constant on CUDA."""
    # This import is required to use nvcc to perform code gen;
    # otherwise it is found that the code gen is done by nvrtc.
    from tvm import autotvm
    shape = (2, 3, 4)
    a = te.placeholder(shape, dtype="float16", name="a")
    b = tvm.tir.const(0.5, dtype="float16")
    c = te.compute(shape, lambda i, j, k: a[i, j, k] > b, name="c")
    s = te.create_schedule(c.op)
    axes = [axis for axis in c.op.axis]
    fused = s[c].fuse(*axes)
    # NOTE: these locals shadow the module-level `bx`/`tx` thread axes.
    bx, tx = s[c].split(fused, factor=64)
    s[c].bind(bx, te.thread_axis("blockIdx.x"))
    s[c].bind(tx, te.thread_axis("threadIdx.x"))
    func = tvm.build(s, [a, c], "cuda")
    ctx = tvm.gpu(0)
    a_np = np.random.uniform(size=shape).astype(a.dtype)
    c_np = np.zeros(shape=shape, dtype=c.dtype)
    a = tvm.nd.array(a_np, ctx)
    c = tvm.nd.array(c_np, ctx)
    func(a, c)
    np.testing.assert_equal(c.asnumpy(), a_np > b.value)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_reduction():
    """Fused elementwise ops followed by a full sum reduction via topi."""
    def check(device, dtype, m=32, n=32):
        if not tvm.testing.device_enabled(device):
            print("Skipping", device)
            return
        ctx = tvm.context(device, 0)
        a = te.placeholder((m, n), name="a", dtype=dtype)
        b = te.placeholder((m, n), name="b", dtype=dtype)
        c = a + b
        d = a * b
        e = topi.elemwise_sum([c, d])
        # g = sum over all elements of (a + b) + (a * b).
        g = topi.sum(e)
        with tvm.target.Target(device):
            sg = topi.cuda.schedule_reduce(g)
            func = tvm.build(sg, [a, b, g], device)
        a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
        b_np = np.random.uniform(size=(m, n)).astype(b.dtype)
        g_np = np.sum(np.add(a_np * b_np, a_np + b_np))
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(b_np, ctx)
        g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), ctx)
        func(a_nd, b_nd, g_nd)
        tvm.testing.assert_allclose(g_nd.asnumpy(), g_np, rtol=1e-3)
    check("cuda", "float32")
    check("rocm", "float32")
    check("cuda", "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_mix_threaded_and_normal_reduction():
    """Sum reduction where only the first reduce axis is bound to threads;
    the second remains an ordinary serial loop."""
    def check(device, dtype, m=32, n=32):
        if not tvm.testing.device_enabled(device):
            print("Skipping", device)
            return
        ctx = tvm.context(device, 0)
        if dtype == "float16" and not have_fp16(ctx.compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        a = tvm.te.placeholder((m, n), name="a", dtype=dtype)
        b = topi.sum(a)
        with tvm.target.Target(device):
            sb = tvm.te.create_schedule(b.op)
            i, _ = b.op.reduce_axis
            # Bind only the outer reduction axis; the inner one stays serial.
            sb[b].bind(i, tvm.te.thread_axis("threadIdx.x"))
            func = tvm.build(sb, [a, b], device)
        a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
        b_np = np.sum(a_np)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), ctx)
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3)
    check("cuda", "float32")
    check("rocm", "float32")
    check("cuda", "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_floordiv_with_vectorization():
    """Vectorized gather with a floordiv index must codegen correctly."""
    with tvm.target.cuda():
        # B[i] = A[floordiv(i, k)]
        n = 256
        k = 37
        A = te.placeholder((n,), name="A")
        B = te.compute((n,), lambda i: A[tvm.tir.floordiv(i, k)], name="B")
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], nparts=1)
        xio, xii = s[B].split(xi, factor=4)
        # The x4-vectorized inner axis is the interesting codegen path.
        s[B].vectorize(xii)
        s[B].bind(xo, bx)
        s[B].bind(xio, tx)
        func = tvm.build(s, [A, B], "cuda")
        ctx = tvm.gpu(0)
        a_np = np.random.uniform(size=(n,)).astype(A.dtype)
        b_np = np.array([a_np[i // k] for i in range(0, n)])
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), ctx)
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_floormod_with_vectorization():
    """Vectorized gather B[i] = A[floormod(i, k)]: the floormod index must
    survive x4 vectorization in CUDA codegen."""
    with tvm.target.cuda():
        n, k = 256, 37
        A = te.placeholder((n,), name="A")
        B = te.compute((n,), lambda i: A[tvm.tir.floormod(i, k)], name="B")
        sch = te.create_schedule(B.op)
        outer, inner = sch[B].split(B.op.axis[0], nparts=1)
        inner_outer, inner_inner = sch[B].split(inner, factor=4)
        sch[B].vectorize(inner_inner)
        sch[B].bind(outer, bx)
        sch[B].bind(inner_outer, tx)
        func = tvm.build(sch, [A, B], "cuda")
        ctx = tvm.gpu(0)
        a_np = np.random.uniform(size=(n,)).astype(A.dtype)
        # Reference gather computed with numpy fancy indexing.
        b_np = a_np[np.arange(n) % k]
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), ctx)
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_casts():
    """Vectorized elementwise add with a cast, over all dtype pairs not
    excluded by ``skip``."""
    def check(t0, t1):
        # C = A(t0) + cast(B(t1) -> t0); verified against numpy.
        if (t0 == "float16" or t1 == "float16") and not have_fp16(tvm.gpu(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        # compute
        n = 128
        A = te.placeholder((n,), dtype=t0, name="A")
        B = te.placeholder((n,), dtype=t1, name="B")
        C = te.compute((n,), lambda i: A[i] + topi.cast(B[i], A.dtype), name="C")
        # schedule: 32 threads, vector width 4
        s = tvm.te.create_schedule(C.op)
        ob, ib = s[C].split(s[C].op.axis[0], nparts=32)
        _, iib = s[C].split(ib, factor=4)
        s[C].vectorize(iib)
        s[C].bind(ob, tx)
        func = tvm.build(s, [A, B, C], "cuda")
        # correctness
        ctx = tvm.gpu(0)
        # Non-negative range when either dtype is unsigned, to avoid wraparound.
        low, high = (0, 20) if t0.startswith("u") or t1.startswith("u") else (-10, 10)
        a_np = np.random.randint(low, high, size=n).astype(A.dtype)
        b_np = np.random.randint(low, high, size=n).astype(B.dtype)
        c_np = (a_np + b_np).astype(A.dtype)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(b_np, ctx)
        c_nd = tvm.nd.array(np.zeros(c_np.shape, dtype=c_np.dtype), ctx)
        func(a_nd, b_nd, c_nd)
        tvm.testing.assert_allclose(c_nd.asnumpy(), c_np, rtol=1e-3)
    def skip(t0, t1):
        # Identical dtypes involve no cast, so there is nothing to test.
        if t0 == t1:
            return True
        # Pairs within {float16, uint8, int8} are excluded from this test.
        # NOTE(review): the original comment claimed CUDA "does support" casts
        # between {u}int8 and fp16, which contradicts skipping them here;
        # presumably "does not support" was meant — confirm upstream.
        skip_set = {"float16", "uint8", "int8"}
        if t0 in skip_set and t1 in skip_set:
            return True
        return False
    types = ["float16", "float32", "int8", "uint8", "int16", "uint16", "int32", "uint32"]
    for t0, t1 in [(x, y) for x in types for y in types if not skip(x, y)]:
        check(t0, t1)
def sched(B):
    """Build the standard 1-D test schedule for tensor *B*.

    Splits the first axis into block / thread / vector levels, vectorizes
    the innermost (factor-4) level, and binds the outer levels to the
    module-level ``bx`` / ``tx`` thread axes.
    """
    schedule = te.create_schedule(B.op)
    stage = schedule[B]
    block_axis, rest = stage.split(stage.op.axis[0], nparts=1)
    thread_axis, rest = stage.split(rest, nparts=32)
    _, vec_axis = stage.split(rest, factor=4)
    stage.vectorize(vec_axis)
    stage.bind(block_axis, bx)
    stage.bind(thread_axis, tx)
    return schedule
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_intrin1():
    """Check vectorized unary math intrinsics against numpy references,
    in float32 and (where supported) float16."""
    # (tvm intrinsic, numpy reference) pairs.
    test_funcs = [
        (tvm.tir.floor, lambda x: np.floor(x)),
        (tvm.tir.ceil, lambda x: np.ceil(x)),
        (tvm.tir.trunc, lambda x: np.trunc(x)),
        (tvm.tir.abs, lambda x: np.fabs(x)),
        (tvm.tir.round, lambda x: np.round(x)),
        (tvm.tir.exp, lambda x: np.exp(x)),
        (tvm.tir.exp2, lambda x: np.exp2(x)),
        (tvm.tir.exp10, lambda x: np.power(10, x)),
        (tvm.tir.log, lambda x: np.log(x)),
        (tvm.tir.log2, lambda x: np.log2(x)),
        (tvm.tir.log10, lambda x: np.log10(x)),
        (tvm.tir.tan, lambda x: np.tan(x)),
        (tvm.tir.cos, lambda x: np.cos(x)),
        (tvm.tir.cosh, lambda x: np.cosh(x)),
        (tvm.tir.sin, lambda x: np.sin(x)),
        (tvm.tir.sinh, lambda x: np.sinh(x)),
        (tvm.tir.atan, lambda x: np.arctan(x)),
        (tvm.tir.tanh, lambda x: np.tanh(x)),
        (tvm.tir.sqrt, lambda x: np.sqrt(x)),
    ]
    def run_test(tvm_intrin, np_func, dtype):
        if dtype == "float16" and not have_fp16(tvm.gpu(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        # set of intrinsics does not support fp16 yet.
        skip_set = {
            tvm.tir.abs,
            tvm.tir.round,
            tvm.tir.tan,
            tvm.tir.atan,
            tvm.tir.tanh,
            tvm.tir.cosh,
            tvm.tir.sinh,
        }
        if dtype == "float16" and tvm_intrin in skip_set:
            print("Skip because '{0}' does not support fp16 yet".format(tvm_intrin.__name__))
            return
        n = 128
        A = te.placeholder((n,), dtype=dtype, name="A")
        B = te.compute((n,), lambda *i: tvm_intrin(A(*i)), name="B")
        s = sched(B)  # shared block/thread/vectorize schedule (see sched())
        f = tvm.build(s, [A, B], "cuda")
        ctx = tvm.gpu(0)
        # Inputs drawn from (0, 1) so log/sqrt stay inside their domains.
        a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), ctx)
        b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), ctx)
        f(a, b)
        tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-3, rtol=1e-3)
    for func in test_funcs:
        run_test(*func, "float32")
        run_test(*func, "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_intrin2(dtype="float32"):
    """Check vectorized binary intrinsics (power, fmod) against numpy."""
    two = tvm.tir.const(2, dtype=dtype)
    # (tvm intrinsic, numpy reference) pairs; second operand fixed to 2.
    cases = [
        (tvm.tir.power, lambda x: np.power(x, 2.0)),
        (tvm.tir.fmod, lambda x: np.fmod(x, 2.0)),
    ]
    def exercise(tvm_intrin, reference):
        num = 128
        A = te.placeholder((num,), dtype=dtype, name="A")
        B = te.compute((num,), lambda i: tvm_intrin(A[i], two), name="B")
        built = tvm.build(sched(B), [A, B], "cuda")
        ctx = tvm.gpu(0)
        a = tvm.nd.array(np.random.uniform(0, 1, size=num).astype(A.dtype), ctx)
        b = tvm.nd.array(np.zeros(shape=(num,)).astype(A.dtype), ctx)
        built(a, b)
        tvm.testing.assert_allclose(b.asnumpy(), reference(a.asnumpy()), atol=1e-3, rtol=1e-3)
    for intrin, ref_fn in cases:
        exercise(intrin, ref_fn)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_popcount():
    """Vectorized tvm.tir.popcount must match a pure-Python bit count."""
    def ref_popcount(x):
        # Kernighan's trick: clearing the lowest set bit per iteration
        # counts the set bits; equivalent to subtracting the lowest set
        # bit (x & -x) for the non-negative inputs used here.
        bits = 0
        while x:
            x &= x - 1
            bits += 1
        return bits
    def check_dtype(dtype):
        num = 128
        A = te.placeholder((num,), dtype=dtype, name="A")
        B = te.compute((num,), lambda i: tvm.tir.popcount(A[i]), name="B")
        built = tvm.build(sched(B), [A, B], "cuda")
        ctx = tvm.gpu(0)
        a = tvm.nd.array(np.random.randint(0, 100000, size=num).astype(A.dtype), ctx)
        b = tvm.nd.array(np.zeros(shape=(num,)).astype(B.dtype), ctx)
        built(a, b)
        expected = np.vectorize(ref_popcount)(a.asnumpy())
        tvm.testing.assert_allclose(b.asnumpy(), expected)
    for dtype in ("uint32", "uint64"):
        check_dtype(dtype)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_load_permute_pad():
    """Vectorized kernel that reshapes (n, l) into (n//lanes, l, lanes)
    with `padding` zero columns added on each side of the middle axis."""
    def check_cuda(dtype, n, l, padding, lanes):
        if dtype == "float16" and not have_fp16(tvm.gpu(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        ctx = tvm.gpu(0)
        A = tvm.te.placeholder((n, l), name="A", dtype=dtype)
        # B[i, j, k] = 0 in the padding region, otherwise the permuted load
        # A[i * lanes + k, j - padding].
        B = tvm.te.compute(
            (n // lanes, l + 2 * padding, lanes),
            lambda i, j, k: tvm.te.if_then_else(
                tvm.te.any(j < padding, j >= l + padding),
                tvm.runtime.convert(0).astype(dtype),
                A[i * lanes + k, j - padding],
            ),
            name="B",
        )
        s = te.create_schedule(B.op)
        # One output axis each for block, thread and the vector lanes.
        block, thread, vectorize = s[B].op.axis
        s[B].bind(block, bx)
        s[B].bind(thread, tx)
        s[B].vectorize(vectorize)
        fun = tvm.build(s, [A, B], "cuda", name="vector_load_permute_pad")
        np_a = np.random.randint(low=-128, high=127, size=(n, l)).astype(A.dtype)
        a = tvm.nd.empty((n, l), A.dtype, ctx).copyfrom(np_a)
        b = tvm.nd.empty((n // lanes, l + padding * 2, lanes), B.dtype, ctx)
        fun(a, b)
        # Reference: group rows into lanes, move lanes innermost, zero-pad
        # the middle axis.
        np_a_reshape = np_a.reshape(n // lanes, lanes, l).transpose(0, 2, 1)
        ref = np.pad(
            np_a_reshape, ((0, 0), (padding, padding), (0, 0)), mode="constant", constant_values=0
        )
        tvm.testing.assert_allclose(b.asnumpy(), ref)
    check_cuda("int8", 64, 16, 3, 2)
    check_cuda("uint8", 64, 16, 3, 2)
    check_cuda("int8", 64, 16, 3, 4)
    check_cuda("uint8", 64, 16, 3, 4)
    check_cuda("int32", 64, 16, 3, 4)
    check_cuda("float16", 64, 16, 3, 4)
    check_cuda("float32", 64, 16, 3, 4)
def vcf_check_common(s, args):
    """Shared checker for the vectorized cooperative-fetching tests.

    Lowers the schedule and asserts every vectorized loop became
    Ramp/Broadcast nodes, then builds for CUDA and verifies an N x N
    matmul result against numpy.
    """
    # Problem size shared with both cooperative-fetching schedules.
    # (Previously this local was unused and 512 was hard-coded below.)
    N = 512
    # To check if every vectorize loop transforms to ramp expr successfully
    stmt = tvm.lower(s, args)
    # Use this as a stack flag to show whether this stmt is inside a BroadcastNode
    inside_broadcast = [False]
    # Possible patterns:
    #   Reduce init:        Store[Ramp] = Broadcast(0)
    #   Shared memory copy: Store[Ramp] = Load[Ramp]
    #   Compute:            Store[Ramp] = Load[Ramp] ... Broadcast[Load]
    def pre_visit(stmt):
        if isinstance(stmt, tvm.tir.Broadcast):
            inside_broadcast[0] = True
            # Check Broadcast[Imm numbers] or Broadcast[Load] patterns
            assert isinstance(stmt.value, (tvm.tir.IntImm, tvm.tir.FloatImm, tvm.tir.Load))
        if isinstance(stmt, tvm.tir.Store):
            # Check Store[Ramp] pattern
            assert isinstance(stmt.index, tvm.tir.Ramp)
        if isinstance(stmt, tvm.tir.Load):
            # Check Broadcast[Load] or Load[Ramp] patterns
            assert inside_broadcast[0] or isinstance(stmt.index, tvm.tir.Ramp)
            # Skip the rest
            return stmt
        return None
    def post_visit(stmt):
        if isinstance(stmt, tvm.tir.Broadcast):
            inside_broadcast[0] = False
        return None
    tvm.tir.stmt_functor.ir_transform(stmt["main"].body, pre_visit, post_visit)
    tgt = tvm.target.cuda()
    mod = tvm.build(s, args, tgt)
    # To check if every vectorize loop transforms to correct instruction
    # print(mod.imported_modules[0].get_source())
    ctx = tvm.context("cuda", 0)
    a = tvm.nd.array(np.random.uniform(size=(N, N)).astype("float32"), ctx)
    b = tvm.nd.array(np.random.uniform(size=(N, N)).astype("float32"), ctx)
    c = tvm.nd.array(np.zeros((N, N), dtype="float32"), ctx)
    mod(a, b, c)
    tvm.testing.assert_allclose(c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_cooperative_fetching_x():
    """512x512 matmul with cooperative fetching into shared memory, all
    thread binding on threadIdx.x; validated by vcf_check_common."""
    N = 512
    A = te.placeholder((N, N), name="A", dtype="float32")
    B = te.placeholder((N, N), name="B", dtype="float32")
    k = te.reduce_axis((0, N), name="k")
    C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
    s = te.create_schedule(C.op)
    i, j = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    # Shared-memory staging buffers for both inputs.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    # Multi-level tiling of the output axes; innermost factors of 4 are the
    # vector lanes.
    i3, i4 = s[C].split(i, factor=4)
    i2, i3 = s[C].split(i3, factor=2)
    i1, i2 = s[C].split(i2, factor=8)
    i0, i1 = s[C].split(i1, factor=1)
    j3, j4 = s[C].split(j, factor=4)
    j2, j3 = s[C].split(j3, factor=2)
    j1, j2 = s[C].split(j2, factor=8)
    j0, j1 = s[C].split(j1, factor=2)
    k1, k2 = s[C].split(k, factor=8)
    k0, k1 = s[C].split(k1, factor=8)
    s[C].reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3, k2, i4, j4)
    # Fuse tile levels and bind them to block / vthread / thread.
    block_it = s[C].fuse(i0, j0)
    s[C].bind(block_it, tvm.te.thread_axis("blockIdx.x"))
    vthread_it = s[C].fuse(i1, j1)
    s[C].bind(vthread_it, tvm.te.thread_axis("vthread"))
    thread_it = s[C].fuse(i2, j2)
    s[C].bind(thread_it, tvm.te.thread_axis("threadIdx.x"))
    s[C].vectorize(j4)
    # Cooperative fetch: both shared copies happen at the k0 loop and are
    # distributed across threadIdx.x with vectorized (factor-4) loads.
    s[AA].compute_at(s[C], k0)
    iaa, jaa = s[AA].op.axis
    s[BB].compute_at(s[C], k0)
    ibb, jbb = s[BB].op.axis
    aa_fused = s[AA].fuse(iaa, jaa)
    bb_fused = s[BB].fuse(ibb, jbb)
    aa1, aa2 = s[AA].split(aa_fused, factor=4)
    aa0, aa1 = s[AA].split(aa1, factor=64)
    bb1, bb2 = s[BB].split(bb_fused, factor=4)
    bb0, bb1 = s[BB].split(bb1, factor=64)
    s[AA].bind(aa1, tvm.te.thread_axis("threadIdx.x"))
    s[AA].vectorize(aa2)
    s[BB].bind(bb1, tvm.te.thread_axis("threadIdx.x"))
    s[BB].vectorize(bb2)
    vcf_check_common(s, [A, B, C])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_cooperative_fetching_xy():
    """Same matmul as the _x variant but with a 2-D thread layout
    (threadIdx.y on i, threadIdx.x on j); validated by vcf_check_common."""
    N = 512
    A = te.placeholder((N, N), name="A")
    B = te.placeholder((N, N), name="B")
    k = te.reduce_axis((0, N), name="k")
    C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
    s = te.create_schedule(C.op)
    i, j = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    # Shared-memory staging buffers for both inputs.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    # Multi-level tiling of the output axes; innermost factors of 4 are the
    # vector lanes.
    i3, i4 = s[C].split(i, factor=4)
    i2, i3 = s[C].split(i3, factor=2)
    i1, i2 = s[C].split(i2, factor=8)
    i0, i1 = s[C].split(i1, factor=1)
    j3, j4 = s[C].split(j, factor=4)
    j2, j3 = s[C].split(j3, factor=2)
    j1, j2 = s[C].split(j2, factor=8)
    j0, j1 = s[C].split(j1, factor=2)
    k1, k2 = s[C].split(k, factor=8)
    k0, k1 = s[C].split(k1, factor=8)
    s[C].reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3, k2, i4, j4)
    block_it = s[C].fuse(i0, j0)
    s[C].bind(block_it, tvm.te.thread_axis("blockIdx.x"))
    vthread_it = s[C].fuse(i1, j1)
    s[C].bind(vthread_it, tvm.te.thread_axis("vthread"))
    # 2-D thread block: i2 -> threadIdx.y, j2 -> threadIdx.x.
    s[C].bind(i2, tvm.te.thread_axis("threadIdx.y"))
    s[C].bind(j2, tvm.te.thread_axis("threadIdx.x"))
    s[C].vectorize(j4)
    # Cooperative fetch at k0, spread over both thread dimensions with
    # vectorized (factor-4) loads.
    s[AA].compute_at(s[C], k0)
    iaa, jaa = s[AA].op.axis
    s[BB].compute_at(s[C], k0)
    ibb, jbb = s[BB].op.axis
    aa_fused = s[AA].fuse(iaa, jaa)
    bb_fused = s[BB].fuse(ibb, jbb)
    aa2, aa3 = s[AA].split(aa_fused, factor=4)
    aa1, aa2 = s[AA].split(aa2, factor=8)
    aa0, aa1 = s[AA].split(aa1, factor=8)
    bb2, bb3 = s[BB].split(bb_fused, factor=4)
    bb1, bb2 = s[BB].split(bb2, factor=8)
    bb0, bb1 = s[BB].split(bb1, factor=8)
    s[AA].bind(aa1, tvm.te.thread_axis("threadIdx.y"))
    s[AA].bind(aa2, tvm.te.thread_axis("threadIdx.x"))
    s[AA].vectorize(aa3)
    s[BB].bind(bb1, tvm.te.thread_axis("threadIdx.y"))
    s[BB].bind(bb2, tvm.te.thread_axis("threadIdx.x"))
    s[BB].vectorize(bb3)
    vcf_check_common(s, [A, B, C])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_unrolled_vectorization():
    """Matmul with an unrolled reduction and vectorized writes through a
    local cache stage; result of ones x ones must be N everywhere."""
    dtype = "float32"
    target = "cuda"
    # Compute declaration
    N = 128
    A = te.placeholder((N, N), name="A")
    B = te.placeholder((N, N), name="B")
    k = te.reduce_axis((0, N), name="k")
    C = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
    # Schedule
    s = te.create_schedule([C.op])
    CC = s.cache_write(C, "local")
    i, j = s[C].op.axis
    # NOTE: these local bx/tx shadow the module-level bx/tx thread axes
    # within this function.
    bx, tx, ii, ji = s[C].tile(i, j, 1, 2)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].vectorize(ji)
    s[CC].compute_at(s[C], tx)
    i, j = s[CC].op.axis
    k = s[CC].op.reduce_axis[0]
    # Unroll the inner half of the reduction, vectorize the output axis.
    ko, ki = s[CC].split(k, 2)
    s[CC].unroll(ki)
    s[CC].vectorize(j)
    # Check correctness
    ctx = tvm.context(target)
    a_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), ctx=ctx)
    b_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), ctx=ctx)
    c_tvm = tvm.nd.empty((N, N), ctx=ctx)
    func_tvm = tvm.build(s, [A, B, C], target=target)
    func_tvm(a_tvm, b_tvm, c_tvm)
    c_np = c_tvm.asnumpy()
    # ones @ ones == N in every cell.
    tvm.testing.assert_allclose(c_np, N * np.ones((N, N)))
if __name__ == "__main__":
    # Ad-hoc runner: execute the whole suite without pytest.
    test_cuda_vectorize_add()
    test_cuda_multiply_add()
    test_cuda_vectorize_load()
    test_cuda_make_int8()
    test_cuda_inf_nan()
    test_cuda_shuffle()
    test_vectorized_casts()
    test_cuda_reduction_binding()
    test_crossthread_reduction1()
    test_crossthread_reduction2()
    test_rfactor_predicates()
    test_cuda_const_float_to_half()
    test_cuda_reduction()
    test_cuda_mix_threaded_and_normal_reduction()
    test_cuda_floordiv_with_vectorization()
    test_cuda_floormod_with_vectorization()
    test_vectorized_intrin1()
    test_vectorized_intrin2()
    test_vectorized_popcount()
    test_cuda_vectorize_load_permute_pad()
    test_vectorized_cooperative_fetching_x()
    test_vectorized_cooperative_fetching_xy()
    test_unrolled_vectorization()
| apache-2.0 |
jml/flocker | flocker/provision/_effect.py | 15 | 2630 | from functools import partial
from six import reraise
from characteristic import attributes
from effect import (
sync_performer, Effect,
ComposedDispatcher, TypeDispatcher, base_dispatcher)
# This is from https://github.com/radix/effect/pull/46
@attributes(['results', 'exc_info'], apply_immutable=True)
class SequenceFailed(Exception, object):
    """
    Raised if an effect in a :class:``Sequence`` fails.
    :ivar list results: The results of the effects that succeeded before
        the failure.
    :ivar exc_info: The ``sys.exc_info()``-style triple describing the
        failure of the last run effect.
    """
    def __str__(self):
        # Exception has a custom __str__ that looks at the arguments passed
        # to its __init__.  Since we don't pass any, that is useless.  The
        # following will duplicate the class name in the traceback, but is
        # better than an empty message.
        return repr(self)
@attributes(["effects"], apply_with_init=False, apply_immutable=True)
class Sequence(object):
    """
    An Effect intent that runs a sequence of effects serially.

    Performed by :func:`perform_sequence` below.

    :ivar effects: The effects to run, in order.
    :returns list: The list of results of the effects.
    :raises SequenceFailed: If one of the effects fails.
    """
    def __init__(self, effects):
        """
        :param effects: The list of effects to execute in sequence.
        """
        self.effects = effects
def sequence(effects):
    """Combine several Effects into one that performs them in order.

    The resulting Effect succeeds with the list of the individual results,
    in input order, and fails with :class:`SequenceFailed` as soon as any
    child effect fails.

    :param effects: Iterable of Effects to perform in sequence.
    :return: An Effect wrapping a :class:`Sequence` intent.
    """
    intent = Sequence(list(effects))
    return Effect(intent)
@sync_performer
def perform_sequence(dispatcher, intent):
    """Performer for :class:`Sequence`.

    Chains the intent's effects so each runs after the previous one
    succeeds, accumulating results; the first failure aborts the chain by
    raising :class:`SequenceFailed`.
    """
    effects = list(intent.effects)
    if not effects:
        return []
    results = []
    def succeed(next_effect, result):
        # Record the finished effect's result, then hand the next effect in
        # the chain back to the effect machinery.
        results.append(result)
        return next_effect
    def fail(result):
        # `result` is an exc_info-style triple; re-raise it wrapped in
        # SequenceFailed, preserving the original traceback (result[2]).
        reraise(SequenceFailed,
                SequenceFailed(results=results, exc_info=result),
                result[2])
    def reducer(next_effect, effect):
        return effect.on(success=partial(succeed, next_effect),
                         error=fail)
    # Fold from the right so each effect's success callback points at the
    # remainder of the chain; the initial accumulator (the results list) is
    # what the final effect's success ultimately returns.
    # NOTE: relies on the Python 2 builtin `reduce`.
    return reduce(reducer, reversed(effects), results)
# Dispatcher that knows how to perform the Sequence intent, falling back to
# effect's base dispatcher for all other intents.
dispatcher = ComposedDispatcher([
    TypeDispatcher({
        Sequence: perform_sequence,
    }),
    base_dispatcher,
])
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-60/files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """
    Base prober for multi-byte charsets.

    Subclasses are expected to assign ``_mCodingSM`` (a byte-sequence state
    machine) and ``_mDistributionAnalyzer`` (a character-frequency
    analyser); this class drives both over the input and combines their
    verdicts into the prober state.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        self._mDistributionAnalyzer = None  # supplied by subclasses
        self._mCodingSM = None  # supplied by subclasses
        self._mLastChar = [0, 0]  # last byte pair, carried across feed() calls
    def reset(self):
        # Restore the prober (and its helpers, when present) to the initial
        # state so it can be reused on new input.
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]
    def get_charset_name(self):
        # Abstract: subclasses return the charset name they detect.
        pass
    def feed(self, aBuf):
        # Feed a chunk of bytes; returns the prober state afterwards.
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Advance the state machine one byte at a time.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed the final byte
                # pair of that character to the distribution analyser.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the last byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once confidence is high enough.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| gpl-3.0 |
MarcosCommunity/odoo | addons/account/wizard/account_statement_from_invoice.py | 224 | 4128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices

    Transient wizard: the user selects move lines and this model creates
    matching bank statement lines on the statement found in the context.
    """
    _name = "account.statement.from.invoice.lines"
    _description = "Entries by Statement from Invoices"
    _columns = {
        'line_ids': fields.many2many('account.move.line', 'account_move_line_relation', 'move_id', 'line_id', 'Invoices'),
    }
    def populate_statement(self, cr, uid, ids, context=None):
        # Create one bank statement line per selected move line on the
        # statement given by context['statement_id'], converting amounts to
        # the statement currency where needed.  Always closes the wizard.
        context = dict(context or {})
        statement_id = context.get('statement_id', False)
        if not statement_id:
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}
        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        statement = statement_obj.browse(cr, uid, statement_id, context=context)
        line_date = statement.date
        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            # Signed amount: debits positive, credits negative.
            amount = 0.0
            if line.debit > 0:
                amount = line.debit
            elif line.credit > 0:
                amount = -line.credit
            if line.amount_currency:
                if line.company_id.currency_id.id != statement.currency.id:
                    # In the specific case where the company currency and the statement currency are the same
                    # the debit/credit field already contains the amount in the right currency.
                    # We therefore avoid to re-convert the amount in the currency, to prevent Gain/loss exchanges
                    amount = currency_obj.compute(cr, uid, line.currency_id.id,
                        statement.currency.id, line.amount_currency, context=ctx)
            elif (line.invoice and line.invoice.currency_id.id != statement.currency.id):
                amount = currency_obj.compute(cr, uid, line.invoice.currency_id.id,
                    statement.currency.id, amount, context=ctx)
            # Pass the source move line / invoice along to create() hooks.
            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'partner_id': line.partner_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'date': statement.date,
                'amount_currency': line.amount_currency,
                'currency_id': line.currency_id.id,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
coded-by-hand/mass | env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py | 8 | 8109 | """Handles all VCS (version control) support"""
import os
import shutil
from pip.backwardcompat import urlparse, urllib
from pip.log import logger
from pip.util import display_path, backup_dir, find_command, ask, rmtree
__all__ = ['vcs', 'get_src_requirement']
class VcsSupport(object):
    """Registry of the version-control backends pip knows about."""
    _registry = {}
    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']

    def __init__(self):
        # Teach urlparse about the VCS URL schemes so netloc/fragment
        # handling works for them.
        urlparse.uses_netloc.extend(self.schemes)
        urlparse.uses_fragment.extend(self.schemes)
        super(VcsSupport, self).__init__()

    def __iter__(self):
        # Iterating the registry yields the registered backend names.
        return iter(self._registry)

    @property
    def backends(self):
        return list(self._registry.values())

    @property
    def dirnames(self):
        return [cls.dirname for cls in self.backends]

    @property
    def all_schemes(self):
        # Flatten the scheme lists of every registered backend.
        return [scheme for cls in self.backends for scheme in cls.schemes]

    def register(self, cls):
        """Add a backend class to the registry (first registration wins)."""
        if not hasattr(cls, 'name'):
            logger.warn('Cannot register VCS %s' % cls.__name__)
            return
        self._registry.setdefault(cls.name, cls)

    def unregister(self, cls=None, name=None):
        """Remove a backend, matched by name first, then by class."""
        if name in self._registry:
            del self._registry[name]
            return
        if cls in self._registry.values():
            del self._registry[cls.name]
            return
        logger.warn('Cannot unregister because no class or name given')

    def get_backend_name(self, location):
        """
        Return the name of the version control backend if found at given
        location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
        """
        for vc_type in self._registry.values():
            if os.path.exists(os.path.join(location, vc_type.dirname)):
                return vc_type.name
        return None

    def get_backend(self, name):
        # Case-insensitive lookup; None when unknown.
        return self._registry.get(name.lower())

    def get_backend_from_location(self, location):
        name = self.get_backend_name(location)
        return self.get_backend(name) if name else None
# Module-level registry instance (used by get_src_requirement below).
vcs = VcsSupport()
class VersionControl(object):
    """
    Base class for pip's VCS backends (Subversion, Git, Mercurial, Bazaar).

    Subclasses set ``name`` (the command / URL scheme prefix) and
    ``dirname`` (the metadata directory, e.g. ``.git``) and implement the
    abstract methods below.
    """
    name = ''
    dirname = ''

    def __init__(self, url=None, *args, **kwargs):
        self.url = url
        self._cmd = None  # lazily-located executable, cached by `cmd`
        super(VersionControl, self).__init__(*args, **kwargs)

    def _filter(self, line):
        # Default output filter: log every line at INFO level.
        return (logger.INFO, line)

    def _is_local_repository(self, repo):
        """
        posix absolute paths start with os.path.sep,
        win32 ones start with a drive (like c:\\folder)
        """
        drive, tail = os.path.splitdrive(repo)
        return repo.startswith(os.path.sep) or drive

    @property
    def cmd(self):
        """Full path to this VCS's executable (located once, then cached)."""
        if self._cmd is not None:
            return self._cmd
        command = find_command(self.name)
        logger.info('Found command %r at %r' % (self.name, command))
        self._cmd = command
        return command

    def get_url_rev(self):
        """
        Returns the correct repository URL and revision by parsing the given
        repository URL (strips the ``<vcs>+`` prefix and any ``@rev`` suffix).
        """
        url = self.url.split('+', 1)[1]
        scheme, netloc, path, query, frag = urlparse.urlsplit(url)
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
        return url, rev

    def get_info(self, location):
        """
        Returns (url, revision), where both are strings
        """
        assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
        return self.get_url(location), self.get_revision(location)

    def normalize_url(self, url):
        """
        Normalize a URL for comparison by unquoting it and removing any trailing slash.
        """
        return urllib.unquote(url).rstrip('/')

    def compare_urls(self, url1, url2):
        """
        Compare two repo URLs for identity, ignoring incidental differences.
        """
        return (self.normalize_url(url1) == self.normalize_url(url2))

    def parse_vcs_bundle_file(self, content):
        """
        Takes the contents of the bundled text file that explains how to revert
        the stripped off version control data of the given package and returns
        the URL and revision of it.
        """
        raise NotImplementedError

    def obtain(self, dest):
        """
        Called when installing or updating an editable package, takes the
        source path of the checkout.
        """
        raise NotImplementedError

    def switch(self, dest, url, rev_options):
        """
        Switch the repo at ``dest`` to point to ``URL``.
        """
        # BUG FIX: this previously did ``raise NotImplemented``.
        # NotImplemented is a sentinel value, not an exception class, so
        # raising it produces a TypeError instead of signalling "abstract
        # method" to subclass authors.
        raise NotImplementedError

    def update(self, dest, rev_options):
        """
        Update an already-existing repo to the given ``rev_options``.
        """
        raise NotImplementedError

    def check_destination(self, dest, url, rev_options, rev_display):
        """
        Prepare a location to receive a checkout/clone.
        Return True if the location is ready for (and requires) a
        checkout/clone, False otherwise.
        """
        checkout = True
        prompt = False
        if os.path.exists(dest):
            checkout = False
            if os.path.exists(os.path.join(dest, self.dirname)):
                # Same VCS already checked out here; update or ask to switch.
                existing_url = self.get_url(dest)
                if self.compare_urls(existing_url, url):
                    logger.info('%s in %s exists, and has correct URL (%s)'
                                % (self.repo_name.title(), display_path(dest), url))
                    logger.notify('Updating %s %s%s'
                                  % (display_path(dest), self.repo_name, rev_display))
                    self.update(dest, rev_options)
                else:
                    logger.warn('%s %s in %s exists with URL %s'
                                % (self.name, self.repo_name, display_path(dest), existing_url))
                    prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b'))
            else:
                logger.warn('Directory %s already exists, and is not a %s %s.'
                            % (dest, self.name, self.repo_name))
                prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
        if prompt:
            # Interactive conflict resolution for an unexpected destination.
            logger.warn('The plan is to install the %s repository %s'
                        % (self.name, url))
            response = ask('What to do? %s' % prompt[0], prompt[1])
            if response == 's':
                logger.notify('Switching %s %s to %s%s'
                              % (self.repo_name, display_path(dest), url, rev_display))
                self.switch(dest, url, rev_options)
            elif response == 'i':
                # do nothing
                pass
            elif response == 'w':
                logger.warn('Deleting %s' % display_path(dest))
                rmtree(dest)
                checkout = True
            elif response == 'b':
                dest_dir = backup_dir(dest)
                logger.warn('Backing up %s to %s'
                            % (display_path(dest), dest_dir))
                shutil.move(dest, dest_dir)
                checkout = True
        return checkout

    def unpack(self, location):
        # Remove any existing checkout at `location`, then obtain a fresh one.
        if os.path.exists(location):
            rmtree(location)
        self.obtain(location)

    def get_src_requirement(self, dist, location, find_tags=False):
        raise NotImplementedError
def get_src_requirement(dist, location, find_tags):
    """Produce a source requirement for the editable checkout at *location*.

    Delegates to the VCS backend detected there; when no backend matches,
    logs a warning and falls back to the distribution's own requirement.
    """
    backend_cls = vcs.get_backend_from_location(location)
    if not backend_cls:
        logger.warn('cannot determine version of editable source in %s (is not SVN checkout, Git clone, Mercurial clone or Bazaar branch)' % location)
        return dist.as_requirement()
    return backend_cls().get_src_requirement(dist, location, find_tags)
| bsd-2-clause |
gunchleoc/django | django/db/migrations/writer.py | 24 | 23898 | from __future__ import unicode_literals
import collections
import datetime
import decimal
import functools
import math
import os
import re
import types
from importlib import import_module
from django import get_version
from django.apps import apps
from django.db import migrations, models
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import LazyObject, Promise
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
from django.utils.timezone import now, utc
from django.utils.version import get_docs_version
try:
import enum
except ImportError:
# No support on Python 2 if enum34 isn't installed.
enum = None
class SettingsReference(str):
    """
    Special subclass of string which actually references a current settings
    value. It's treated as the value in memory, but serializes out to a
    settings.NAME attribute reference.
    """
    # Idiom fix: the first argument of __new__ is the class, conventionally
    # named `cls` (it was previously misnamed `self`).
    def __new__(cls, value, setting_name):
        # The reference behaves exactly like the resolved setting value.
        return str.__new__(cls, value)

    def __init__(self, value, setting_name):
        # Remember which settings attribute this value came from so the
        # serializer can emit ``settings.<setting_name>`` instead of the
        # literal value.
        self.setting_name = setting_name
class OperationWriter(object):
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
def serialize(self):
def _write(_arg_name, _arg_value):
if (_arg_name in self.operation.serialization_expand_args and
isinstance(_arg_value, (list, tuple, dict))):
if isinstance(_arg_value, dict):
self.feed('%s={' % _arg_name)
self.indent()
for key, value in _arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s: %s' % (key_string, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s: %s,' % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % _arg_name)
self.indent()
for item in _arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
args = arg_string.splitlines()
if len(args) > 1:
for arg in args[:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s=%s' % (_arg_name, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s=%s,' % (_arg_name, arg_string))
imports.update(arg_imports)
imports = set()
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
# See if this operation is in django.db.migrations. If it is,
# We can just use the fact we already have that imported,
# otherwise, we need to add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed('migrations.%s(' % name)
else:
imports.add('import %s' % (self.operation.__class__.__module__))
self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
self.indent()
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
# Only iterate over remaining arguments
for arg_name in operation_args[i:]:
if arg_name in kwargs: # Don't sort to maintain signature order
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
    """Step the writer's indentation one level deeper."""
    self.indentation = self.indentation + 1
def unindent(self):
    """Step the writer's indentation one level back out."""
    self.indentation = self.indentation - 1
def feed(self, line):
    """Append *line* to the buffer, prefixed by the current indentation."""
    prefix = ' ' * (4 * self.indentation)
    self.buff.append(prefix + line)
def render(self):
    """Join the buffered lines into the final newline-separated text."""
    return "\n".join(line for line in self.buff)
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
    # The Migration instance whose source file we will generate.
    self.migration = migration
    # Set to True by as_string() when it detects imports of functions
    # from other migration files; those must then be ported by hand.
    self.needs_manual_porting = False
def as_string(self):
    """
    Returns a string of the file contents.

    Builds up the pieces of MIGRATION_TEMPLATE (imports, dependencies,
    operations, replaces/initial flags) and interpolates them.
    """
    items = {
        "replaces_str": "",
        "initial_str": "",
    }
    imports = set()

    # Deconstruct operations
    operations = []
    for operation in self.migration.operations:
        operation_string, operation_imports = OperationWriter(operation).serialize()
        imports.update(operation_imports)
        operations.append(operation_string)
    items["operations"] = "\n".join(operations) + "\n" if operations else ""

    # Format dependencies and write out swappable dependencies right
    dependencies = []
    for dependency in self.migration.dependencies:
        if dependency[0] == "__setting__":
            # Swappable dependency: resolved at load time from settings.
            dependencies.append("        migrations.swappable_dependency(settings.%s)," % dependency[1])
            imports.add("from django.conf import settings")
        else:
            # No need to output bytestrings for dependencies
            dependency = tuple(force_text(s) for s in dependency)
            dependencies.append("        %s," % self.serialize(dependency)[0])
    items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""

    # Format imports nicely, swapping imports of functions from migration files
    # for comments
    migration_imports = set()
    for line in list(imports):
        # An import of the form "import app.migrations.0002_x" means a
        # function living in another migration file is referenced; it
        # cannot be imported and must be copied manually.
        if re.match("^import (.*)\.\d+[^\s]*$", line):
            migration_imports.add(line.split("import")[1].strip())
            imports.remove(line)
            self.needs_manual_porting = True

    # django.db.migrations is always used, but models import may not be.
    # If models import exists, merge it with migrations import.
    if "from django.db import models" in imports:
        imports.discard("from django.db import models")
        imports.add("from django.db import migrations, models")
    else:
        imports.add("from django.db import migrations")

    # Sort imports by the package / module to be imported (the part after
    # "from" in "from ... import ..." or after "import" in "import ...").
    sorted_imports = sorted(imports, key=lambda i: i.split()[1])
    items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
    if migration_imports:
        items["imports"] += (
            "\n\n# Functions from the following migrations need manual "
            "copying.\n# Move them and any dependencies into this file, "
            "then update the\n# RunPython operations to refer to the local "
            "versions:\n# %s"
        ) % "\n# ".join(sorted(migration_imports))

    # If there's a replaces, make a string for it
    if self.migration.replaces:
        items['replaces_str'] = "\n    replaces = %s\n" % self.serialize(self.migration.replaces)[0]
    # Hinting that goes into comment
    items.update(
        version=get_version(),
        timestamp=now().strftime("%Y-%m-%d %H:%M"),
    )

    if self.migration.initial:
        items['initial_str'] = "\n    initial = True\n"

    return (MIGRATION_TEMPLATE % items).encode("utf8")
@staticmethod
def serialize_datetime(value):
    """
    Serialize a datetime as valid, executable Python source.

    Timezone-aware values are normalized to UTC and rendered with an
    'executable' ``utc`` tzinfo; datetime_safe instances gain the
    ``datetime.`` module prefix so the emitted repr is importable.
    """
    tz = value.tzinfo
    if tz is not None and tz != utc:
        value = value.astimezone(utc)
    rendered = repr(value).replace("<UTC>", "utc")
    if isinstance(value, datetime_safe.datetime):
        rendered = "datetime.%s" % rendered
    return rendered
@property
def basedir(self):
    # Directory the migration file should live in; creates the custom
    # MIGRATION_MODULES package on disk if it does not exist yet.
    migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)

    if migrations_package_name is None:
        raise ValueError(
            "Django can't create migrations for app '%s' because "
            "migrations have been disabled via the MIGRATION_MODULES "
            "setting." % self.migration.app_label
        )

    # See if we can import the migrations module directly
    try:
        migrations_module = import_module(migrations_package_name)
    except ImportError:
        pass
    else:
        try:
            return upath(module_dir(migrations_module))
        except ValueError:
            # Namespace packages / modules without a filesystem location.
            pass

    # Alright, see if it's a direct submodule of the app
    app_config = apps.get_app_config(self.migration.app_label)
    maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
    if app_config.name == maybe_app_name:
        return os.path.join(app_config.path, migrations_package_basename)

    # In case of using MIGRATION_MODULES setting and the custom package
    # doesn't exist, create one, starting from an existing package
    existing_dirs, missing_dirs = migrations_package_name.split("."), []
    while existing_dirs:
        # Peel path components off the right until an importable,
        # filesystem-backed ancestor package is found.
        missing_dirs.insert(0, existing_dirs.pop(-1))
        try:
            base_module = import_module(".".join(existing_dirs))
        except ImportError:
            continue
        else:
            try:
                base_dir = upath(module_dir(base_module))
            except ValueError:
                continue
            else:
                break
    else:
        # while/else: loop exhausted without break -> no usable ancestor.
        raise ValueError(
            "Could not locate an appropriate location to create "
            "migrations package %s. Make sure the toplevel "
            "package exists and can be imported." %
            migrations_package_name)

    final_dir = os.path.join(base_dir, *missing_dirs)
    if not os.path.isdir(final_dir):
        os.makedirs(final_dir)
    for missing_dir in missing_dirs:
        # Touch __init__.py in every newly implied directory so the
        # package chain is importable.
        base_dir = os.path.join(base_dir, missing_dir)
        with open(os.path.join(base_dir, "__init__.py"), "w"):
            pass

    return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
    # Full filesystem path of the migration file; note that reading
    # basedir may create the migrations package on disk as a side effect.
    return os.path.join(self.basedir, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
name, imports = cls._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in sorted(kwargs.items()):
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@classmethod
def _serialize_path(cls, path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
@classmethod
def serialize(cls, value):
    """
    Serializes the value to a string that's parsable by Python, along
    with any needed imports to make that string work.
    More advanced than repr() as it can encode things
    like datetime.datetime.now.

    Returns a (string, set-of-import-lines) pair; dispatches on the
    runtime type of ``value``, recursing into containers.
    """
    # FIXME: Ideally Promise would be reconstructible, but for now we
    # use force_text on them and defer to the normal string serialization
    # process.
    if isinstance(value, Promise):
        value = force_text(value)
    elif isinstance(value, LazyObject):
        # The unwrapped value is returned as the first item of the
        # arguments tuple.
        value = value.__reduce__()[1][0]

    # Sequences
    if isinstance(value, (frozenset, list, set, tuple)):
        imports = set()
        strings = []
        for item in value:
            item_string, item_imports = cls.serialize(item)
            imports.update(item_imports)
            strings.append(item_string)
        if isinstance(value, set):
            # Don't use the literal "{%s}" as it doesn't support empty set
            format = "set([%s])"
        elif isinstance(value, frozenset):
            format = "frozenset([%s])"
        elif isinstance(value, tuple):
            # A one-element tuple needs the trailing comma ("(x,)");
            # other lengths, including empty, must not get one.
            format = "(%s)" if len(value) != 1 else "(%s,)"
        else:
            format = "[%s]"
        return format % (", ".join(strings)), imports
    # Dictionaries
    elif isinstance(value, dict):
        imports = set()
        strings = []
        # Sorted for deterministic output across runs.
        for k, v in sorted(value.items()):
            k_string, k_imports = cls.serialize(k)
            v_string, v_imports = cls.serialize(v)
            imports.update(k_imports)
            imports.update(v_imports)
            strings.append((k_string, v_string))
        return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
    # Enums
    elif enum and isinstance(value, enum.Enum):
        # ``enum`` may be None on Pythons without the module, hence the guard.
        enum_class = value.__class__
        module = enum_class.__module__
        imports = {"import %s" % module}
        v_string, v_imports = cls.serialize(value.value)
        imports.update(v_imports)
        return "%s.%s(%s)" % (module, enum_class.__name__, v_string), imports
    # Datetimes
    elif isinstance(value, datetime.datetime):
        value_repr = cls.serialize_datetime(value)
        imports = ["import datetime"]
        if value.tzinfo is not None:
            imports.append("from django.utils.timezone import utc")
        return value_repr, set(imports)
    # Dates
    elif isinstance(value, datetime.date):
        value_repr = repr(value)
        if isinstance(value, datetime_safe.date):
            value_repr = "datetime.%s" % value_repr
        return value_repr, {"import datetime"}
    # Times
    elif isinstance(value, datetime.time):
        value_repr = repr(value)
        if isinstance(value, datetime_safe.time):
            value_repr = "datetime.%s" % value_repr
        return value_repr, {"import datetime"}
    # Timedeltas
    elif isinstance(value, datetime.timedelta):
        return repr(value), {"import datetime"}
    # Settings references
    elif isinstance(value, SettingsReference):
        return "settings.%s" % value.setting_name, {"from django.conf import settings"}
    # Simple types
    elif isinstance(value, float):
        if math.isnan(value) or math.isinf(value):
            # nan/inf have no literal form; reconstruct via float().
            return 'float("{}")'.format(value), set()
        return repr(value), set()
    elif isinstance(value, six.integer_types + (bool, type(None))):
        return repr(value), set()
    elif isinstance(value, six.binary_type):
        value_repr = repr(value)
        if six.PY2:
            # Prepend the `b` prefix since we're importing unicode_literals
            value_repr = 'b' + value_repr
        return value_repr, set()
    elif isinstance(value, six.text_type):
        value_repr = repr(value)
        if six.PY2:
            # Strip the `u` prefix since we're importing unicode_literals
            value_repr = value_repr[1:]
        return value_repr, set()
    # Decimal
    elif isinstance(value, decimal.Decimal):
        return repr(value), {"from decimal import Decimal"}
    # Django fields
    elif isinstance(value, models.Field):
        attr_name, path, args, kwargs = value.deconstruct()
        return cls.serialize_deconstructed(path, args, kwargs)
    # Classes
    elif isinstance(value, type):
        special_cases = [
            (models.Model, "models.Model", []),
        ]
        for case, string, imports in special_cases:
            if case is value:
                return string, set(imports)
        if hasattr(value, "__module__"):
            module = value.__module__
            if module == six.moves.builtins.__name__:
                return value.__name__, set()
            else:
                return "%s.%s" % (module, value.__name__), {"import %s" % module}
    elif isinstance(value, models.manager.BaseManager):
        as_manager, manager_path, qs_path, args, kwargs = value.deconstruct()
        if as_manager:
            name, imports = cls._serialize_path(qs_path)
            return "%s.as_manager()" % name, imports
        else:
            return cls.serialize_deconstructed(manager_path, args, kwargs)
    elif isinstance(value, Operation):
        string, imports = OperationWriter(value, indentation=0).serialize()
        # Nested operation, trailing comma is handled in upper OperationWriter._write()
        return string.rstrip(','), imports
    elif isinstance(value, functools.partial):
        imports = {'import functools'}
        # Serialize functools.partial() arguments
        func_string, func_imports = cls.serialize(value.func)
        args_string, args_imports = cls.serialize(value.args)
        keywords_string, keywords_imports = cls.serialize(value.keywords)
        # Add any imports needed by arguments
        imports.update(func_imports)
        imports.update(args_imports)
        imports.update(keywords_imports)
        return (
            "functools.partial(%s, *%s, **%s)" % (
                func_string, args_string, keywords_string,
            ),
            imports,
        )
    # Anything that knows how to deconstruct itself.
    elif hasattr(value, 'deconstruct'):
        return cls.serialize_deconstructed(*value.deconstruct())
    # Functions
    elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
        # @classmethod?
        if getattr(value, "__self__", None) and isinstance(value.__self__, type):
            klass = value.__self__
            module = klass.__module__
            return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module}
        # Further error checking
        if value.__name__ == '<lambda>':
            raise ValueError("Cannot serialize function: lambda")
        if value.__module__ is None:
            raise ValueError("Cannot serialize function %r: No module" % value)
        # Python 3 is a lot easier, and only uses this branch if it's not local.
        if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
            if "<" not in value.__qualname__:  # Qualname can include <locals>
                return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__}
        # Python 2/fallback version
        module_name = value.__module__
        # Make sure it's actually there and not an unbound method
        module = import_module(module_name)
        if not hasattr(module, value.__name__):
            raise ValueError(
                "Could not find function %s in %s.\n"
                "Please note that due to Python 2 limitations, you cannot "
                "serialize unbound method functions (e.g. a method "
                "declared and used in the same class body). Please move "
                "the function into the main module body to use migrations.\n"
                "For more information, see "
                "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                % (value.__name__, module_name, get_docs_version()))
        # Needed on Python 2 only
        if module_name == '__builtin__':
            return value.__name__, set()
        return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name}
    # Other iterables
    # NOTE(review): collections.Iterable was removed in Python 3.10
    # (moved to collections.abc) -- this branch breaks there; confirm the
    # supported Python range before relying on it.
    elif isinstance(value, collections.Iterable):
        imports = set()
        strings = []
        for item in value:
            item_string, item_imports = cls.serialize(item)
            imports.update(item_imports)
            strings.append(item_string)
        # When len(strings)==1, the tuple needs a trailing comma so it
        # round-trips as a tuple rather than a parenthesized expression.
        format = "(%s)" if len(strings) != 1 else "(%s,)"
        return format % (", ".join(strings)), imports
    # Compiled regex
    elif isinstance(value, (COMPILED_REGEX_TYPE, RegexObject)):
        imports = {"import re"}
        regex_pattern, pattern_imports = cls.serialize(value.pattern)
        regex_flags, flag_imports = cls.serialize(value.flags)
        imports.update(pattern_imports)
        imports.update(flag_imports)
        args = [regex_pattern]
        if value.flags:
            args.append(regex_flags)
        return "re.compile(%s)" % ', '.join(args), imports
    # Uh oh.
    else:
        raise ValueError(
            "Cannot serialize: %r\nThere are some values Django cannot serialize into "
            "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
            "topics/migrations/#migration-serializing" % (value, get_docs_version())
        )
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
# Generated by Django %(version)s on %(timestamp)s
from __future__ import unicode_literals
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| bsd-3-clause |
s2hc-johan/nikola | nikola/data/themes/base/messages/messages_pl.py | 8 | 1627 | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
# Polish (pl) translations of Nikola's built-in theme strings.
# Keys are the English source strings (including %s/{...} placeholders,
# which must be preserved verbatim in the translated values).
MESSAGES = {
    "%d min remaining to read": "zostało %d minut czytania",
    "(active)": "(aktywne)",
    "Also available in:": "Również dostępny w językach:",
    "Archive": "Archiwum",
    "Authors": "Autorzy",
    "Categories": "Kategorie",
    "Comments": "Komentarze",
    "LANGUAGE": "Polski",
    "Languages:": "Języki:",
    "More posts about %s": "Więcej postów o %s",
    "Newer posts": "Nowsze posty",
    "Next post": "Następny post",
    "No posts found.": "Nie znaleziono żadnych postów.",
    "Nothing found.": "Nic nie znaleziono.",
    "Older posts": "Starsze posty",
    "Original site": "Oryginalna strona",
    "Posted:": "Opublikowano:",
    "Posts about %s": "Posty o %s",
    "Posts by %s": "Posty autora %s",
    "Posts for year %s": "Posty z roku %s",
    "Posts for {month} {day}, {year}": "Posty z {day} {month} {year}",
    "Posts for {month} {year}": "Posty z {month} {year}",
    "Previous post": "Poprzedni post",
    "Publication date": "Data publikacji",
    "RSS feed": "Kanał RSS",
    "Read in English": "Czytaj po polsku",
    "Read more": "Czytaj więcej",
    "Skip to main content": "Przejdź do treści",
    "Source": "Źródło",
    "Subcategories:": "Podkategorie:",
    "Tags and Categories": "Tagi i Kategorie",
    "Tags": "Tagi",
    "Uncategorized": "Nieskategoryzowane",
    "Updates": "Aktualności",
    "Write your page here.": "Tu wpisz treść strony.",
    "Write your post here.": "Tu wpisz treść postu.",
    "old posts, page %d": "stare posty, strona %d",
    "page %d": "strona %d",
}
| mit |
endticket/uwsgi | uwsgidecorators.py | 3 | 10449 | from functools import partial
import sys
from threading import Thread
try:
import cPickle as pickle
except ImportError:
import pickle
import uwsgi
if uwsgi.masterpid() == 0:
raise Exception(
"you have to enable the uWSGI master process to use this module")
# Registry of functions runnable via the uWSGI spooler, keyed by name.
spooler_functions = {}
# Registry of functions invocable on mules via mule messages, keyed by name.
mule_functions = {}
# Callables executed in each process right after fork (see postfork et al.).
postfork_chain = []
# Python3 compatibility
def _encode1(val):
if sys.version_info >= (3, 0) and isinstance(val, str):
return val.encode('utf-8')
else:
return val
def _decode1(val):
if sys.version_info >= (3, 0) and isinstance(val, bytes):
return val.decode('utf-8')
else:
return val
def _encode_to_spooler(vars):
    """Return a copy of *vars* with every key and value run through _encode1."""
    encoded_items = ((_encode1(k), _encode1(v)) for (k, v) in vars.items())
    return dict(encoded_items)
def _decode_from_spooler(vars):
    """Return a copy of *vars* with every key and value run through _decode1."""
    decoded_items = ((_decode1(k), _decode1(v)) for (k, v) in vars.items())
    return dict(decoded_items)
def get_free_signal():
    """Return the first uWSGI signal number (0-255) not yet registered."""
    for candidate in range(0, 256):
        if uwsgi.signal_registered(candidate):
            continue
        return candidate
    raise Exception("No free uwsgi signal available")
def manage_spool_request(vars):
    # Spooler entry point: dispatch a spool job dict to the registered
    # Python function and translate its result into a spooler return code.
    # Decode the keys first: under Python 3 the spooler delivers the dict
    # with byte-string keys, so the 'args' membership test needs text keys.
    vars = dict((_decode1(K), V) for (K, V) in vars.items())
    if 'args' in vars:
        # Task registered with pass_arguments=True: restore the pickled
        # call signature before decoding the remaining metadata.
        for k in ('args', 'kwargs'):
            vars[k] = pickle.loads(vars.pop(k))
    vars = _decode_from_spooler(vars)
    f = spooler_functions[vars['ud_spool_func']]
    if 'args' in vars:
        ret = f(*vars['args'], **vars['kwargs'])
    else:
        ret = f(vars)
    # 'ud_spool_ret' (set by @spool/@spoolforever) overrides the function's
    # own return value; @spoolraw tasks report their result directly.
    return int(vars.get('ud_spool_ret', ret))
def postfork_chain_hook():
    """Run every registered post-fork callable, in registration order."""
    for hook in postfork_chain:
        hook()
uwsgi.spooler = manage_spool_request
uwsgi.post_fork_hook = postfork_chain_hook
class postfork(object):
    """
    Decorator scheduling a function to run in each process right after
    fork(). Usable bare (@postfork) to run in every worker, or with a
    worker id (@postfork(3)) to restrict execution to that worker.
    """

    def __init__(self, f):
        if callable(f):
            # Bare usage: f is the function itself; wid 0 means all workers.
            self.wid = 0
            self.f = f
        else:
            # Parametrized usage: f is the worker id; the function arrives
            # on the first __call__.
            self.f = None
            self.wid = f
        postfork_chain.append(self)

    def __call__(self, *args, **kwargs):
        if self.f:
            # Invoked by the post-fork hook: run only in the matching worker.
            if self.wid > 0 and self.wid != uwsgi.worker_id():
                return
            return self.f()
        # Parametrized decoration step: capture the wrapped function.
        self.f = args[0]
class _spoolraw(object):
    """
    Base spool decorator: enqueues a job for the uWSGI spooler that will
    invoke the wrapped function (via manage_spool_request). The task's own
    return value is reported to the spooler verbatim.
    """

    def __call__(self, *args, **kwargs):
        arguments = self.base_dict.copy()
        if not self.pass_arguments:
            # Legacy mode: the task receives a single dict of string fields.
            if len(args) > 0:
                arguments.update(args[0])
            if kwargs:
                arguments.update(kwargs)
        else:
            # pass_arguments mode: spooler-control keys are kept as plain
            # fields, everything else is pickled as the call signature.
            spooler_args = {}
            for key in ('message_dict', 'spooler', 'priority', 'at', 'body'):
                if key in kwargs:
                    spooler_args.update({key: kwargs.pop(key)})
            arguments.update(spooler_args)
            arguments.update(
                {'args': pickle.dumps(args), 'kwargs': pickle.dumps(kwargs)})
        return uwsgi.spool(_encode_to_spooler(arguments))

    # For backward compatibility (uWSGI < 1.9.13)
    def spool(self, *args, **kwargs):
        return self.__class__.__call__(self, *args, **kwargs)

    def __init__(self, f, pass_arguments):
        if 'spooler' not in uwsgi.opt:
            raise Exception(
                "you have to enable the uWSGI spooler to use @%s decorator" % self.__class__.__name__)
        self.f = f
        # Register by name so the spooler process can look the task up.
        spooler_functions[self.f.__name__] = self.f
        # For backward compatibility (uWSGI < 1.9.13)
        self.f.spool = self.__call__
        self.pass_arguments = pass_arguments
        self.base_dict = {'ud_spool_func': self.f.__name__}
class _spool(_spoolraw):
    """Spool decorator variant whose tasks run once (always report SPOOL_OK)."""

    def __call__(self, *args, **kwargs):
        self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_OK)
        return super(_spool, self).__call__(*args, **kwargs)
class _spoolforever(_spoolraw):
    """Spool decorator variant whose tasks repeat (always report SPOOL_RETRY)."""

    def __call__(self, *args, **kwargs):
        self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_RETRY)
        return super(_spoolforever, self).__call__(*args, **kwargs)
def spool_decorate(f=None, pass_arguments=False, _class=_spoolraw):
    """Shared factory behind the spoolraw/spool/spoolforever decorators."""
    if f:
        # Bare @decorator usage: wrap the function immediately.
        return _class(f, pass_arguments)
    # Parametrized @decorator(...) usage: return the pending decorator.
    return partial(_class, pass_arguments=pass_arguments)
def spoolraw(f=None, pass_arguments=False):
    # Raw spool decorator: the task's own return value is reported to the
    # spooler verbatim (SPOOL_OK / SPOOL_RETRY / SPOOL_IGNORE).
    return spool_decorate(f, pass_arguments)
def spool(f=None, pass_arguments=False):
    # Run-once spool decorator: the task always reports SPOOL_OK.
    return spool_decorate(f, pass_arguments, _spool)
def spoolforever(f=None, pass_arguments=False):
    # Repeating spool decorator: the task always reports SPOOL_RETRY.
    return spool_decorate(f, pass_arguments, _spoolforever)
class mulefunc(object):
    """
    Decorator turning each call of the function into a message dispatched
    to a mule. Usable bare (@mulefunc, any mule) or with a mule id
    (@mulefunc(3)) to target a specific mule.
    """

    def __init__(self, f):
        if callable(f):
            # Bare usage: register immediately; mule 0 means "any mule".
            self.fname = f.__name__
            self.mule = 0
            mule_functions[f.__name__] = f
        else:
            # Parametrized usage: f is the mule id; the function arrives
            # on the first __call__.
            self.mule = f
            self.fname = None

    def real_call(self, *args, **kwargs):
        # Serialize the call and ship it to the chosen mule.
        uwsgi.mule_msg(pickle.dumps(
            {
                'service': 'uwsgi_mulefunc',
                'func': self.fname,
                'args': args,
                'kwargs': kwargs
            }
        ), self.mule)

    def __call__(self, *args, **kwargs):
        if not self.fname:
            # Parametrized decoration step: register the wrapped function
            # and hand back the dispatching callable.
            self.fname = args[0].__name__
            mule_functions[self.fname] = args[0]
            return self.real_call
        return self.real_call(*args, **kwargs)
def mule_msg_dispatcher(message):
    # Entry point for uwsgi.mule_msg_hook: decode a mulefunc message and
    # invoke the registered function with the original call signature.
    # NOTE(review): pickle.loads here assumes mule messages only come from
    # trusted in-process peers -- confirm no untrusted source can send them.
    msg = pickle.loads(message)
    if msg['service'] == 'uwsgi_mulefunc':
        return mule_functions[msg['func']](*msg['args'], **msg['kwargs'])
uwsgi.mule_msg_hook = mule_msg_dispatcher
class rpc(object):
    """Decorator registering the wrapped function as uWSGI RPC endpoint *name*."""

    def __init__(self, name):
        self.name = name

    def __call__(self, func):
        uwsgi.register_rpc(self.name, func)
        return func
class farm_loop(object):
    """Post-fork runnable that consumes farm messages on matching mules."""

    def __init__(self, f, farm):
        self.f = f
        self.farm = farm

    def __call__(self):
        # Workers run the post-fork chain too; mule id 0 means "not a mule".
        if uwsgi.mule_id() == 0:
            return
        # Mules outside this farm do nothing.
        if not uwsgi.in_farm(self.farm):
            return
        # Blocking message loop for the life of the mule process.
        while True:
            message = uwsgi.farm_get_msg()
            if message:
                self.f(message)
class farm(object):
    """
    Decorator running the wrapped function in a message loop on every mule
    belonging to the named farm.
    """

    def __init__(self, name=None, **kwargs):
        self.name = name

    def __call__(self, f):
        postfork_chain.append(farm_loop(f, self.name))
        # Return the original function so the decorated name stays callable;
        # previously the decorator silently rebound it to None.
        return f
class mule_brain(object):
    """Post-fork runnable executing a function once on a specific mule."""

    def __init__(self, f, num):
        self.f = f
        self.num = num

    def __call__(self):
        if uwsgi.mule_id() == self.num:
            try:
                self.f()
            except BaseException:
                # Mirror the interpreter's default traceback handling, then
                # exit non-zero so the master can respawn the mule.
                exc = sys.exc_info()
                sys.excepthook(exc[0], exc[1], exc[2])
                sys.exit(1)
class mule_brainloop(mule_brain):
    """Like mule_brain, but re-runs the function in an endless loop."""

    def __call__(self):
        if uwsgi.mule_id() == self.num:
            while True:
                try:
                    self.f()
                except BaseException:
                    # Any escaping exception ends the loop: print the
                    # traceback and exit so the mule gets respawned.
                    exc = sys.exc_info()
                    sys.excepthook(exc[0], exc[1], exc[2])
                    sys.exit(1)
class mule(object):
    """Decorator running the wrapped function once on mule number *num*."""

    def __init__(self, num):
        self.num = num

    def __call__(self, f):
        postfork_chain.append(mule_brain(f, self.num))
        # Keep the decorated name bound to the original function instead of
        # None so it can still be called directly or decorated further.
        return f
class muleloop(mule):
    """Like @mule, but re-runs the wrapped function in an endless loop."""

    def __call__(self, f):
        postfork_chain.append(mule_brainloop(f, self.num))
        # Return the original function (previously rebound to None).
        return f
class mulemsg_loop(object):
    """Post-fork runnable feeding plain mule messages to a function."""

    def __init__(self, f, num):
        self.f = f
        self.num = num

    def __call__(self):
        # Blocking message loop, but only on the targeted mule.
        if uwsgi.mule_id() == self.num:
            while True:
                message = uwsgi.mule_get_msg()
                if message:
                    self.f(message)
class mulemsg(object):
    """Decorator making the function consume raw messages on mule *num*."""

    def __init__(self, num):
        self.num = num

    def __call__(self, f):
        postfork_chain.append(mulemsg_loop(f, self.num))
        # Keep the decorated name bound to the original function instead of
        # None, consistent with the other registration decorators.
        return f
class signal(object):
    """Decorator attaching the function as handler of uWSGI signal *num*."""

    def __init__(self, num, **kwargs):
        self.num = num
        self.target = kwargs.get('target', '')

    def __call__(self, func):
        uwsgi.register_signal(self.num, self.target, func)
        return func
class timer(object):
    """Decorator firing the wrapped function every *secs* seconds."""

    def __init__(self, secs, **kwargs):
        # get_free_signal() is evaluated even when signum is given (eager
        # default argument of dict.get), matching the original behavior.
        self.num = kwargs.get('signum', get_free_signal())
        self.target = kwargs.get('target', '')
        self.secs = secs

    def __call__(self, func):
        uwsgi.register_signal(self.num, self.target, func)
        uwsgi.add_timer(self.num, self.secs)
        return func
class cron(object):
    """Decorator scheduling the wrapped function on a uWSGI cron timetable."""

    def __init__(self, minute, hour, day, month, dayweek, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.target = kwargs.get('target', '')
        self.minute, self.hour = minute, hour
        self.day, self.month, self.dayweek = day, month, dayweek

    def __call__(self, func):
        uwsgi.register_signal(self.num, self.target, func)
        uwsgi.add_cron(self.num, self.minute, self.hour,
                       self.day, self.month, self.dayweek)
        return func
class rbtimer(object):
    """Decorator firing the wrapped function via a red-black timer."""

    def __init__(self, secs, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.target = kwargs.get('target', '')
        self.secs = secs

    def __call__(self, func):
        uwsgi.register_signal(self.num, self.target, func)
        uwsgi.add_rb_timer(self.num, self.secs)
        return func
class filemon(object):
    """Decorator firing the wrapped function when a filesystem object changes."""

    def __init__(self, fsobj, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.target = kwargs.get('target', '')
        self.fsobj = fsobj

    def __call__(self, func):
        uwsgi.register_signal(self.num, self.target, func)
        uwsgi.add_file_monitor(self.num, self.fsobj)
        return func
class erlang(object):
    """Decorator registering the function as an Erlang-compatible process."""

    def __init__(self, name):
        self.name = name

    def __call__(self, func):
        uwsgi.erlang_register_process(self.name, func)
        return func
class lock(object):
    """Decorator serializing calls to the function through the uWSGI lock."""

    def __init__(self, f):
        self.f = f

    def __call__(self, *args, **kwargs):
        # ensure the spooler will not call it
        if uwsgi.i_am_the_spooler():
            return
        uwsgi.lock()
        try:
            return self.f(*args, **kwargs)
        finally:
            # Always release, even if the wrapped function raises.
            uwsgi.unlock()
class thread(object):
    """Decorator running each call of the function in a daemon thread."""

    def __init__(self, f):
        self.f = f

    def __call__(self, *args):
        t = Thread(target=self.f, args=args)
        # Daemon thread: will not block process shutdown.
        t.daemon = True
        t.start()
        # NOTE(review): returns the original function, not the Thread, so
        # callers cannot join the spawned thread.
        return self.f
class harakiri(object):
    """
    Decorator enforcing a per-call harakiri window: the request is killed
    by uWSGI if the wrapped call runs longer than *seconds*.
    """

    def __init__(self, seconds):
        self.s = seconds

    def real_call(self, *args, **kwargs):
        uwsgi.set_user_harakiri(self.s)
        try:
            return self.f(*args, **kwargs)
        finally:
            # Always disarm the timeout, even if the wrapped call raises;
            # previously an exception left the harakiri timer running,
            # unlike the try/finally pattern used by @lock in this module.
            uwsgi.set_user_harakiri(0)

    def __call__(self, f):
        self.f = f
        return self.real_call
| gpl-2.0 |
xodus7/tensorflow | tensorflow/compiler/tf2xla/python/xla.py | 3 | 12692 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental library that exposes XLA operations directly in TensorFlow.
It is sometimes useful to be able to build HLO programs directly from
TensorFlow. This file provides Tensorflow operators that mirror the semantics of
HLO operators as closely as possible.
Note: There is no promise of backward or forward compatibility for operators
defined in this module. This is primarily because the underlying HLO operators
do not promise backward or forward compatibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing
# ops include:
# infeed/outfeed (available via tf.contrib.tpu)
# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu)
# conditional
# gather/scatter
# collapse
# This file reuses builtin names (following XLA's names, so we can call things
# like xla.max), so we capture the builtin versions here.
# pylint: disable=redefined-builtin
_max = max
_min = min
_slice = slice # pylint: disable=invalid-name
constant = constant_op.constant
# Unary operators.
# For most arithmetic operators there is a TensorFlow operator
# that exactly corresponds to each XLA operator. Rather than defining
# XLA-specific variants, we reuse the corresponding TensorFlow operator.
# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1
# wrap every HLO operator, because that would allow us to be confident that the
# semantics match.
def _unary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def unary_op_wrapper(x, name=None):
return fn(x, name=name)
return unary_op_wrapper
abs = _unary_op(math_ops.abs)
# TODO(phawkins): implement clz.
conj = _unary_op(math_ops.conj)
cos = _unary_op(math_ops.cos)
ceil = _unary_op(math_ops.ceil)
digamma = _unary_op(math_ops.digamma)
erf = _unary_op(math_ops.erf)
erfc = _unary_op(math_ops.erfc)
# TODO(phawkins): implement erfinv
exp = _unary_op(math_ops.exp)
expm1 = _unary_op(math_ops.expm1)
floor = _unary_op(math_ops.floor)
imag = _unary_op(math_ops.imag)
is_finite = _unary_op(math_ops.is_finite)
lgamma = _unary_op(math_ops.lgamma)
log = _unary_op(math_ops.log)
log1p = _unary_op(math_ops.log1p)
logical_not = _unary_op(math_ops.logical_not)
neg = _unary_op(math_ops.neg)
real = _unary_op(math_ops.real)
# TODO(phawkins): unlike xla::Round, this rounds to even instead of zero for
# numbers halfway between two integers.
round = _unary_op(math_ops.round)
sin = _unary_op(math_ops.sin)
sign = _unary_op(math_ops.sign)
tanh = _unary_op(math_ops.tanh)
# Binary operators
# The main difference between TensorFlow and XLA binary ops is the broadcasting
# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA
# requires an explicit specification of which dimensions to broadcast if the
# arguments have different ranks.
def _broadcasting_binary_op(fn):
  """Wraps a binary Tensorflow operator and performs XLA-style broadcasting.

  Args:
    fn: a binary TensorFlow op with signature `(x, y, name=None)`.

  Returns:
    A wrapper `(x, y, broadcast_dims=None, name=None)` where
    `broadcast_dims` explicitly names which dimensions to broadcast when
    the operands have different ranks, mirroring XLA semantics.
  """
  def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):
    """Inner wrapper function."""
    broadcast_dims = broadcast_dims or []
    broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)
    # Rather than relying on having static shape information in the TensorFlow
    # graph, we use an XlaBroadcastHelper op that can compute the correct shapes
    # at JIT compilation time.
    x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)
    return fn(x, y, name=name)
  return broadcasting_binary_op_wrapper
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
dtypes.int8: dtypes.uint8,
dtypes.int16: dtypes.uint16,
dtypes.int32: dtypes.uint32,
dtypes.int64: dtypes.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {
dtypes.uint8: dtypes.int8,
dtypes.uint16: dtypes.int16,
dtypes.uint32: dtypes.int32,
dtypes.uint64: dtypes.int64,
}
def _shift_right_logical_helper(x, y, name=None):
  """Performs an integer right logical shift irrespective of input type."""
  assert y.dtype == x.dtype
  dtype = x.dtype
  signed = dtype in _SIGNED_TO_UNSIGNED_TABLE
  if signed:
    # right_shift on signed types is arithmetic; round-trip through the
    # matching unsigned type so the vacated bits are zero-filled.
    unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]
    x = math_ops.cast(x, unsigned_dtype)
    y = math_ops.cast(y, unsigned_dtype)
  output = bitwise_ops.right_shift(x, y, name=name)
  if signed:
    # Restore the caller's dtype.
    output = math_ops.cast(output, dtype)
  return output
def _shift_right_arithmetic_helper(x, y, name=None):
  """Performs an integer right arithmetic shift irrespective of input type."""
  assert y.dtype == x.dtype
  dtype = x.dtype
  unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE
  if unsigned:
    # right_shift on unsigned types is logical; round-trip through the
    # matching signed type so the result is sign-extended.
    signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]
    x = math_ops.cast(x, signed_dtype)
    y = math_ops.cast(y, signed_dtype)
  output = bitwise_ops.right_shift(x, y, name=name)
  if unsigned:
    # Restore the caller's dtype.
    output = math_ops.cast(output, dtype)
  return output
add = _broadcasting_binary_op(math_ops.add)
sub = _broadcasting_binary_op(math_ops.sub)
mul = _broadcasting_binary_op(math_ops.mul)
div = _broadcasting_binary_op(math_ops.div)
rem = _broadcasting_binary_op(gen_math_ops.mod)
max = _broadcasting_binary_op(math_ops.maximum)
min = _broadcasting_binary_op(math_ops.minimum)
atan2 = _broadcasting_binary_op(math_ops.atan2)
complex = _broadcasting_binary_op(math_ops.complex)
logical_and = _broadcasting_binary_op(math_ops.logical_and)
logical_or = _broadcasting_binary_op(math_ops.logical_or)
logical_xor = _broadcasting_binary_op(math_ops.logical_xor)
eq = _broadcasting_binary_op(math_ops.equal)
ne = _broadcasting_binary_op(math_ops.not_equal)
ge = _broadcasting_binary_op(math_ops.greater_equal)
gt = _broadcasting_binary_op(math_ops.greater)
le = _broadcasting_binary_op(math_ops.less_equal)
lt = _broadcasting_binary_op(math_ops.less)
pow = _broadcasting_binary_op(math_ops.pow)
shift_left = _broadcasting_binary_op(bitwise_ops.left_shift)
shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper)
shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper)
def _binary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def binary_op_wrapper(x, y, name=None):
return fn(x, y, name=name)
return binary_op_wrapper
transpose = _binary_op(array_ops.transpose)
rev = _binary_op(array_ops.reverse)
bitcast_convert_type = array_ops.bitcast
def broadcast(x, dims, name=None):
  """Broadcasts `x` by prepending `dims` to its shape (XLA Broadcast)."""
  x = ops.convert_to_tensor(x)
  # New shape = dims ++ shape(x); broadcast_to then replicates x into it.
  shape = array_ops.concat(
      [constant_op.constant(dims),
       array_ops.shape(x)], axis=0)
  return array_ops.broadcast_to(x, shape, name=name)
def clamp(a, x, b, name=None):
  """Clamps `x` to the interval [a, b], element-wise (XLA Clamp semantics)."""
  lower_bounded = max(a, x, name=name)
  return min(lower_bounded, b, name=name)
concatenate = array_ops.concat
def conv(lhs,
         rhs,
         window_strides,
         padding,
         lhs_dilation,
         rhs_dilation,
         dimension_numbers,
         feature_group_count=1,
         precision_config=None,
         name=None):
  """Wraps the XLA ConvGeneralDilated operator.

  ConvGeneralDilated is the most general form of XLA convolution and is
  documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution

  Args:
    lhs: the input tensor
    rhs: the kernel tensor
    window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimensions
    lhs_dilation: dilation to apply between input elements
    rhs_dilation: dilation to apply between kernel elements
    dimension_numbers: a `ConvolutionDimensionNumbers` proto.
    feature_group_count: number of feature groups for grouped convolution.
    precision_config: a `PrecisionConfigProto` proto.
    name: an optional name for the operator

  Returns:
    A tensor representing the output of the convolution.
  """
  # The kernel takes a serialized proto; an empty string means "unset".
  serialized_precision = (
      precision_config.SerializeToString() if precision_config else "")
  return gen_xla_ops.xla_conv(
      lhs,
      rhs,
      window_strides=window_strides,
      padding=padding,
      lhs_dilation=lhs_dilation,
      rhs_dilation=rhs_dilation,
      feature_group_count=feature_group_count,
      dimension_numbers=dimension_numbers.SerializeToString(),
      precision_config=serialized_precision,
      name=name)
# Element type conversion is just a cast in TF.
convert_element_type = math_ops.cast


def dot(lhs, rhs, name=None):
  """Contracts the last axis of `lhs` with the first axis of `rhs`."""
  return math_ops.tensordot(lhs, rhs, axes=1, name=name)
def dot_general(lhs, rhs, dimension_numbers, precision_config=None, name=None):
  """Wraps the XLA DotGeneral operator; `dimension_numbers` is a proto message."""
  serialized_precision = (
      precision_config.SerializeToString() if precision_config else "")
  return gen_xla_ops.xla_dot(
      lhs,
      rhs,
      dimension_numbers=dimension_numbers.SerializeToString(),
      precision_config=serialized_precision,
      name=name)
# Direct aliases for the XLA dynamic slice/update kernels.
dynamic_slice = gen_xla_ops.xla_dynamic_slice
dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice

# TODO(phawkins): generalize tf.pad to support interior padding, and then remove
# the XLA-specific pad operator.
pad = gen_xla_ops.xla_pad
def random_normal(mu, sigma, dims, name=None):
  """Samples a tensor of shape `dims` from a normal with mean mu, stddev sigma."""
  mean = ops.convert_to_tensor(mu)
  # The sample dtype follows the dtype of the (converted) mean.
  return random_ops.random_normal(
      dims, mean=mean, stddev=sigma, dtype=mean.dtype, name=name)
def random_uniform(minval, maxval, dims, name=None):
  """Samples a tensor of shape `dims` uniformly between minval and maxval."""
  low = ops.convert_to_tensor(minval)
  # The sample dtype follows the dtype of the (converted) lower bound.
  return random_ops.random_uniform(
      dims, low, maxval, dtype=low.dtype, name=name)
# Point-to-point communication and general reduction kernels.
recv = gen_xla_ops.xla_recv
reduce = gen_xla_ops.xla_reduce
def reduce_window(operand,
                  init,
                  reducer,
                  window_dimensions,
                  window_strides=None,
                  padding=None,
                  name=None):
  """Wraps the XLA ReduceWindow operator.

  ReduceWindow is documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .

  Args:
    operand: the input tensor
    init: a scalar tensor representing the initial value for the reduction
    reducer: a reduction function that combines a pair of scalars.
    window_dimensions: shape of the window, as a list of integers
    window_strides: inter-window strides, as a list of integers. Optional; if
      omitted, defaults to strides of 1.
    padding: padding to apply to 'operand'. List of (low, high) pairs of
      integers that specify the padding to apply before and after each
      dimension. Optional; if omitted, defaults to no padding.
    name: the operator name, or None.

  Returns:
    A tensor that represents the output of the reduce_window operator.
  """
  rank = len(window_dimensions)
  # Fill in defaults: unit strides and zero padding per window dimension.
  strides = window_strides if window_strides else [1] * rank
  pads = padding if padding else [(0, 0)] * rank
  return gen_xla_ops.xla_reduce_window(
      input=operand,
      init_value=init,
      window_dimensions=window_dimensions,
      window_strides=strides,
      padding=pads,
      computation=reducer,
      name=name)
def reshape(x, new_sizes, dimensions=None, name=None):
  """Reshapes `x` to `new_sizes`, permuting axes by `dimensions` first if given."""
  if dimensions is None:
    return array_ops.reshape(x, new_sizes, name=name)
  permuted = array_ops.transpose(x, dimensions)
  return array_ops.reshape(permuted, new_sizes, name=name)
def select(condition, x, y, name=None):
  """Selects elementwise from `x` where `condition` holds, else from `y`."""
  return array_ops.where(condition, x, y, name)


# Direct aliases for the XLA select-and-scatter and send kernels.
select_and_scatter = gen_xla_ops.xla_select_and_scatter
send = gen_xla_ops.xla_send
def slice(x, start_dims, limit_dims, strides):
  """Strided-slices `x` along every dimension (shadows the builtin `slice`)."""
  # `_slice` is defined earlier in the module — presumably the builtin
  # ``slice`` saved under another name before this shadowing def.
  index = tuple(
      _slice(begin, limit, stride)
      for begin, limit, stride in zip(start_dims, limit_dims, strides))
  return x[index]
# Direct aliases for the XLA sort and while-loop kernels.
sort = gen_xla_ops.xla_sort
while_loop = gen_xla_ops.xla_while
| apache-2.0 |
kelvinwong-ca/django-likert-field | setup.py | 1 | 4874 | #!/usr/bin/env python
from distutils.core import setup, Command
import os
import re
import sys
from likert_field import __version__
# Registry of custom distutils commands, populated below.
cmdclasses = dict()

# The long description shown on PyPI is taken verbatim from the README.
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'README.rst')
# Use a context manager so the file handle is closed instead of leaked.
with open(README_PATH, 'r') as readme_file:
    long_description = readme_file.read()
def setup_django():
    """Initialize apps for Django 1.7 and later"""
    import django
    try:
        # django.setup() only exists from Django 1.7 on; on older releases
        # the attribute lookup raises AttributeError and nothing is needed.
        django.setup()
    except AttributeError:
        pass
class DemoTester(Command):
    """Runs demonstration project tests"""

    user_options = []

    # Maps the running Django release ('major.minor') to the settings module
    # of the bundled demo project used to exercise it.
    test_settings = {
        '1.4': 'test_projects.django14.django14.settings',
        '1.5': 'test_projects.django14.django14.settings',
        '1.6': 'test_projects.django14.django14.settings',
        '1.7': 'test_projects.django14.django14.settings',
        '1.8': 'test_projects.django18.django18.settings',
        '1.9': 'test_projects.django18.django18.settings',
    }

    def initialize_options(self):
        # Required by distutils.Command; this command takes no options.
        pass

    def finalize_options(self):
        pass

    def run(self):
        """Run the demo-project test suite under the installed Django."""
        sys.dont_write_bytecode = True
        from django import get_version
        # 'major.minor' prefix of the installed Django version, e.g. '1.8'.
        django_release = re.search(r'^\d\.\d', get_version()).group(0)
        test_settings_exist = django_release in self.test_settings.keys()
        try:
            # Split on dots and pre-release markers ('a'/'b') to get ints.
            dj_ver = [int(n) for n in re.split(r'[.ab]', get_version())]
        except ValueError:
            # Pre-release Djangos must be testable!!!
            dj_too_old = False
        else:
            dj_too_old = dj_ver < [1, 4, 2]
        if test_settings_exist is False or dj_too_old:
            print("Please install Django 1.4.19 - 1.9 to run the test suite")
            exit(-1)
        os.environ['DJANGO_SETTINGS_MODULE'] = self.test_settings[
            django_release]
        try:
            from django.core.management import call_command
        except ImportError:
            print("Please install Django 1.4.2 - 1.9 to run the test suite")
            exit(-1)
        setup_django()
        call_command('test', 'likert_test_app', interactive=False, verbosity=1)
        # The 'suthern' app tests only run when South migrations are installed.
        try:
            import south
        except ImportError:
            pass
        else:
            call_command('test', 'suthern', interactive=False, verbosity=1)


cmdclasses['test_demo'] = DemoTester
class Tester(Command):
    """Runs project unit tests"""

    user_options = []

    def initialize_options(self):
        # Required by distutils.Command; this command takes no options.
        pass

    def finalize_options(self):
        pass

    def run(self):
        """Run the stand-alone unit-test suite with a minimal settings module."""
        sys.dont_write_bytecode = True
        os.environ['DJANGO_SETTINGS_MODULE'] = 'test_suite.settings_for_tests'
        setup_django()
        # Older Django bundles django.utils.unittest; otherwise use stdlib.
        try:
            from django.utils.unittest import TextTestRunner, defaultTestLoader
        except ImportError:
            from unittest import TextTestRunner, defaultTestLoader
        from test_suite import (
            test_forms, test_models, test_templatetags, test_widgets)
        suite = defaultTestLoader.loadTestsFromModule(test_forms)
        suite.addTests(defaultTestLoader.loadTestsFromModule(test_models))
        suite.addTests(
            defaultTestLoader.loadTestsFromModule(test_templatetags))
        suite.addTests(defaultTestLoader.loadTestsFromModule(test_widgets))
        runner = TextTestRunner()
        result = runner.run(suite)
        if result.wasSuccessful() is not True:
            # Exit non-zero when any test failed or errored.
            raise SystemExit(int(bool(result.errors or result.failures)))


cmdclasses['test'] = Tester
# Package metadata; the custom test commands are wired in via cmdclass.
setup(
    name='django-likert-field',
    description='A Likert field for Django models',
    long_description=long_description,
    version=__version__,
    license='BSD',
    keywords=[
        'Likert', 'ratings', 'star-rating', 'star-classification', 'Django',
        'model-field', 'Django-Likert-Field'],
    author='Kelvin Wong',
    author_email='code@kelvinwong.ca',
    url='https://github.com/kelvinwong-ca/django-likert-field',
    classifiers=['Development Status :: 3 - Alpha',
                 # 'Development Status :: 4 - Beta',
                 'Environment :: Web Environment',
                 'Framework :: Django',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: BSD License',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 2.6',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3',
                 'Programming Language :: Python :: 3.2',
                 'Programming Language :: Python :: 3.3',
                 'Programming Language :: Python :: 3.4',
                 'Programming Language :: Python :: 3.5',
                 'Topic :: Internet :: WWW/HTTP'],
    packages=['likert_field', 'likert_field.templatetags'],
    cmdclass=cmdclasses
)
| bsd-3-clause |
Springerle/cookiecutter | cookiecutter/find.py | 31 | 1092 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.find
-----------------
Functions for finding Cookiecutter templates and other components.
"""
import logging
import os
from .exceptions import NonTemplatedInputDirException
def find_template(repo_dir):
    """
    Determines which child directory of `repo_dir` is the project template.

    A directory entry is considered the template when its name contains the
    word 'cookiecutter' together with a '{{' ... '}}' Jinja marker pair.

    :param repo_dir: Local directory of newly cloned repo.
    :returns project_template: Relative path to project template.
    :raises NonTemplatedInputDirException: if no templated entry is found.
    """
    # Lazy %-style args: the message is only formatted when debug is enabled.
    logging.debug('Searching %s for the project template.', repo_dir)

    for item in os.listdir(repo_dir):
        if 'cookiecutter' in item and '{{' in item and '}}' in item:
            project_template = os.path.join(repo_dir, item)
            logging.debug(
                'The project template appears to be %s', project_template)
            return project_template

    raise NonTemplatedInputDirException
| bsd-3-clause |
zahodi/ansible | lib/ansible/modules/windows/win_regmerge.py | 25 | 3884 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_regmerge
version_added: "2.1"
short_description: Merges the contents of a registry file into the windows registry
description:
- Wraps the reg.exe command to import the contents of a registry file.
- Suitable for use with registry files created using M(win_template).
- Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not be merged.
- Exported registry files often start with a Byte Order Mark which must be removed if the file is to templated using M(win_template).
- Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
- See also M(win_template), M(win_regedit)
options:
path:
description:
- The full path including file name to the registry file on the remote machine to be merged
required: true
default: no default
compare_to:
description:
- The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in HKLM or HKCU part of registry. Use a PS-Drive style path for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE
If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed.
required: false
default: no default
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- Organise your registry files so that they contain a single root registry
key if you want to use the compare_to functionality.
This module does not force registry settings to be in the state
described in the file. If registry settings have been modified externally
the module will merge the contents of the file but continue to report
differences on subsequent runs.
To force registry change, use M(win_regedit) with state=absent before
using M(win_regmerge).
'''
EXAMPLES = r'''
# Merge in a registry file without comparing to current registry
# Note that paths using / to separate are preferred as they require less special handling than \
win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
# Compare and merge registry file
win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
compare_to: HKLM:\SOFTWARE\myCompany
'''
RETURN = r'''
compare_to_key_found:
description: whether the parent registry key has been found for comparison
returned: when comparison key not found in registry
type: boolean
sample: false
difference_count:
description: number of differences between the registry and the file
returned: changed
type: integer
sample: 1
compared:
description: whether a comparison has taken place between the registry and the file
returned: when a comparison key has been supplied and comparison has been attempted
type: boolean
sample: true
'''
| gpl-3.0 |
alexforencich/hdg2000 | fpga/lib/axis/rtl/axis_arb_mux_64.py | 1 | 5741 | #!/usr/bin/env python
"""axis_arb_mux_64
Generates an arbitrated AXI Stream mux with the specified number of ports
Usage: axis_arb_mux_64 [OPTION]...
-?, --help display this help and exit
-p, --ports specify number of ports
-n, --name specify module name
-o, --output specify output file name
"""
import io
import sys
import getopt
from math import *
from jinja2 import Template
class Usage(Exception):
    """Command-line usage error; `msg` holds the explanation shown to the user."""

    def __init__(self, msg):
        # Deliberately no super().__init__ call; main() reads err.msg directly.
        self.msg = msg
def main(argv=None):
    """Parse command-line options and write the generated Verilog module.

    Returns 2 on a usage error; exits with status 1 if the output file
    cannot be opened; exits with status 0 after printing help.
    """
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "?n:p:o:", ["help", "name=", "ports=", "output="])
        except getopt.error as msg:
            raise Usage(msg)
        # more code, unchanged
    except Usage as err:
        print(err.msg, file=sys.stderr)
        print("for help use --help", file=sys.stderr)
        return 2

    # Defaults; may be overridden by the options below.
    ports = 4
    name = None
    out_name = None

    # process options
    for o, a in opts:
        if o in ('-?', '--help'):
            print(__doc__)
            sys.exit(0)
        if o in ('-p', '--ports'):
            ports = int(a)
        if o in ('-n', '--name'):
            name = a
        if o in ('-o', '--output'):
            out_name = a

    if name is None:
        name = "axis_arb_mux_64_{0}".format(ports)

    if out_name is None:
        out_name = name + ".v"

    print("Opening file '%s'..." % out_name)

    try:
        out_file = open(out_name, 'w')
    except IOError as ex:
        # Was `except Exception`: ex.strerror only exists on OS errors, so
        # any other exception turned into an AttributeError here. Catch the
        # expected IOError only, and exit via sys.exit (the `exit` builtin
        # is not guaranteed outside interactive use).
        print("Error opening \"%s\": %s" % (out_name, ex.strerror), file=sys.stderr)
        sys.exit(1)

    print("Generating {0} port AXI Stream arbitrated mux {1}...".format(ports, name))

    # Bit width of the arbiter's encoded grant output.
    select_width = ceil(log2(ports))

    t = Template(u"""/*

Copyright (c) 2014 Alex Forencich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

*/

// Language: Verilog 2001

`timescale 1ns / 1ps

/*
 * AXI4-Stream {{n}} port arbitrated multiplexer (64 bit datapath)
 */
module {{name}} #
(
    parameter DATA_WIDTH = 64,
    parameter KEEP_WIDTH = (DATA_WIDTH/8),
    // arbitration type: "PRIORITY" or "ROUND_ROBIN"
    parameter ARB_TYPE = "PRIORITY",
    // LSB priority: "LOW", "HIGH"
    parameter LSB_PRIORITY = "HIGH"
)
(
    input  wire        clk,
    input  wire        rst,

    /*
     * AXI inputs
     */
    {%- for p in ports %}
    input  wire [DATA_WIDTH-1:0]  input_{{p}}_axis_tdata,
    input  wire [KEEP_WIDTH-1:0]  input_{{p}}_axis_tkeep,
    input  wire                   input_{{p}}_axis_tvalid,
    output wire                   input_{{p}}_axis_tready,
    input  wire                   input_{{p}}_axis_tlast,
    input  wire                   input_{{p}}_axis_tuser,
    {% endfor %}
    /*
     * AXI output
     */
    output wire [DATA_WIDTH-1:0]  output_axis_tdata,
    output wire [KEEP_WIDTH-1:0]  output_axis_tkeep,
    output wire                   output_axis_tvalid,
    input  wire                   output_axis_tready,
    output wire                   output_axis_tlast,
    output wire                   output_axis_tuser
);

wire [{{n-1}}:0] request;
wire [{{n-1}}:0] acknowledge;
wire [{{n-1}}:0] grant;
wire grant_valid;
wire [{{w-1}}:0] grant_encoded;
{% for p in ports %}
assign acknowledge[{{p}}] = input_{{p}}_axis_tvalid & input_{{p}}_axis_tready & input_{{p}}_axis_tlast;
assign request[{{p}}] = input_{{p}}_axis_tvalid & ~acknowledge[{{p}}];
{%- endfor %}

// mux instance
axis_mux_64_{{n}} #(
    .DATA_WIDTH(DATA_WIDTH)
)
mux_inst (
    .clk(clk),
    .rst(rst),
    {%- for p in ports %}
    .input_{{p}}_axis_tdata(input_{{p}}_axis_tdata),
    .input_{{p}}_axis_tkeep(input_{{p}}_axis_tkeep),
    .input_{{p}}_axis_tvalid(input_{{p}}_axis_tvalid & grant[{{p}}]),
    .input_{{p}}_axis_tready(input_{{p}}_axis_tready),
    .input_{{p}}_axis_tlast(input_{{p}}_axis_tlast),
    .input_{{p}}_axis_tuser(input_{{p}}_axis_tuser),
    {%- endfor %}
    .output_axis_tdata(output_axis_tdata),
    .output_axis_tkeep(output_axis_tkeep),
    .output_axis_tvalid(output_axis_tvalid),
    .output_axis_tready(output_axis_tready),
    .output_axis_tlast(output_axis_tlast),
    .output_axis_tuser(output_axis_tuser),
    .enable(grant_valid),
    .select(grant_encoded)
);

// arbiter instance
arbiter #(
    .PORTS({{n}}),
    .TYPE(ARB_TYPE),
    .BLOCK("ACKNOWLEDGE"),
    .LSB_PRIORITY(LSB_PRIORITY)
)
arb_inst (
    .clk(clk),
    .rst(rst),
    .request(request),
    .acknowledge(acknowledge),
    .grant(grant),
    .grant_valid(grant_valid),
    .grant_encoded(grant_encoded)
);

endmodule

""")

    out_file.write(t.render(
        n=ports,
        w=select_width,
        name=name,
        ports=range(ports)
    ))

    # Close explicitly so the generated file is fully flushed to disk
    # (previously the handle was leaked).
    out_file.close()

    print("Done")
# Script entry point; the process exit status comes from main() (2 on usage error).
if __name__ == "__main__":
    sys.exit(main())
| mit |
FoxerLee/iOS_sitp | Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/target/gyptest-target.py | 241 | 1109 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using non-default extension. In particular, verifies how
target_extension is used to avoid MSB8012 for msvs.
"""
import sys
import TestGyp
# The MSB8012 warning is specific to MSVS toolchains, so this test only runs
# on Windows platforms; elsewhere the script does nothing.
if sys.platform in ('win32', 'cygwin'):
  test = TestGyp.TestGyp()

  test.run_gyp('target.gyp')
  test.build('target.gyp')

  # executables: hello1 uses the non-default '.stuff' extension, hello2 the
  # default '.exe' — and must NOT also have produced a '.stuff'.
  test.built_file_must_exist('hello1.stuff', test.EXECUTABLE, bare=True)
  test.built_file_must_exist('hello2.exe', test.EXECUTABLE, bare=True)
  test.built_file_must_not_exist('hello2.stuff', test.EXECUTABLE, bare=True)

  # check msvs log for errors
  if test.format == "msvs":
    log_file = "obj\\hello1\\hello1.log"
    test.built_file_must_exist(log_file)
    test.built_file_must_not_contain(log_file, "MSB8012")

    log_file = "obj\\hello2\\hello2.log"
    test.built_file_must_exist(log_file)
    test.built_file_must_not_contain(log_file, "MSB8012")

  test.pass_test()
| mit |
dbckz/ansible | lib/ansible/modules/cloud/amazon/ecs_service.py | 21 | 17469 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service
short_description: create, terminate, start or stop a service in ecs
description:
- Creates or terminates ecs services.
notes:
- the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
- An IAM role must have been previously created
version_added: "2.1"
author:
- "Mark Chance (@java1guy)"
- "Darek Kaczynski (@kaczynskid)"
- "Stephane Maarek (@simplesteph)"
- "Zac Blazic (@zacblazic)"
requirements: [ json, boto, botocore, boto3 ]
options:
state:
description:
- The desired state of the service
required: true
choices: ["present", "absent", "deleting"]
name:
description:
- The name of the service
required: true
cluster:
description:
- The name of the cluster in which the service exists
required: false
task_definition:
description:
- The task definition the service will run. This parameter is required when state=present
required: false
load_balancers:
description:
- The list of ELBs defined for this service
required: false
desired_count:
description:
- The count of how many instances of the service. This parameter is required when state=present
required: false
client_token:
description:
- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
required: false
role:
description:
- The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
on your behalf. This parameter is only required if you are using a load balancer with your service.
required: false
delay:
description:
- The time to wait before checking that the service is available
required: false
default: 10
repeat:
description:
- The number of times to check that the service is available
required: false
default: 10
deployment_configuration:
description:
- Optional parameters that control the deployment_configuration; format is '{"maximum_percent":<integer>, "minimum_healthy_percent":<integer>}'
required: false
version_added: 2.3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: new_cluster-task:1"
desired_count: 0
# Basic provisioning example
- ecs_service:
name: default
state: present
cluster: new_cluster
# Simple example to delete
- ecs_service:
name: default
state: absent
cluster: new_cluster
# With custom deployment configuration
- ecs_service:
name: test-service
cluster: test-cluster
task_definition: test-task-definition
desired_count: 3
deployment_configuration:
minimum_healthy_percent: 75
maximum_percent: 150
state: present
'''
RETURN = '''
service:
description: Details of created service.
returned: when creating a service
type: complex
contains:
clusterArn:
description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
returned: always
type: string
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
description: the name
returned: always
type: string
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: string
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: string
serviceName:
description: A user-generated string used to identify the service
returned: always
type: string
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: string
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: string
deployments:
description: list of service deployments
returned: always
type: list of complex
deploymentConfiguration:
description: dictionary of deploymentConfiguration
returned: always
type: complex
contains:
maximumPercent:
description: maximumPercent param
returned: always
type: int
minimumHealthyPercent:
description: minimumHealthyPercent param
returned: always
type: int
events:
description: list of service events
returned: always
type: list of complex
ansible_facts:
description: Facts about deleted service.
returned: when deleting a service
type: complex
contains:
service:
description: Details of deleted service in the same structure described above for service creation.
returned: when service existed and was deleted
type: complex
'''
import time
# Target types used by map_complex_type() to coerce the user-supplied
# deployment_configuration values before sending them to the ECS API.
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
    'maximum_percent': 'int',
    'minimum_healthy_percent': 'int'
}
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, snake_dict_to_camel_dict, map_complex_type
class EcsServiceManager:
    """Handles ECS Services"""

    def __init__(self, module):
        # `module` is the AnsibleModule; fail_json() exits the process.
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
        # Return the first entry whose `field_name` value ends with
        # `service_name`, or None when nothing matches.
        for c in array_of_services:
            if c[field_name].endswith(service_name):
                return c
        return None

    def describe_service(self, cluster_name, service_name):
        """Return the service description dict, None if missing, else raise."""
        response = self.ecs.describe_services(
            cluster=cluster_name,
            services=[
                service_name
            ])
        # NOTE(review): `msg` is accumulated but never used or raised.
        msg = ''
        if len(response['failures']) > 0:
            c = self.find_in_array(response['failures'], service_name, 'arn')
            # NOTE(review): if no failure entry matches, c is None and
            # c['reason'] raises TypeError — presumably the failures list
            # always contains the requested arn; confirm.
            msg += ", failure reason is " + c['reason']
            if c and c['reason'] == 'MISSING':
                return None
            # fall thru and look through found ones
        if len(response['services']) > 0:
            c = self.find_in_array(response['services'], service_name)
            if c:
                return c
        # NOTE(review): StandardError is a Python 2 builtin only; under
        # Python 3 this line raises NameError instead.
        raise StandardError("Unknown problem describing service %s." % service_name)

    def is_matching_service(self, expected, existing):
        """True when the existing AWS service already matches the module params."""
        # NOTE(review): existing['taskDefinition'] may be a full ARN while the
        # module parameter is a short family:revision name — confirm these
        # compare equal in practice.
        if expected['task_definition'] != existing['taskDefinition']:
            return False

        if (expected['load_balancers'] or []) != existing['loadBalancers']:
            return False

        if (expected['desired_count'] or 0) != existing['desiredCount']:
            return False

        return True

    def create_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role, deployment_configuration):
        """Create the service and return its datetime-sanitized description."""
        response = self.ecs.create_service(
            cluster=cluster_name,
            serviceName=service_name,
            taskDefinition=task_definition,
            loadBalancers=load_balancers,
            desiredCount=desired_count,
            clientToken=client_token,
            role=role,
            deploymentConfiguration=deployment_configuration)
        return self.jsonize(response['service'])

    def update_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role, deployment_configuration):
        # load_balancers, client_token and role are accepted for signature
        # parity with create_service but are not sent to UpdateService.
        response = self.ecs.update_service(
            cluster=cluster_name,
            service=service_name,
            taskDefinition=task_definition,
            desiredCount=desired_count,
            deploymentConfiguration=deployment_configuration)
        return self.jsonize(response['service'])

    def jsonize(self, service):
        # some fields are datetime which is not JSON serializable
        # make them strings
        if 'deployments' in service:
            for d in service['deployments']:
                if 'createdAt' in d:
                    d['createdAt'] = str(d['createdAt'])
                if 'updatedAt' in d:
                    d['updatedAt'] = str(d['updatedAt'])
        if 'events' in service:
            for e in service['events']:
                if 'createdAt' in e:
                    e['createdAt'] = str(e['createdAt'])
        return service

    def delete_service(self, service, cluster=None):
        return self.ecs.delete_service(cluster=cluster, service=service)
def main():
    """Module entry point: create, update, delete or wait on an ECS service."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, default=[], type='list'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str'),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        deployment_configuration=dict(required=False, default={}, type='dict')
    ))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_if=[
                               ('state', 'present', ['task_definition', 'desired_count'])
                           ],
                           required_together=[['load_balancers', 'role']]
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    service_mgr = EcsServiceManager(module)

    # Coerce user-supplied values to the types the ECS API expects, then
    # convert snake_case keys to the camelCase the API uses.
    deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)

    try:
        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':

        matching = False
        update = False
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if service_mgr.is_matching_service(module.params, existing):
                # Already in the desired state: report it unchanged.
                matching = True
                results['service'] = service_mgr.jsonize(existing)
            else:
                update = True

        if not matching:
            if not module.check_mode:
                loadBalancers = module.params['load_balancers']
                role = module.params['role']
                clientToken = module.params['client_token']
                if update:
                    # update required
                    response = service_mgr.update_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          loadBalancers,
                                                          module.params['desired_count'],
                                                          clientToken,
                                                          role,
                                                          deploymentConfiguration)
                else:
                    # doesn't exist. create it.
                    response = service_mgr.create_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          loadBalancers,
                                                          module.params['desired_count'],
                                                          clientToken,
                                                          role,
                                                          deploymentConfiguration)
                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster']
                        )
                    except botocore.exceptions.ClientError as e:
                        # NOTE(review): e.message is Python 2 only; str(e)
                        # would be portable to Python 3.
                        module.fail_json(msg=e.message)
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            # Fixed: the quoted service name was missing its closing quote.
            module.fail_json(msg="Service '" + module.params['name'] + "' not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the cluster deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        # Fixed: was `if i is repeat-1` — identity comparison on ints only
        # works by accident for small values; use equality.
        if i == repeat - 1:
            module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
            return

    module.exit_json(**results)
| gpl-3.0 |
petrleocompel/gnome15 | src/plugins/stopwatch/stopwatch.py | 8 | 12944 | # Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2011 Nuno Araujo <nuno.araujo@russo79.com>
# Brett Smith <tanktarta@blueyonder.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gnome15.g15locale as g15locale
_ = g15locale.get_translation("stopwatch", modfile = __file__).ugettext
import gnome15.g15screen as g15screen
import gnome15.g15theme as g15theme
import gnome15.util.g15gconf as g15gconf
import gnome15.util.g15pythonlang as g15pythonlang
import gnome15.g15driver as g15driver
import gnome15.g15globals as g15globals
import gnome15.g15plugin as g15plugin
import gnome15.g15text as g15text
import datetime
import pango
import timer
import preferences as g15preferences
# Plugin details - All of these must be provided
# NOTE: these module-level names (including `id`, which shadows the builtin)
# are part of the gnome15 plugin contract and are read by the plugin loader;
# `id` and `name` are also passed to G15RefreshingPlugin.__init__ below.
id="stopwatch"
name=_("Stopwatch")
description=_("Stopwatch/Countdown timer plugin for gnome15.\
Two timers are available. User can select the a mode (stopwatch/countdown) for each of them.")
author="Nuno Araujo <nuno.araujo@russo79.com>"
copyright=_("Copyright (C)2011 Nuno Araujo")
site="http://www.russo79.com/gnome15"
has_preferences=True
# Models without a suitable screen for this page layout.
unsupported_models = [ g15driver.MODEL_G110, g15driver.MODEL_G11, g15driver.MODEL_G930, g15driver.MODEL_G35 ]
# Default key bindings: a single "active" timer is toggled/reset and VIEW
# switches which timer is active.
actions={
         g15driver.PREVIOUS_SELECTION : _("Toggle selected timer"),
         g15driver.NEXT_SELECTION : _("Reset selected timer"),
         g15driver.VIEW : _("Switch between timers")
         }
# The G19 has more keys available, so each timer gets its own pair of actions.
actions_g19={
         g15driver.PREVIOUS_SELECTION : _("Toggle timer 1"),
         g15driver.NEXT_SELECTION : _("Reset timer 1"),
         g15driver.NEXT_PAGE : _("Toggle timer 2"),
         g15driver.PREVIOUS_PAGE : _("Reset timer 2")
         }
#
# A stopwatch / timer plugin for gnome15
#
def create(gconf_key, gconf_client, screen):
    """Plugin factory called by gnome15: build the stopwatch instance."""
    instance = G15Stopwatch(gconf_key, gconf_client, screen)
    return instance
def show_preferences(parent, driver, gconf_client, gconf_key):
    """Open the plugin's preferences dialog and block until it is closed."""
    dialog = g15preferences.G15StopwatchPreferences(
        parent, driver, gconf_client, gconf_key)
    dialog.run()
class G15Stopwatch(g15plugin.G15RefreshingPlugin):
    """LCD page showing one or two stopwatch/countdown timers.

    G15RefreshingPlugin supplies the page/refresh machinery; this class adds
    the two G15Timer objects, key-binding handling (with a richer scheme on
    the G19) and gconf-backed configuration.
    """

    def __init__(self, gconf_key, gconf_client, screen):
        g15plugin.G15RefreshingPlugin.__init__(self, gconf_client, gconf_key, \
                                             screen, [ "cairo-clock", "clock", "gnome-panel-clock", "xfce4-clock", "rclock", "player-time" ], id, name)
        # Timer currently targeted by toggle/reset on non-G19 keyboards.
        self._active_timer = None
        self._message = None
        self._priority = g15screen.PRI_NORMAL

    def activate(self):
        """Create both timers, load configuration and start listening for keys."""
        self._timer = None
        self._text = g15text.new_text(self.screen)
        self._notify_timer = None
        self._timer1 = timer.G15Timer()
        self._timer1.on_finish = self._on_finish
        self._timer2 = timer.G15Timer()
        self._timer2.on_finish = self._on_finish
        self._load_configuration()
        g15plugin.G15RefreshingPlugin.activate(self)
        self.screen.key_handler.action_listeners.append(self)
        # Re-load configuration whenever any key under our gconf prefix changes.
        self.watch(None, self._config_changed)

    def deactivate(self):
        """Stop any running timer and unregister from the key handler."""
        if self._timer1.is_running():
            self._timer1.toggle()
        if self._timer2.is_running():
            self._timer2.toggle()
        self.screen.key_handler.action_listeners.remove(self)
        g15plugin.G15RefreshingPlugin.deactivate(self)

    def destroy(self):
        # Nothing beyond deactivate() needs cleaning up.
        pass

    def create_page(self):
        """Create the page, suppressing the panel painter on non-16bpp screens."""
        page = g15plugin.G15RefreshingPlugin.create_page(self)
        if self.screen.driver.get_bpp() != 16:
            """
            Don't show on the panel for G15, there just isn't enough room
            Long term, this will be configurable per plugin
            """
            page.panel_painter = None
        return page

    def create_theme(self):
        """Pick the theme variant matching how many timers are enabled."""
        variant = None
        if self._timer1.get_enabled() and self._timer2.get_enabled():
            variant = "two_timers"
        elif self._timer1.get_enabled() or self._timer2.get_enabled():
            variant = "one_timer"
        return g15theme.G15Theme(self, variant)

    def action_performed(self, binding):
        """Handle key actions while our page is visible.

        On the G19 each timer has dedicated toggle/reset actions; on all other
        models VIEW switches the active timer and toggle/reset act on it.
        """
        if self.page and self.page.is_visible():
            # G19 we make use of more keys
            if self.screen.driver.get_model_name() == g15driver.MODEL_G19:
                if self._timer1.get_enabled():
                    if binding.action == g15driver.PREVIOUS_SELECTION:
                        self._timer1.toggle()
                        self._check_page_priority()
                        self._refresh()
                    elif binding.action == g15driver.NEXT_SELECTION:
                        self._timer1.reset()
                if self._timer2.get_enabled():
                    if binding.action == g15driver.PREVIOUS_PAGE:
                        self._timer2.toggle()
                        self._check_page_priority()
                        self._refresh()
                    elif binding.action == g15driver.NEXT_PAGE:
                        self._timer2.reset()
            else:
                # For everything else we allow switching between timers
                if binding.action == g15driver.VIEW:
                    if self._active_timer == self._timer1:
                        self._active_timer = self._timer2
                    else:
                        self._active_timer = self._timer1
                    self._refresh()
                if self._active_timer:
                    if binding.action == g15driver.PREVIOUS_SELECTION:
                        self._active_timer.toggle()
                        self._check_page_priority()
                        self._refresh()
                    elif binding.action == g15driver.NEXT_SELECTION:
                        self._active_timer.reset()
                        self._check_page_priority()
                        self._refresh()

    def get_next_tick(self):
        """Refresh once per second."""
        return g15pythonlang.total_seconds( datetime.timedelta( seconds = 1 ))

    def get_theme_properties(self):
        """Build the SVG substitution dict for the current timer values.

        Two enabled timers use timer1*/timer2* keys (plus the active flags);
        a single enabled timer uses the generic timer/timer_label keys.
        """
        properties = { }
        if self._timer1.get_enabled() and self._timer2.get_enabled():
            properties["timer1_label"] = self._timer1.label
            properties["timer1"] = self._format_time_delta(self._timer1.value())
            if self._active_timer == self._timer1:
                properties["timer1_active"] = True
                properties["timer2_active"] = False
            else:
                properties["timer1_active"] = False
                properties["timer2_active"] = True
            properties["timer2_label"] = self._timer2.label
            properties["timer2"] = self._format_time_delta(self._timer2.value())
        elif self._timer1.get_enabled():
            properties["timer_label"] = self._timer1.label
            properties["timer"] = self._format_time_delta(self._timer1.value())
        elif self._timer2.get_enabled():
            properties["timer_label"] = self._timer2.label
            properties["timer"] = self._format_time_delta(self._timer2.value())
        return properties

    def _paint_panel(self, canvas, allocated_size, horizontal):
        """Draw a compact timer readout on the panel strip.

        Returns the space consumed along the panel axis, or None when nothing
        was painted (page already visible, no timer enabled, or none running).
        """
        if not self.page or self.screen.is_visible(self.page):
            return
        if not (self._timer1.get_enabled() or self._timer2.get_enabled()):
            return
        if not (self._timer1.is_running() or self._timer2.is_running()):
            return
        properties = self.get_theme_properties()
        # Don't display the date or seconds on mono displays, not enough room as it is
        if self.screen.driver.get_bpp() == 1:
            if self._timer1.get_enabled() and self._timer2.get_enabled():
                text = "%s %s" % ( properties["timer1"], properties["timer2"] )
            else:
                text = properties["timer"]
            font_size = 8
            factor = 2
            font_name = g15globals.fixed_size_font_name
            gap = 1
        else:
            factor = 1 if horizontal else 1.2
            font_name = "Sans"
            if self._timer1.get_enabled() and self._timer2.get_enabled():
                text = "%s\n%s" % (properties["timer1"], properties["timer2"])
                font_size = allocated_size / 3
            else:
                text = properties["timer"]
                font_size = allocated_size / 2
            gap = 8
        self._text.set_canvas(canvas)
        self._text.set_attributes(text, align = pango.ALIGN_CENTER, font_desc = font_name, font_absolute_size = font_size * pango.SCALE / factor)
        x, y, width, height = self._text.measure()
        if horizontal:
            if self.screen.driver.get_bpp() == 1:
                y = 0
            else:
                y = (allocated_size / 2) - height / 2
        else:
            x = (allocated_size / 2) - width / 2
            y = 0
        self._text.draw(x, y)
        if horizontal:
            return width + gap
        else:
            return height + 4

    '''
    ***********************************************************
    * Functions specific to plugin                            *
    ***********************************************************
    '''

    def _config_changed(self, client, connection_id, entry, args):
        """gconf callback: re-read settings and briefly raise the page."""
        self._load_configuration()
        self.reload_theme()
        self.screen.set_priority(self.page, g15screen.PRI_HIGH, revert_after = 3.0)

    def _get_or_default(self, key, default_value):
        """Return the gconf int at *key*, or *default_value* when unset."""
        v = self.gconf_client.get(key)
        return v.get_int() if v != None else default_value

    def _load_timer(self, timer_object, number):
        """Configure *timer_object* from the gconf keys for timer *number* (1 or 2)."""
        timer_object.set_enabled(self.gconf_client.get_bool(self.gconf_key + "/timer%d_enabled" % number) or False)
        timer_object.label = self.gconf_client.get_string(self.gconf_key + "/timer%d_label" % number) or ""
        if self.gconf_client.get_bool(self.gconf_key + "/timer%d_mode_countdown" % number):
            # Countdown mode starts from the configured H:M:S (default 0:05:00).
            timer_object.mode = timer.G15Timer.TIMER_MODE_COUNTDOWN
            timer_object.initial_value = datetime.timedelta(hours = self._get_or_default(self.gconf_key + "/timer%d_hours" % number, 0), \
                                                            minutes = self._get_or_default(self.gconf_key + "/timer%d_minutes" % number, 5), \
                                                            seconds = self._get_or_default(self.gconf_key + "/timer%d_seconds" % number, 0))
            timer_object.loop = self.gconf_client.get_bool(self.gconf_key + "/timer%d_loop" % number )
        else:
            # Stopwatch mode counts up from zero.
            timer_object.mode = timer.G15Timer.TIMER_MODE_STOPWATCH
            timer_object.initial_value = datetime.timedelta(0, 0, 0)

    def _load_configuration(self):
        """Reload both timers and make sure an enabled timer is active."""
        self._load_timer(self._timer1, 1)
        self._load_timer(self._timer2, 2)
        # Set active timer
        if self._active_timer == None and self._timer1.get_enabled() and self._timer2.get_enabled():
            self._active_timer = self._timer1
        elif self._timer1.get_enabled() and self._timer2.get_enabled():
            #Keeps the current timer active
            pass
        elif self._timer1.get_enabled():
            self._active_timer = self._timer1
        elif self._timer2.get_enabled():
            self._active_timer = self._timer2
        self._check_page_priority()

    def _check_page_priority(self):
        """Pin the page on screen while a timer runs (if configured to)."""
        self._priority = g15screen.PRI_EXCLUSIVE if self._is_any_timer_active() and g15gconf.get_bool_or_default(self.gconf_client, "%s/keep_page_visible" % self.gconf_key, True) \
                                                 else g15screen.PRI_NORMAL
        if self.page:
            self.page.set_priority(self._priority)

    def _format_time_delta(self, td):
        """Render a timedelta as H:MM:SS.

        NOTE(review): only td.seconds is used, so any full days in the delta
        are silently dropped for timers running over 24h — confirm intended.
        """
        hours = td.seconds // 3600
        minutes = (td.seconds % 3600) // 60
        seconds = td.seconds % 60
        return '%s:%02d:%02d' % (hours, minutes, seconds)

    def _is_any_timer_active(self):
        """True when at least one timer object exists and is running."""
        return ( self._timer1 is not None and self._timer1.is_running() ) or \
               ( self._timer2 is not None and self._timer2.is_running() )

    def _on_finish(self):
        # Countdown completed: drop the page priority back if appropriate.
        self._check_page_priority()
# vim:set ts=4 sw=4 et:
| gpl-3.0 |
strk/mapnik | tests/python_tests/box2d_test.py | 2 | 3279 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from nose.tools import *
import mapnik
def test_coord_init():
    """A Coord built from (100, 100) exposes those values as .x and .y."""
    point = mapnik.Coord(100, 100)
    for actual, expected in ((point.x, 100), (point.y, 100)):
        eq_(actual, expected)
def test_coord_multiplication():
    """In-place scalar multiplication scales both coordinates."""
    point = mapnik.Coord(100, 100)
    point *= 2
    eq_((point.x, point.y), (200, 200))
def test_envelope_init():
    """Box2d(100, 100, 200, 200): containment, size, accessors and indexing."""
    box = mapnik.Box2d(100, 100, 200, 200)
    # Every corner and the centre lie inside the box...
    for x, y in ((100, 100), (100, 200), (200, 200), (200, 100)):
        assert_true(box.contains(x, y))
    assert_true(box.contains(box.center()))
    # ...while points just outside each corner do not.
    for x, y in ((99.9, 99.9), (99.9, 200.1), (200.1, 200.1), (200.1, 99.9)):
        assert_false(box.contains(x, y))
    eq_(box.width(), 100)
    eq_(box.height(), 100)
    eq_((box.minx, box.miny, box.maxx, box.maxy), (100, 100, 200, 200))
    # Indices 0..3 map to minx, miny, maxx, maxy; negative indices mirror them.
    for idx, expected in enumerate((100, 100, 200, 200)):
        eq_(box[idx], expected)
        eq_(box[idx], box[idx - 4])
    centre = box.center()
    eq_((centre.x, centre.y), (150, 150))
def test_envelope_static_init():
    """from_string accepts space-, comma- and mixed-separated forms equally."""
    box = mapnik.Box2d.from_string('100 100 200 200')
    for other in (mapnik.Box2d.from_string('100,100,200,200'),
                  mapnik.Box2d.from_string('100 , 100 , 200 , 200')):
        eq_(box, other)
    # Corners and centre are contained...
    for x, y in ((100, 100), (100, 200), (200, 200), (200, 100)):
        assert_true(box.contains(x, y))
    assert_true(box.contains(box.center()))
    # ...points just outside are not.
    for x, y in ((99.9, 99.9), (99.9, 200.1), (200.1, 200.1), (200.1, 99.9)):
        assert_false(box.contains(x, y))
    eq_(box.width(), 100)
    eq_(box.height(), 100)
    eq_((box.minx, box.miny, box.maxx, box.maxy), (100, 100, 200, 200))
    # Indices 0..3 map to minx, miny, maxx, maxy; negative indices mirror them.
    for idx, expected in enumerate((100, 100, 200, 200)):
        eq_(box[idx], expected)
        eq_(box[idx], box[idx - 4])
    centre = box.center()
    eq_((centre.x, centre.y), (150, 150))
def test_envelope_multiplication():
    """In-place scalar multiplication grows the box about its centre."""
    box = mapnik.Box2d(100, 100, 200, 200)
    box *= 2
    # The doubled box spans (50, 50)-(250, 250)...
    for x, y in ((50, 50), (50, 250), (250, 250), (250, 50)):
        assert_true(box.contains(x, y))
    # ...and excludes points just beyond the new corners.
    for x, y in ((49.9, 49.9), (49.9, 250.1), (250.1, 250.1), (250.1, 49.9)):
        assert_false(box.contains(x, y))
    assert_true(box.contains(box.center()))
    eq_(box.width(), 200)
    eq_(box.height(), 200)
    eq_((box.minx, box.miny, box.maxx, box.maxy), (50, 50, 250, 250))
    centre = box.center()
    # The centre is unchanged by scaling.
    eq_((centre.x, centre.y), (150, 150))
def test_envelope_clipping():
    """Clipping a box against one it fully contains yields the smaller box."""
    cases = [
        # world vs. a small lon/lat extent
        (mapnik.Box2d(-180, -90, 180, 90),
         mapnik.Box2d(-120, 40, -110, 48)),
        # madagascar in merc
        (mapnik.Box2d(4772116.5490, -2744395.0631, 5765186.4203, -1609458.0673),
         mapnik.Box2d(5124338.3753, -2240522.1727, 5207501.8621, -2130452.8520)),
        # nz in lon/lat
        (mapnik.Box2d(163.8062, -47.1897, 179.3628, -33.9069),
         mapnik.Box2d(173.7378, -39.6395, 174.4849, -38.9252)),
    ]
    for outer, inner in cases:
        outer.clip(inner)
        eq_(outer, inner)
if __name__ == "__main__":
    setup()
    # Look the test functions up in the module namespace rather than eval()ing
    # their names; same selection ('test_' anywhere in the name, dir() order)
    # without executing arbitrary strings.
    [globals()[fn_name]() for fn_name in dir() if 'test_' in fn_name]
| lgpl-2.1 |
maljac/odoomrp-wip | product_supplierinfo_for_customer/models/product_supplierinfo.py | 9 | 2120 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class ProductSupplierinfo(models.Model):
    """Extend product.supplierinfo so a line can describe a customer
    relationship as well as a supplier one."""
    _inherit = 'product.supplierinfo'

    # Whether this pricelist line belongs to a supplier or a customer.
    type = fields.Selection(
        selection=[('customer', 'Customer'),
                   ('supplier', 'Supplier')], string='Type',
        default='supplier')

    @api.multi
    @api.onchange('type')
    def onchange_type(self):
        """Restrict the partner ('name') domain to match the selected type."""
        if self.type == 'supplier':
            return {'domain': {'name': [('supplier', '=', True)]}}
        elif self.type == 'customer':
            return {'domain': {'name': [('customer', '=', True)]}}
        return {'domain': {'name': []}}

    def search(self, cr, uid, args, offset=0, limit=None, order=None,
               context=None, count=False):
        """Add search argument for field type if the context says so. This
        should be in old API because context argument is not the last one.
        """
        if context is None:
            context = {}
        if not any(arg[0] == 'type' for arg in args):
            # Build a new list rather than extending `args` in place: the
            # original `args += [...]` mutated the caller's list, leaking the
            # injected ('type', ...) clause back into the calling code.
            args = args + [('type', '=',
                            context.get('supplierinfo_type', 'supplier'))]
        return super(ProductSupplierinfo, self).search(
            cr, uid, args, offset=offset, limit=limit, order=order,
            context=context, count=count)
| agpl-3.0 |
ansible/ansible-modules-extras | windows/win_chocolatey.py | 23 | 3537 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <trond@hindenes.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Module metadata and documentation for the win_chocolatey stub; the Ansible
# doc tooling parses DOCUMENTATION/EXAMPLES as YAML.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'committer',
                    'version': '1.0'}

# NOTE: the option-spec key is `required` (fixed two `require:` typos below,
# which the Ansible documentation schema does not recognise).
DOCUMENTATION = '''
---
module: win_chocolatey
version_added: "1.9"
short_description: Installs packages using chocolatey
description:
    - Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. List of packages can be found at http://chocolatey.org/packages
options:
  name:
    description:
      - Name of the package to be installed
    required: true
  state:
    description:
      - State of the package on the system
    choices:
      - present
      - absent
    default: present
  force:
    description:
      - Forces install of the package (even if it already exists). Using Force will cause ansible to always report that a change was made
    choices:
      - yes
      - no
    default: no
  upgrade:
    description:
      - If package is already installed it, try to upgrade to the latest version or to the specified version
    choices:
      - yes
      - no
    default: no
  version:
    description:
      - Specific version of the package to be installed
      - Ignored when state == 'absent'
  source:
    description:
      - Specify source rather than using default chocolatey repository
  install_args:
    description:
      - Arguments to pass to the native installer
    version_added: '2.1'
  params:
    description:
      - Parameters to pass to the package
    version_added: '2.1'
  allow_empty_checksums:
    description:
      - Allow empty Checksums to be used
    required: false
    default: false
    version_added: '2.2'
  ignore_checksums:
    description:
      - Ignore Checksums
    required: false
    default: false
    version_added: '2.2'
  ignore_dependencies:
    description:
      - Ignore dependencies, only install/upgrade the package itself
    default: false
    version_added: '2.1'
author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
'''

# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.

EXAMPLES = '''
# Install git
win_chocolatey:
  name: git

# Install notepadplusplus version 6.6
win_chocolatey:
  name: notepadplusplus.install
  version: '6.6'

# Uninstall git
win_chocolatey:
  name: git
  state: absent

# Install git from specified repository
win_chocolatey:
  name: git
  source: https://someserver/api/v2/
'''
boneyao/sentry | src/sentry/migrations/0184_auto__del_field_group_checksum__del_unique_group_project_checksum__del.py | 32 | 38374 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the legacy checksum columns: first the unique index that
        references Group.checksum, then the columns themselves."""
        # Removing unique constraint on 'Group', fields ['project', 'checksum']
        db.delete_unique('sentry_groupedmessage', ['project_id', 'checksum'])
        # Deleting field 'Group.checksum'
        db.delete_column('sentry_groupedmessage', 'checksum')
        # Deleting field 'Event.checksum'
        db.delete_column('sentry_message', 'checksum')
def backwards(self, orm):
    """Refuse to reverse this migration.

    The dropped ``checksum`` values cannot be reconstructed, so this
    raises immediately.  Everything after the first ``raise`` is
    intentionally unreachable: South emits it as a template to aid
    anyone hand-writing a real reverse migration.
    """
    # User chose to not deal with backwards NULL issues for 'Group.checksum'
    raise RuntimeError("Cannot reverse this migration. 'Group.checksum' and its values cannot be restored.")

    # The following code is provided here to aid in writing a correct migration
    # Adding field 'Group.checksum'
    db.add_column('sentry_groupedmessage', 'checksum',
                  self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True),
                  keep_default=False)

    # Adding unique constraint on 'Group', fields ['project', 'checksum']
    db.create_unique('sentry_groupedmessage', ['project_id', 'checksum'])

    # User chose to not deal with backwards NULL issues for 'Event.checksum'
    raise RuntimeError("Cannot reverse this migration. 'Event.checksum' and its values cannot be restored.")

    # The following code is provided here to aid in writing a correct migration
    # Adding field 'Event.checksum'
    db.add_column('sentry_message', 'checksum',
                  self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True),
                  keep_default=False)
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
joshbohde/megaminer-framework | codegen/structures.py | 1 | 1207 | from copy import deepcopy as copy
from odict import OrderedDict
class Model(object):
    """An ordered collection of data fields and functions, optionally
    inheriting deep copies of both (plus the key) from a parent model."""

    # Class-level defaults; __init__ always rebinds fresh instances.
    data = OrderedDict()
    functions = OrderedDict()
    key = ''
    name = ''
    parent = None

    def __init__(self, name, **kwargs):
        """Build a model.

        name      -- model name, used in duplicate-entry error messages.
        parent    -- optional Model whose data/functions/key are copied in.
        data      -- iterable of (key, value) pairs; duplicates raise ValueError.
        functions -- iterable of (key, value) pairs; duplicates raise ValueError.
        key       -- overrides the (possibly inherited) key.
        """
        self.data = OrderedDict()
        self.functions = OrderedDict()
        self.name = name
        if 'parent' in kwargs:
            # Bugfix: the original read a bare, undefined name 'parent'
            # (only self.parent had been assigned) -> NameError at runtime.
            parent = kwargs['parent']
            self.parent = parent
            # Deep-copy so mutating this model never touches the parent.
            self.data = copy(parent.data)
            self.functions = copy(parent.functions)
            self.key = parent.key
        if 'data' in kwargs:
            data = kwargs['data']
            for key, value in data:
                if key in self.data:
                    raise ValueError('Duplicate datum %s in %s' % (key, name))
                self.data[key] = value
        if 'functions' in kwargs:
            functions = kwargs['functions']
            for key, value in functions:
                if key in self.functions:
                    raise ValueError('Duplicate function %s in %s' % (key, name))
                # Bugfix: original stored under the literal string 'key',
                # clobbering one slot instead of keying by the function name.
                self.functions[key] = value
        if 'key' in kwargs:
            self.key = kwargs['key']
"""
TODO: Write these.
class Message(object):
pass
class Function(object):
arguments = None
result = None
def __init__(self, arguments, result):
self.arguments = arguments
self.result = result
""" | agpl-3.0 |
anryko/ansible | test/units/modules/network/fortios/test_fortios_log_fortianalyzer2_setting.py | 21 | 11148 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_log_fortianalyzer2_setting
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Autouse fixture: patch the Connection class referenced by the module
    # under test so no real FortiOS device is ever contacted.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_log_fortianalyzer2_setting.Connection')
    return connection_class_mock


# NOTE(review): the handler is constructed from the fixture *function*
# object at import time, not from a per-test mock instance. It appears to
# work only because these tests patch FortiOSHandler.set/schema and never
# exercise the connection itself -- confirm before reusing this pattern.
fos_instance = FortiOSHandler(connection_mock)
def test_log_fortianalyzer2_setting_creation(mocker):
    """A successful POST must report changed=True and pass the translated
    settings dict straight through to FortiOSHandler.set."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    settings = {
        '__change_ip': '3',
        'certificate': 'test_value_4',
        'conn_timeout': '5',
        'enc_algorithm': 'high-medium',
        'faz_type': '7',
        'hmac_algorithm': 'sha256',
        'ips_archive': 'enable',
        'mgmt_name': 'test_value_10',
        'monitor_failure_retry_period': '11',
        'monitor_keepalive_period': '12',
        'reliable': 'enable',
        'server': '192.168.100.14',
        'source_ip': '84.230.14.15',
        'ssl_min_proto_version': 'default',
        'status': 'enable',
        'upload_day': 'test_value_18',
        'upload_interval': 'daily',
        'upload_option': 'store-and-upload',
        'upload_time': 'test_value_21'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_fortianalyzer2_setting': settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(
        input_data, fos_instance)

    # The module rewrites underscores in option names to the dashed FortiOS
    # attribute names ('__change_ip' -> '--change-ip', etc.); values pass
    # through untouched.
    expected_data = {key.replace('_', '-'): value for key, value in settings.items()}

    set_method_mock.assert_called_with(
        'log.fortianalyzer2', 'setting', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_log_fortianalyzer2_setting_creation_fails(mocker):
    """An HTTP 500 from the device must surface as is_error with no change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    settings = {
        '__change_ip': '3',
        'certificate': 'test_value_4',
        'conn_timeout': '5',
        'enc_algorithm': 'high-medium',
        'faz_type': '7',
        'hmac_algorithm': 'sha256',
        'ips_archive': 'enable',
        'mgmt_name': 'test_value_10',
        'monitor_failure_retry_period': '11',
        'monitor_keepalive_period': '12',
        'reliable': 'enable',
        'server': '192.168.100.14',
        'source_ip': '84.230.14.15',
        'ssl_min_proto_version': 'default',
        'status': 'enable',
        'upload_day': 'test_value_18',
        'upload_interval': 'daily',
        'upload_option': 'store-and-upload',
        'upload_time': 'test_value_21'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_fortianalyzer2_setting': settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(
        input_data, fos_instance)

    # Underscore option names are translated to the dashed FortiOS form.
    expected_data = {key.replace('_', '-'): value for key, value in settings.items()}

    set_method_mock.assert_called_with(
        'log.fortianalyzer2', 'setting', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_log_fortianalyzer2_setting_idempotent(mocker):
    """A 404 (nothing to change) must report neither an error nor a change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    settings = {
        '__change_ip': '3',
        'certificate': 'test_value_4',
        'conn_timeout': '5',
        'enc_algorithm': 'high-medium',
        'faz_type': '7',
        'hmac_algorithm': 'sha256',
        'ips_archive': 'enable',
        'mgmt_name': 'test_value_10',
        'monitor_failure_retry_period': '11',
        'monitor_keepalive_period': '12',
        'reliable': 'enable',
        'server': '192.168.100.14',
        'source_ip': '84.230.14.15',
        'ssl_min_proto_version': 'default',
        'status': 'enable',
        'upload_day': 'test_value_18',
        'upload_interval': 'daily',
        'upload_option': 'store-and-upload',
        'upload_time': 'test_value_21'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_fortianalyzer2_setting': settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(
        input_data, fos_instance)

    # Underscore option names are translated to the dashed FortiOS form.
    expected_data = {key.replace('_', '-'): value for key, value in settings.items()}

    set_method_mock.assert_called_with(
        'log.fortianalyzer2', 'setting', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_log_fortianalyzer2_setting_filter_foreign_attributes(mocker):
    """Attributes outside the module schema must be dropped before the API call."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    valid_settings = {
        '__change_ip': '3',
        'certificate': 'test_value_4',
        'conn_timeout': '5',
        'enc_algorithm': 'high-medium',
        'faz_type': '7',
        'hmac_algorithm': 'sha256',
        'ips_archive': 'enable',
        'mgmt_name': 'test_value_10',
        'monitor_failure_retry_period': '11',
        'monitor_keepalive_period': '12',
        'reliable': 'enable',
        'server': '192.168.100.14',
        'source_ip': '84.230.14.15',
        'ssl_min_proto_version': 'default',
        'status': 'enable',
        'upload_day': 'test_value_18',
        'upload_interval': 'daily',
        'upload_option': 'store-and-upload',
        'upload_time': 'test_value_21'
    }
    # Inject one attribute that is not part of the module schema.
    settings_with_foreign = dict(valid_settings)
    settings_with_foreign['random_attribute_not_valid'] = 'tag'

    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_fortianalyzer2_setting': settings_with_foreign,
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(
        input_data, fos_instance)

    # Only the schema attributes survive, underscores mapped to dashes.
    expected_data = {key.replace('_', '-'): value for key, value in valid_settings.items()}

    set_method_mock.assert_called_with(
        'log.fortianalyzer2', 'setting', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
mjschultz/ansible-modules-extras | packaging/os/dnf.py | 47 | 28095 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# Written by Cristian van Ee <cristian at cvee.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
import os
import dnf
try:
from dnf import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename
transaction_helpers = True
except:
transaction_helpers = False
DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrade, removes, and lists packages and groups with the I(dnf) package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
required: true
default: null
aliases: []
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
required: false
default: null
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: false
choices: [ "present", "latest", "absent" ]
default: "present"
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
default: null
aliases: []
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
default: null
aliases: []
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
required: false
default: null
aliases: []
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
notes: []
# informational: requirements for nodes
requirements:
- dnf
- yum-utils (for repoquery)
author: "Cristian van Ee (@DJMuggs)"
'''
EXAMPLES = '''
- name: install the latest version of Apache
dnf: name=httpd state=latest
- name: remove the Apache package
dnf: name=httpd state=absent
- name: install the latest version of Apache from the testing repo
dnf: name=httpd enablerepo=testing state=present
- name: upgrade all packages
dnf: name=* state=latest
- name: install the nginx rpm from a remote repo
dnf: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
- name: install nginx rpm from a local file
dnf: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
- name: install the 'Development tools' package group
dnf: name="@Development tools" state=present
'''
# Default repoquery output format: one NEVRA string per line.
def_qf = "%{name}-%{version}-%{release}.%{arch}"

repoquery='/usr/bin/repoquery'
if not os.path.exists(repoquery):
    # May be installed later by install_dnf_utils(); None disables the
    # repoquery-based code paths until then.
    repoquery = None

# Path to the dnf binary; not existence-checked here.
dnfbin='/usr/bin/dnf'

import syslog

def log(msg):
    # Record msg in the system log under the 'ansible-dnf' ident.
    syslog.openlog('ansible-dnf', 0, syslog.LOG_USER)
    syslog.syslog(syslog.LOG_NOTICE, msg)
def dnf_base(conf_file=None, cachedir=False):
    """Create and populate a dnf.Base (repos read, sack filled).

    conf_file -- optional path to an alternative dnf configuration file;
                 only applied when the file actually exists.
    cachedir  -- unused here; kept for call-site compatibility.
    """
    my = dnf.Base()
    my.conf.debuglevel=0
    if conf_file and os.path.exists(conf_file):
        # The config file must be set before conf.read() parses it.
        my.conf.config_file_path = conf_file
        my.conf.read()
    my.read_all_repos()
    my.fill_sack()

    return my
def install_dnf_utils(module):
    """Best-effort install of yum-utils (which provides repoquery) via dnf,
    updating the module-level 'repoquery' path on success.

    No-op in check mode or when the dnf binary cannot be located.
    """
    if module.check_mode:
        return
    dnf_path = module.get_bin_path('dnf')
    if not dnf_path:
        return
    rc, so, se = module.run_command('%s -y install yum-utils' % dnf_path)
    if rc == 0:
        # Re-resolve repoquery now that yum-utils should be present.
        global repoquery
        repoquery = module.get_bin_path('repoquery')
def po_to_nevra(po):
    """Render a package object as its NEVRA string, preferring the object's
    own ui_nevra representation when the dnf object provides one."""
    try:
        return po.ui_nevra
    except AttributeError:
        return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch)
def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False):
    """Return the list of installed NEVRA strings matching pkgspec.

    Uses the dnf Python API when repoq is falsy, otherwise shells out to
    repoquery (repoq is the base command as a list).  With is_pkg=True the
    repoquery path skips the extra --whatprovides lookup.
    NOTE(review): the mutable-list default arguments are shared across
    calls; safe only because they are never mutated here.
    """
    if not repoq:

        pkgs = []
        try:
            my = dnf_base(conf_file)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            for rid in dis_repos:
                my.repos.disableRepo(rid)

            # Exact/glob matches against the installed-package database.
            e,m,u = my.rpmdb.matchPackageNames([pkgspec])
            pkgs = e + m
            if not pkgs:
                # Fall back to provides-based matching.
                pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
        except Exception, e:
            module.fail_json(msg="Failure talking to dnf: %s" % e)

        return [ po_to_nevra(p) for p in pkgs ]

    else:
        # Installed packages only, regardless of repo configuration.
        cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
        rc,out,err = module.run_command(cmd)
        if not is_pkg:
            cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
            rc2,out2,err2 = module.run_command(cmd)
        else:
            rc2,out2,err2 = (0, '', '')

        if rc == 0 and rc2 == 0:
            out += out2
            return [ p for p in out.split('\n') if p.strip() ]
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))

    return []
def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
    """Return the list of NEVRA strings for packages matching pkgspec that
    are available in the enabled repositories.

    API path when repoq is falsy, repoquery subprocess otherwise.
    """
    if not repoq:

        pkgs = []
        try:
            my = dnf_base(conf_file)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            for rid in dis_repos:
                my.repos.disableRepo(rid)

            e,m,u = my.pkgSack.matchPackageNames([pkgspec])
            pkgs = e + m
            if not pkgs:
                # Name match failed; try provides-based matching.
                pkgs.extend(my.returnPackagesByDep(pkgspec))
        except Exception, e:
            module.fail_json(msg="Failure talking to dnf: %s" % e)

        return [ po_to_nevra(p) for p in pkgs ]

    else:
        # Copy so repo flags do not leak into the caller's base command.
        myrepoq = list(repoq)

        for repoid in dis_repos:
            r_cmd = ['--disablerepo', repoid]
            myrepoq.extend(r_cmd)

        for repoid in en_repos:
            r_cmd = ['--enablerepo', repoid]
            myrepoq.extend(r_cmd)

        cmd = myrepoq + ["--qf", qf, pkgspec]
        rc,out,err = module.run_command(cmd)
        if rc == 0:
            return [ p for p in out.split('\n') if p.strip() ]
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))

    return []
def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
    """Return the set of NEVRA strings matching pkgspec for which an update
    is available (API path when repoq is falsy, repoquery otherwise)."""
    if not repoq:

        retpkgs = []
        pkgs = []
        updates = []

        try:
            my = dnf_base(conf_file)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            for rid in dis_repos:
                my.repos.disableRepo(rid)

            pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
            if not pkgs:
                e,m,u = my.pkgSack.matchPackageNames([pkgspec])
                pkgs = e + m

            updates = my.doPackageLists(pkgnarrow='updates').updates
        except Exception, e:
            module.fail_json(msg="Failure talking to dnf: %s" % e)

        # Keep only the matched packages that also appear in the update set.
        for pkg in pkgs:
            if pkg in updates:
                retpkgs.append(pkg)

        return set([ po_to_nevra(p) for p in retpkgs ])

    else:
        # Copy so repo flags do not leak into the caller's base command.
        myrepoq = list(repoq)
        for repoid in dis_repos:
            r_cmd = ['--disablerepo', repoid]
            myrepoq.extend(r_cmd)

        for repoid in en_repos:
            r_cmd = ['--enablerepo', repoid]
            myrepoq.extend(r_cmd)

        cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
        rc,out,err = module.run_command(cmd)

        if rc == 0:
            return set([ p for p in out.split('\n') if p.strip() ])
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))

    return []
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
    """Return the set of NEVRA strings for packages that provide req_spec
    (a package name, file path, or virtual provide).

    Checks both available and installed packages; falls back to a plain
    name match when nothing provides the spec.
    """
    if not repoq:

        pkgs = []
        try:
            my = dnf_base(conf_file)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            for rid in dis_repos:
                my.repos.disableRepo(rid)

            pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
            if not pkgs:
                # No provider found; try name matching in both the repo
                # sack and the installed-package database.
                e,m,u = my.pkgSack.matchPackageNames([req_spec])
                pkgs.extend(e)
                pkgs.extend(m)
                e,m,u = my.rpmdb.matchPackageNames([req_spec])
                pkgs.extend(e)
                pkgs.extend(m)
        except Exception, e:
            module.fail_json(msg="Failure talking to dnf: %s" % e)

        return set([ po_to_nevra(p) for p in pkgs ])

    else:
        # Copy so repo flags do not leak into the caller's base command.
        myrepoq = list(repoq)
        for repoid in dis_repos:
            r_cmd = ['--disablerepo', repoid]
            myrepoq.extend(r_cmd)

        for repoid in en_repos:
            r_cmd = ['--enablerepo', repoid]
            myrepoq.extend(r_cmd)

        # Query providers first, then plain name matches; merge both.
        cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
        rc,out,err = module.run_command(cmd)
        cmd = myrepoq + ["--qf", qf, req_spec]
        rc2,out2,err2 = module.run_command(cmd)
        if rc == 0 and rc2 == 0:
            out += out2
            pkgs = set([ p for p in out.split('\n') if p.strip() ])
            if not pkgs:
                # Last resort: maybe it's only visible in the rpmdb.
                pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf)
            return pkgs
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))

    return []
def transaction_exists(pkglist):
    """
    checks the package list to see if any packages are
    involved in an incomplete transaction

    Returns a list of conflicting 'name-arch' labels; always empty when
    the optional yum transaction helpers failed to import.
    """
    conflicts = []
    if not transaction_helpers:
        return conflicts

    # first, we create a list of the package 'nvreas'
    # so we can compare the pieces later more easily
    pkglist_nvreas = []
    for pkg in pkglist:
        pkglist_nvreas.append(splitFilename(pkg))

    # next, we build the list of packages that are
    # contained within an unfinished transaction
    unfinished_transactions = find_unfinished_transactions()
    for trans in unfinished_transactions:
        steps = find_ts_remaining(trans)
        for step in steps:
            # the action is install/erase/etc., but we only
            # care about the package spec contained in the step
            (action, step_spec) = step
            (n,v,r,e,a) = splitFilename(step_spec)
            # and see if that spec is in the list of packages
            # requested for installation/updating
            for pkg in pkglist_nvreas:
                # if the name and arch match, we're going to assume
                # this package is part of a pending transaction
                # the label is just for display purposes
                label = "%s-%s" % (n,a)
                if n == pkg[0] and a == pkg[4]:
                    if label not in conflicts:
                        conflicts.append("%s-%s" % (n,a))
                    break
    return conflicts
def local_nvra(module, path):
    """Return the name-version-release.arch string of a local rpm file,
    or None when rpm cannot read the file."""
    rc, out, err = module.run_command(
        ['/bin/rpm', '-qp', '--qf',
         '%{name}-%{version}-%{release}.%{arch}\n', path])
    if rc != 0:
        return None
    # rpm prints the NEVRA on the first line.
    return out.split('\n')[0]
def pkg_to_dict(pkgstr):
    """Parse one 'name|epoch|version|release|arch|repoid' repoquery line
    into a result dict; blank input yields an error-marker dict."""
    if not pkgstr.strip():
        return {'error_parsing': pkgstr}

    n, e, v, r, a, repo = pkgstr.split('|')
    return {
        'name': n,
        'arch': a,
        'epoch': e,
        'release': r,
        'version': v,
        'repo': repo,
        'nevra': '%s:%s-%s-%s.%s' % (e, n, v, r, a),
        # 'installed' is the pseudo-repo repoquery reports for local packages.
        'dnfstate': 'installed' if repo == 'installed' else 'available',
    }
def repolist(module, repoq, qf="%{repoid}"):
    """Return the set of repository ids known to repoquery, or an empty
    list when the repoquery call fails."""
    rc, out, err = module.run_command(repoq + ["--qf", qf, "-a"])
    if rc != 0:
        return []
    return set(line for line in out.split('\n') if line.strip())
def list_stuff(module, conf_file, stuff):
    """Handle the informational 'list=' parameter.

    stuff is one of 'installed', 'updates', 'available', 'repos', or an
    arbitrary package spec; returns a list of result dicts.
    """
    qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
    repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']
    if conf_file and os.path.exists(conf_file):
        repoq += ['-c', conf_file]

    if stuff == 'installed':
        return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
    elif stuff == 'updates':
        return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
    elif stuff == 'available':
        return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
    elif stuff == 'repos':
        return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ]
    else:
        # A concrete spec: report both installed and available matches.
        return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ]
def install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):
    """Install each spec in items (local rpm path, URL, @group, or package
    name/provide/wildcard) unless it is already present.

    Exits the module via module.exit_json on completion or
    module.fail_json on error; does not return normally.
    """

    res = {}
    res['results'] = []
    res['msg'] = ''
    res['rc'] = 0
    res['changed'] = False

    for spec in items:
        pkg = None

        # check if pkgspec is installed (if possible for idempotence)
        # localpkg
        if spec.endswith('.rpm') and '://' not in spec:
            # get the pkg name-v-r.arch
            if not os.path.exists(spec):
                res['msg'] += "No Package file matching '%s' found on system" % spec
                module.fail_json(**res)

            nvra = local_nvra(module, spec)
            # look for them in the rpmdb
            if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                # if they are there, skip it
                continue
            pkg = spec
        # URL
        elif '://' in spec:
            # Idempotence cannot be checked for URLs; let dnf decide.
            pkg = spec
        #groups :(
        elif spec.startswith('@'):
            # complete wild ass guess b/c it's a group
            pkg = spec
        # range requires or file-requires or pkgname :(
        else:
            # most common case is the pkg is already installed and done
            # short circuit all the bs - and search for it as a pkg in is_installed
            # if you find it then we're done
            if not set(['*','?']).intersection(set(spec)):
                pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True)
                if pkgs:
                    res['results'].append('%s providing %s is already installed' % (pkgs[0], spec))
                    continue

            # look up what pkgs provide this
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
            if not pkglist:
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                module.fail_json(**res)

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the dnf operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                module.fail_json(**res)

            # if any of them are installed
            # then nothing to do
            found = False
            for this in pkglist:
                if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True):
                    found = True
                    res['results'].append('%s providing %s is already installed' % (this, spec))
                    break

            # if the version of the pkg you have installed is not in ANY repo, but there are
            # other versions in the repos (both higher and lower) then the previous checks won't work.
            # so we check one more time. This really only works for pkgname - not for file provides or virt provides
            # but virt provides should be all caught in what_provides on its own.
            # highly irritating
            if not found:
                if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    found = True
                    res['results'].append('package providing %s is already installed' % (spec))

            if found:
                continue

            # if not - then pass in the spec as what to install
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            pkg = spec

        cmd = dnf_basecmd + ['install', pkg]

        if module.check_mode:
            module.exit_json(changed=True)

        changed = True

        rc, out, err = module.run_command(cmd)

        # Fail on invalid urls:
        if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
            err = 'Package at %s could not be installed' % spec
            module.fail_json(changed=False,msg=err,rc=1)
        elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
            # avoid failing in the 'Nothing To Do' case
            # this may happen with an URL spec.
            # for an already installed group,
            # we get rc = 0 and 'Nothing to do' in out, not in err.
            rc = 0
            err = ''
            out = '%s: Nothing to do' % spec
            changed = False

        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err

        # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
        # look for the pkg in rpmdb
        # look for the pkg via obsoletes

        # accumulate any changes
        res['changed'] |= changed

    module.exit_json(**res)
def remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):
    """Remove each package (or @group) in items if installed.

    Exits the module via module.exit_json / module.fail_json; does not
    return normally.
    """

    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for pkg in items:
        is_group = False
        # group remove - this is doom on a stick
        if pkg.startswith('@'):
            is_group = True
        else:
            if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                res['results'].append('%s is not installed' % pkg)
                continue

        # run an actual dnf transaction
        cmd = dnf_basecmd + ["remove", pkg]

        if module.check_mode:
            module.exit_json(changed=True)

        rc, out, err = module.run_command(cmd)

        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err

        # compile the results into one batch. If anything is changed
        # then mark changed
        # at the end - if we've end up failed then fail out of the rest
        # of the process

        # at this point we should check to see if the pkg is no longer present
        if not is_group: # we can't sensibly check for a group being uninstalled reliably
            # look to see if the pkg shows up from is_installed. If it doesn't
            if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                res['changed'] = True
            else:
                module.fail_json(**res)

        if rc != 0:
            module.fail_json(**res)

    module.exit_json(**res)
def latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos):
    """Update the given package specs (or everything, for '*') to the latest
    available version, installing specs that are not yet present.

    Exits the module via module.exit_json / module.fail_json; does not
    return normally.
    """

    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for spec in items:

        pkg = None
        basecmd = 'update'
        cmd = ''
        # groups, again
        if spec.startswith('@'):
            pkg = spec
        elif spec == '*':  # update all
            # use check-update to see if there is any need
            rc, out, err = module.run_command(dnf_basecmd + ['check-update'])
            if rc == 100:  # dnf convention: exit 100 means updates available
                cmd = dnf_basecmd + [basecmd]
            else:
                res['results'].append('All packages up to date')
                continue
        # dep/pkgname - find it
        else:
            if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                basecmd = 'update'
            else:
                basecmd = 'install'

            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
            if not pkglist:
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                module.fail_json(**res)

            nothing_to_do = True
            for this in pkglist:
                if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    nothing_to_do = False
                    break
                # BUGFIX: dis_repos was previously passed en_repos here, so
                # repos the user disabled were still consulted when checking
                # whether an update exists.
                if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    nothing_to_do = False
                    break
            if nothing_to_do:
                res['results'].append("All packages providing %s are up to date" % spec)
                continue

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the dnf operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                module.fail_json(**res)

            pkg = spec
        if not cmd:
            cmd = dnf_basecmd + [basecmd, pkg]

        if module.check_mode:
            return module.exit_json(changed=True)

        rc, out, err = module.run_command(cmd)

        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err

        # FIXME if it is - update it and check to see if it applied
        # check to see if there is no longer an update available for the pkgspec
        if rc:
            res['failed'] = True
        else:
            res['changed'] = True

    module.exit_json(**res)
def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
           disable_gpg_check):
    """Dispatch to install/remove/latest for the comma-separated pkgspec
    after building the dnf command line and validating the repo ids.

    The dispatched helpers exit the module themselves; the trailing return
    is only reached on an unexpected state.
    """
    # take multiple args comma separated
    items = pkgspec.split(',')

    # need debug level 2 to get 'Nothing to do' for groupinstall.
    dnf_basecmd = [dnfbin, '-d', '2', '-y']

    if not repoquery:
        repoq = None
    else:
        repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']

    if conf_file and os.path.exists(conf_file):
        dnf_basecmd += ['-c', conf_file]
        if repoq:
            repoq += ['-c', conf_file]

    dis_repos =[]
    en_repos = []
    if disablerepo:
        dis_repos = disablerepo.split(',')
    if enablerepo:
        en_repos = enablerepo.split(',')

    for repoid in dis_repos:
        r_cmd = ['--disablerepo=%s' % repoid]
        dnf_basecmd.extend(r_cmd)

    for repoid in en_repos:
        r_cmd = ['--enablerepo=%s' % repoid]
        dnf_basecmd.extend(r_cmd)

    if state in ['installed', 'present', 'latest']:
        # Probe the requested repos up front so a bad repo id fails early
        # with a clear message instead of deep inside the transaction.
        my = dnf_base(conf_file)
        try:
            for r in dis_repos:
                my.repos.disableRepo(r)

            # NOTE(review): dnf.yum.config looks carried over from the yum
            # module; confirm this attribute exists on the dnf in use.
            current_repos = dnf.yum.config.RepoConf()
            for r in en_repos:
                try:
                    my.repos.enableRepo(r)
                    new_repos = my.repos.repos.keys()
                    for i in new_repos:
                        if not i in current_repos:
                            rid = my.repos.getRepo(i)
                            # Touch repoXML to force metadata access.
                            a = rid.repoXML.repoid
                    current_repos = new_repos
                except dnf.exceptions.Error, e:
                    module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e))
        except dnf.exceptions.Error, e:
            module.fail_json(msg="Error accessing repos: %s" % e)

    if state in ['installed', 'present']:
        if disable_gpg_check:
            dnf_basecmd.append('--nogpgcheck')
        install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
    elif state in ['removed', 'absent']:
        remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)
    elif state == 'latest':
        if disable_gpg_check:
            dnf_basecmd.append('--nogpgcheck')
        latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos)

    # should be caught by AnsibleModule argument_spec
    return dict(changed=False, failed=True, results='', errors='unexpected state')
def main():
    """Module entry point: parse arguments, ensure repoquery is available,
    then either answer an informational 'list=' query or run ensure()."""
    # state=installed name=pkgspec
    # state=removed name=pkgspec
    # state=latest name=pkgspec
    #
    # informational commands:
    #   list=installed
    #   list=updates
    #   list=available
    #   list=repos
    #   list=pkgspec

    module = AnsibleModule(
        argument_spec = dict(
            name=dict(aliases=['pkg']),
            # removed==absent, installed==present, these are accepted as aliases
            state=dict(default='installed', choices=['absent','present','installed','removed','latest']),
            enablerepo=dict(),
            disablerepo=dict(),
            list=dict(),
            conf_file=dict(default=None),
            disable_gpg_check=dict(required=False, default="no", type='bool'),
            # this should not be needed, but exists as a failsafe
            install_repoquery=dict(required=False, default="yes", type='bool'),
        ),
        required_one_of = [['name','list']],
        mutually_exclusive = [['name','list']],
        supports_check_mode = True
    )

    # this should not be needed, but exists as a failsafe
    params = module.params
    if params['install_repoquery'] and not repoquery and not module.check_mode:
        install_dnf_utils(module)

    if not repoquery:
        module.fail_json(msg="repoquery is required to use this module at this time. Please install the yum-utils package.")

    if params['list']:
        results = dict(results=list_stuff(module, params['conf_file'], params['list']))
        module.exit_json(**results)
    else:
        pkg = params['name']
        state = params['state']
        enablerepo = params.get('enablerepo', '')
        disablerepo = params.get('disablerepo', '')
        disable_gpg_check = params['disable_gpg_check']

        # ensure() exits the module itself; reaching the line below means
        # the state dispatch fell through unexpectedly.
        res = ensure(module, state, pkg, params['conf_file'], enablerepo,
                     disablerepo, disable_gpg_check)
        module.fail_json(msg="we should never get here unless this all failed", **res)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
valkjsaaa/sl4a | python/src/Lib/plat-mac/lib-scriptpackages/Finder/__init__.py | 73 | 8727 | """
Package generated from /System/Library/CoreServices/Finder.app
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the Finder package is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import Standard_Suite
import Legacy_suite
import Containers_and_folders
import Files
import Finder_Basics
import Finder_items
import Window_classes
import Type_Definitions
import Enumerations
_code_to_module = {
'CoRe' : Standard_Suite,
'fleg' : Legacy_suite,
'fndr' : Containers_and_folders,
'fndr' : Files,
'fndr' : Finder_Basics,
'fndr' : Finder_items,
'fndr' : Window_classes,
'tpdf' : Type_Definitions,
'tpnm' : Enumerations,
}
_code_to_fullname = {
'CoRe' : ('Finder.Standard_Suite', 'Standard_Suite'),
'fleg' : ('Finder.Legacy_suite', 'Legacy_suite'),
'fndr' : ('Finder.Containers_and_folders', 'Containers_and_folders'),
'fndr' : ('Finder.Files', 'Files'),
'fndr' : ('Finder.Finder_Basics', 'Finder_Basics'),
'fndr' : ('Finder.Finder_items', 'Finder_items'),
'fndr' : ('Finder.Window_classes', 'Window_classes'),
'tpdf' : ('Finder.Type_Definitions', 'Type_Definitions'),
'tpnm' : ('Finder.Enumerations', 'Enumerations'),
}
from Standard_Suite import *
from Legacy_suite import *
from Containers_and_folders import *
from Files import *
from Finder_Basics import *
from Finder_items import *
from Window_classes import *
from Type_Definitions import *
from Enumerations import *
def getbaseclasses(v):
    """Populate v._propdict / v._elemdict by merging the dictionaries of
    all (recursively resolved) superclasses with v's own private
    dictionaries.

    Runs at most once per class: an already-populated (truthy) _propdict
    marks v as done and the call returns immediately.
    """
    if getattr(v, '_propdict', None):
        return
    v._propdict = {}
    v._elemdict = {}
    for base_name in getattr(v, '_superclassnames', []):
        # Superclass names are stored as strings; resolve them in this
        # module's namespace and make sure they are populated first.
        base = eval(base_name)
        getbaseclasses(base)
        v._propdict.update(getattr(base, '_propdict', {}))
        v._elemdict.update(getattr(base, '_elemdict', {}))
    # The class's own entries override anything inherited.
    v._propdict.update(getattr(v, '_privpropdict', {}))
    v._elemdict.update(getattr(v, '_privelemdict', {}))
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(StdSuites.Type_Names_Suite.small_integer)
getbaseclasses(StdSuites.Type_Names_Suite.system_dictionary)
getbaseclasses(StdSuites.Type_Names_Suite.color_table)
getbaseclasses(StdSuites.Type_Names_Suite.fixed_point)
getbaseclasses(StdSuites.Type_Names_Suite.string)
getbaseclasses(StdSuites.Type_Names_Suite.type_element_info)
getbaseclasses(StdSuites.Type_Names_Suite.machine_location)
getbaseclasses(StdSuites.Type_Names_Suite.PostScript_picture)
getbaseclasses(StdSuites.Type_Names_Suite.type_property_info)
getbaseclasses(StdSuites.Type_Names_Suite.menu_item)
getbaseclasses(StdSuites.Type_Names_Suite.scrap_styles)
getbaseclasses(StdSuites.Type_Names_Suite.fixed_rectangle)
getbaseclasses(StdSuites.Type_Names_Suite.null)
getbaseclasses(StdSuites.Type_Names_Suite.type_event_info)
getbaseclasses(StdSuites.Type_Names_Suite.rotation)
getbaseclasses(StdSuites.Type_Names_Suite.long_fixed_rectangle)
getbaseclasses(StdSuites.Type_Names_Suite.long_point)
getbaseclasses(StdSuites.Type_Names_Suite.target_id)
getbaseclasses(StdSuites.Type_Names_Suite.type_suite_info)
getbaseclasses(StdSuites.Type_Names_Suite.type_parameter_info)
getbaseclasses(StdSuites.Type_Names_Suite.long_fixed_point)
getbaseclasses(StdSuites.Type_Names_Suite.bounding_rectangle)
getbaseclasses(StdSuites.Type_Names_Suite.TIFF_picture)
getbaseclasses(StdSuites.Type_Names_Suite.long_fixed)
getbaseclasses(StdSuites.Type_Names_Suite.version)
getbaseclasses(StdSuites.Type_Names_Suite.RGB16_color)
getbaseclasses(StdSuites.Type_Names_Suite.double_integer)
getbaseclasses(StdSuites.Type_Names_Suite.location_reference)
getbaseclasses(StdSuites.Type_Names_Suite.point)
getbaseclasses(StdSuites.Type_Names_Suite.application_dictionary)
getbaseclasses(StdSuites.Type_Names_Suite.unsigned_integer)
getbaseclasses(StdSuites.Type_Names_Suite.menu)
getbaseclasses(StdSuites.Type_Names_Suite.small_real)
getbaseclasses(StdSuites.Type_Names_Suite.fixed)
getbaseclasses(StdSuites.Type_Names_Suite.type_class_info)
getbaseclasses(StdSuites.Type_Names_Suite.RGB96_color)
getbaseclasses(StdSuites.Type_Names_Suite.dash_style)
getbaseclasses(StdSuites.Type_Names_Suite.pixel_map_record)
getbaseclasses(StdSuites.Type_Names_Suite.extended_real)
getbaseclasses(StdSuites.Type_Names_Suite.long_rectangle)
getbaseclasses(process)
getbaseclasses(application_process)
getbaseclasses(desk_accessory_process)
getbaseclasses(application)
getbaseclasses(trash_2d_object)
getbaseclasses(desktop_2d_object)
getbaseclasses(container)
getbaseclasses(folder)
getbaseclasses(disk)
getbaseclasses(application)
getbaseclasses(alias_file)
getbaseclasses(package)
getbaseclasses(file)
getbaseclasses(application_file)
getbaseclasses(internet_location_file)
getbaseclasses(document_file)
getbaseclasses(clipping)
getbaseclasses(preferences_window)
getbaseclasses(Finder_window)
getbaseclasses(window)
getbaseclasses(clipping_window)
getbaseclasses(information_window)
getbaseclasses(item)
getbaseclasses(icon_view_options)
getbaseclasses(preferences)
getbaseclasses(alias_list)
getbaseclasses(icon_family)
getbaseclasses(label)
getbaseclasses(column)
getbaseclasses(list_view_options)
#
# Indices of types declared in this module
#
_classdeclarations = {
'shor' : StdSuites.Type_Names_Suite.small_integer,
'aeut' : StdSuites.Type_Names_Suite.system_dictionary,
'clrt' : StdSuites.Type_Names_Suite.color_table,
'fpnt' : StdSuites.Type_Names_Suite.fixed_point,
'TEXT' : StdSuites.Type_Names_Suite.string,
'elin' : StdSuites.Type_Names_Suite.type_element_info,
'mLoc' : StdSuites.Type_Names_Suite.machine_location,
'EPS ' : StdSuites.Type_Names_Suite.PostScript_picture,
'pinf' : StdSuites.Type_Names_Suite.type_property_info,
'cmen' : StdSuites.Type_Names_Suite.menu_item,
'styl' : StdSuites.Type_Names_Suite.scrap_styles,
'frct' : StdSuites.Type_Names_Suite.fixed_rectangle,
'null' : StdSuites.Type_Names_Suite.null,
'evin' : StdSuites.Type_Names_Suite.type_event_info,
'trot' : StdSuites.Type_Names_Suite.rotation,
'lfrc' : StdSuites.Type_Names_Suite.long_fixed_rectangle,
'lpnt' : StdSuites.Type_Names_Suite.long_point,
'targ' : StdSuites.Type_Names_Suite.target_id,
'suin' : StdSuites.Type_Names_Suite.type_suite_info,
'pmin' : StdSuites.Type_Names_Suite.type_parameter_info,
'lfpt' : StdSuites.Type_Names_Suite.long_fixed_point,
'qdrt' : StdSuites.Type_Names_Suite.bounding_rectangle,
'TIFF' : StdSuites.Type_Names_Suite.TIFF_picture,
'lfxd' : StdSuites.Type_Names_Suite.long_fixed,
'vers' : StdSuites.Type_Names_Suite.version,
'tr16' : StdSuites.Type_Names_Suite.RGB16_color,
'comp' : StdSuites.Type_Names_Suite.double_integer,
'insl' : StdSuites.Type_Names_Suite.location_reference,
'QDpt' : StdSuites.Type_Names_Suite.point,
'aete' : StdSuites.Type_Names_Suite.application_dictionary,
'magn' : StdSuites.Type_Names_Suite.unsigned_integer,
'cmnu' : StdSuites.Type_Names_Suite.menu,
'sing' : StdSuites.Type_Names_Suite.small_real,
'fixd' : StdSuites.Type_Names_Suite.fixed,
'gcli' : StdSuites.Type_Names_Suite.type_class_info,
'tr96' : StdSuites.Type_Names_Suite.RGB96_color,
'tdas' : StdSuites.Type_Names_Suite.dash_style,
'tpmm' : StdSuites.Type_Names_Suite.pixel_map_record,
'exte' : StdSuites.Type_Names_Suite.extended_real,
'lrct' : StdSuites.Type_Names_Suite.long_rectangle,
'prcs' : process,
'pcap' : application_process,
'pcda' : desk_accessory_process,
'capp' : application,
'ctrs' : trash_2d_object,
'cdsk' : desktop_2d_object,
'ctnr' : container,
'cfol' : folder,
'cdis' : disk,
'capp' : application,
'alia' : alias_file,
'pack' : package,
'file' : file,
'appf' : application_file,
'inlf' : internet_location_file,
'docf' : document_file,
'clpf' : clipping,
'pwnd' : preferences_window,
'brow' : Finder_window,
'cwin' : window,
'lwnd' : clipping_window,
'iwnd' : information_window,
'cobj' : item,
'icop' : icon_view_options,
'cprf' : preferences,
'alst' : alias_list,
'ifam' : icon_family,
'clbl' : label,
'lvcl' : column,
'lvop' : list_view_options,
}
class Finder(Standard_Suite_Events,
        Legacy_suite_Events,
        Containers_and_folders_Events,
        Files_Events,
        Finder_Basics_Events,
        Finder_items_Events,
        Window_classes_Events,
        Type_Definitions_Events,
        Enumerations_Events,
        aetools.TalkTo):
    """AppleScript proxy object for the OS X Finder.

    Combines all the event suites generated from the Finder's scripting
    dictionary with aetools.TalkTo, which performs the actual Apple
    Event communication with the application.
    """
    # Four-character application signature (creator code) of the Finder.
    _signature = 'MACS'
    _moduleName = 'Finder'
    # Reuse the element/property dictionaries of the top-level
    # 'application' class so lookups resolve directly on a Finder instance.
    _elemdict = application._elemdict
    _propdict = application._propdict
| apache-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/lib/mwapy/ft_beam.py | 1 | 8699 | #from mwapy import ft_beam
# Generate automatic calibration model and form a bandpass solution
# Requires pywcs-1.9-4.4.4 and numpy-1.7.0 or numpy-1.6.2 installed into casapy
# You can do this by installing PAPERcasa, and using 'casapython' to install the modules
# Natasha Hurley-Walker 10/07/2013
# Updated 08/08/2013 to scale the YY and XX beams separately
# Updated 01/10/2013 Use the field name as the calibrator name if the calibrator wasn't filled in properly during scheduling
# Updated 21/11/2013 Added sub-calibrators to complex fields (but didn't find much improvement)
# Updated 02/12/2013 Added a spectral beam option; turned subcalibrators off by default
import mwapy, subprocess, re
import mwapy.get_observation_info
from mwapy.obssched.base import schedule
import numpy as n,os,sys,shutil
try:
import pyfits
except:
import astropy.io.fits as pyfits
from mwapy.pb import make_beam
import mwapy
from taskinit import *
import tasks
# Attempt to auto-set directories
# Model location:
if not os.environ.has_key('MWA_CODE_BASE'):
print '$MWA_CODE_BASE not set: do not know where models are'
raise KeyboardInterrupt
modeldir=os.environ['MWA_CODE_BASE']+'/MWA_Tools/Models/'
if not os.path.exists(modeldir):
print 'Model directory %s does not exist' % modeldir
raise KeyboardInterrupt
try:
db=schedule.getdb()
except:
print 'Unable to open connection to database'
raise KeyboardInterrupt
def ft_beam(vis=None,refant='Tile012',clobber=True,
spectral_beam=False,
subcalibrator=False,uvrange='>0.03klambda'):
"""
def ft_beam(vis=None,refant='Tile012',clobber=True,
spectral_beam=False,
subcalibrator=False,uvrange='>0.03klambda'):
# Reference antenna
refant='Tile012'
# Overwrite files
clobber=True
# Option to include the spectral index of the primary beam
spectral_beam=False
# Option to add more sources to the field
"""
# output calibration solution
caltable=re.sub('ms','cal',vis)
if vis is None or len(vis)==0 or not os.path.exists(vis):
print 'Input visibility must be defined'
return None
# Get the frequency information of the measurement set
ms.open(vis)
rec = ms.getdata(['axis_info'])
df,f0 = (rec['axis_info']['freq_axis']['resolution'][len(rec['axis_info']['freq_axis']['resolution'])/2],rec['axis_info']['freq_axis']['chan_freq'][len(rec['axis_info']['freq_axis']['resolution'])/2])
F =rec['axis_info']['freq_axis']['chan_freq'].squeeze()/1e6
df=df[0]*len(rec['axis_info']['freq_axis']['resolution'])
f0=f0[0]
rec_time=ms.getdata(['time'])
sectime=qa.quantity(rec_time['time'][0],unitname='s')
midfreq=f0
bandwidth=df
if isinstance(qa.time(sectime,form='fits'),list):
dateobs=qa.time(sectime,form='fits')[0]
else:
dateobs=qa.time(sectime,form='fits')
if spectral_beam:
# Start and end of the channels so we can make the spectral beam image
startfreq=f0-df/2
endfreq=f0+df/2
freq_array=[midfreq,startfreq,endfreq]
else:
freq_array=[midfreq]
# Get observation number directly from the measurement set
tb.open(vis+'/OBSERVATION')
obsnum=int(tb.getcol('MWA_GPS_TIME'))
tb.close
info=mwapy.get_observation_info.MWA_Observation(obsnum,db=db)
print 'Retrieved observation info for %d...\n%s\n' % (obsnum,info)
# Calibrator information
if info.calibration:
calibrator=info.calibrators
else:
# Observation wasn't scheduled properly so calibrator field is missing: try parsing the fieldname
# assuming it's something like 3C444_81
calibrator=info.filename.rsplit('_',1)[0]
print 'Calibrator is %s...' % calibrator
# subcalibrators not yet improving the calibration, probably due to poor beam model
if subcalibrator and calibrator=='PKS0408-65':
subcalibrator='PKS0410-75'
elif subcalibrator and calibrator=='HerA':
subcalibrator='3C353'
else:
subcalibrator=False
# Start models are 150MHz Jy/pixel fits files in a known directory
model=modeldir+calibrator+'.fits'
# With a corresponding spectral index map
spec_index=modeldir+calibrator+'_spec_index.fits'
if not os.path.exists(model):
print 'Could not find calibrator model %s' % model
return None
# Generate the primary beam
delays=info.delays
str_delays=','.join(map(str,delays))
print 'Delays are: %s' % str_delays
# load in the model FITS file as a template for later
ftemplate=pyfits.open(model)
# do this for the start, middle, and end frequencies
for freq in freq_array:
freqstring=str(freq/1.0e6) + 'MHz'
# We'll generate images in the local directory at the right frequency for this ms
outname=calibrator+'_'+freqstring
outnt2=calibrator+'_'+freqstring+'_nt2'
# import model, edit header so make_beam generates the right beam in the right place
if os.path.exists(outname + '.fits') and clobber:
os.remove(outname + '.fits')
shutil.copy(model,outname + '.fits')
fp=pyfits.open(outname + '.fits','update')
fp[0].header['CRVAL3']=freq
fp[0].header['CDELT3']=bandwidth
fp[0].header['DATE-OBS']=dateobs
fp.flush()
print 'Creating primary beam models...'
beamarray=make_beam.make_beam(outname + '.fits',
delays=delays)
# delete the temporary model
os.remove(outname + '.fits')
beamimage={}
for stokes in ['XX','YY']:
beamimage[stokes]=calibrator + '_' + freqstring + '_beam' + stokes + '.fits'
# scale by the primary beam
# Correct way of doing this is to generate separate models for XX and YY
# Unfortunately, ft doesn't really understand cubes
# So instead we just use the XX model, and then scale the YY solution later
freq=midfreq
freqstring=str(freq/1.0e6)+'MHz'
outname=calibrator+'_'+freqstring
outnt2=calibrator+'_'+freqstring+'_nt2'
# divide to make a ratio beam, so we know how to scale the YY solution later
fbeamX=pyfits.open(beamimage['XX'])
fbeamY=pyfits.open(beamimage['YY'])
ratiovalue=(fbeamX[0].data/fbeamY[0].data).mean()
print 'Found <XX/YY>=%.2f' % ratiovalue
# Models are at 150MHz
# Generate scaled image at correct frequency
if os.path.exists(outname + '.fits') and clobber:
os.remove(outname + '.fits')
# Hardcoded to use the XX beam in the model
fbeam=fbeamX
fmodel=pyfits.open(model)
fspec_index=pyfits.open(spec_index)
ftemplate[0].data=fbeam[0].data * fmodel[0].data/((150000000/f0)**(fspec_index[0].data))
ftemplate[0].header['CRVAL3']=freq
ftemplate[0].header['CDELT3']=bandwidth
ftemplate[0].header['DATE-OBS']=dateobs
ftemplate[0].header['CRVAL4']=1
ftemplate.writeto(outname + '.fits')
print 'Wrote scaled model to %s' % (outname + '.fits')
foutname=pyfits.open(outname + '.fits')
# Generate 2nd Taylor term
if os.path.exists(outnt2 + '.fits') and clobber:
os.remove(outnt2 + '.fits')
if spectral_beam:
# Generate spectral image of the beam
fcalstart=pyfits.open(calibrator+'_'+str(startfreq/1.0e6)+'MHz_beamXX.fits')
fcalend=pyfits.open(calibrator+'_'+str(endfreq/1.0e6)+'MHz_beamXX.fits')
ftemplate[0].data=(n.log(fcalstart[0].data/fcalend[0].data)/
n.log((f0-df/2)/(f0+df/2)))
beam_spec='%s_%sMHz--%sMHz_beamXX.fits' % (calibrator,
str(startfreq/1.0e6),
str(endfreq/1.0e6))
if os.path.exists(beam_spec):
os.remove(beam_spec)
ftemplate.writeto(beam_spec)
fbeam_spec=pyfits.open(beam_spec)
ftemplate[0].data=foutname[0].data * fbeam[0].data * (fspec_index[0].data+fbeam_spec[0].data)
else:
ftemplate[0].data=foutname[0].data * fbeam[0].data * fspec_index[0].data
ftemplate[0].header['DATE-OBS']=dateobs
ftemplate.writeto(outnt2 + '.fits')
print 'Wrote scaled Taylor term to %s' % (outnt2 + '.fits')
# import as CASA images
if os.path.exists(outname + '.im') and clobber:
tasks.rmtables(outname + '.im')
if os.path.exists(outnt2 + '.im') and clobber:
tasks.rmtables(outnt2 + '.im')
tasks.importfits(outname + '.fits',outname + '.im')
tasks.importfits(outnt2 + '.fits',outnt2 + '.im')
print 'Fourier transforming model...'
tasks.ft(vis=vis,model=[outname + '.im',outnt2+'.im'],
nterms=2,usescratch=True)
print 'Calibrating...'
tasks.bandpass(vis=vis,caltable=caltable,refant=refant,uvrange=uvrange)
print 'Scaling YY solutions by beam ratio...'
# Scale YY solution by the ratio
tb.open(caltable)
G = tb.getcol('CPARAM')
tb.close()
new_gains = n.empty(shape=G.shape, dtype=n.complex128)
# XX gains stay the same
new_gains[0,:,:]=G[0,:,:]
# YY gains are scaled
new_gains[1,:,:]=ratiovalue*G[1,:,:]
tb.open(caltable,nomodify=False)
tb.putcol('CPARAM',new_gains)
tb.putkeyword('MODEL',model)
tb.putkeyword('SPECINDX',spec_index)
tb.putkeyword('BMRATIO',ratiovalue)
try:
tb.putkeyword('MWAVER',mwapy.__version__)
except:
pass
tb.close()
print 'Created %s!' % caltable
return caltable
#ft_beam.ft_beam(vis=vis)
#caltable=ft_beam(vis=vis)
| gpl-2.0 |
AZed/duplicity | duplicity/globals.py | 2 | 7347 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Store global configuration information"""
import socket, os
# The current version of duplicity
version = "0.6.21"
# Default file_prefix value
file_prefix = ""
# The name of the current host, or None if it cannot be set
hostname = socket.getfqdn()
# The main local path. For backing up, this is the path to be backed
# up. For restoring, this is the destination of the restored files.
local_path = None
# The symbolic name of the backup being operated upon.
backup_name = None
# For testing -- set current time
current_time = None
# Set to the Path of the archive directory (the directory which
# contains the signatures and manifests of the relevant backup
# collection), and for checkpoint state between volumes.
# NOTE: this gets expanded in duplicity.commandline
os.environ["XDG_CACHE_HOME"] = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
archive_dir = os.path.expandvars("$XDG_CACHE_HOME/duplicity")
# config dir for future use
os.environ["XDG_CONFIG_HOME"] = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
config_dir = os.path.expandvars("$XDG_CONFIG_HOME/duplicity")
# Restores will try to bring back the state as of the following time.
# If it is None, default to current time.
restore_time = None
# If set, restore only the subdirectory or file specified, not the
# whole root.
restore_dir = None
# The backend representing the remote side
backend = None
# If set, the Select object which iterates paths in the local
# source directory.
select = None
# Set to GPGProfile that will be used to compress/uncompress encrypted
# files. Replaces encryption_keys, sign_key, and passphrase settings.
gpg_profile = None
# Options to pass to gpg
gpg_options = ''
# If true, filelists and directory statistics will be split on
# nulls instead of newlines.
null_separator = None
# number of retries on network operations
num_retries = 5
# True if Pydev debugger should be activated
pydevd = False
# Character used like the ":" in time strings like
# 2002-08-06T04:22:00-07:00. The colon isn't good for filenames on
# windows machines.
time_separator = ":"
# If this is true, only warn and don't raise fatal error when backup
# source directory doesn't match previous backup source directory.
allow_source_mismatch = None
# If set, abort if cannot do an incremental backup. Otherwise if
# signatures not found, default to full.
incremental = None
# If set, print the statistics after every backup session
print_statistics = True
# If set, use short (< 30 char) filenames for all the remote files.
short_filenames = False
# If set, forces a full backup if the last full backup is older than
# the time specified
full_force_time = None
# Used to confirm certain destructive operations like deleting old files.
force = None
# If set, signifies time in seconds before which backup files should
# be deleted.
remove_time = None
# If set, signifies the number of backups chains to keep when performing
# a remove-all-but-n-full.
keep_chains = None
# If set, signifies that remove-all-but-n-full in progress
remove_all_but_n_full_mode = None
# If set, signifies that remove-all-inc-of-but-n-full in progress (variant of remove-all-but-n-full)
remove_all_inc_of_but_n_full_mode = None
# Don't actually do anything, but still report what would be done
dry_run = False
# If set to false, then do not encrypt files on remote system
encryption = True
# If set to false, then do not compress files on remote system
compression = True
# volume size. default 25M
volsize = 25*1024*1024
# Working directory for the tempfile module. Defaults to /tmp on most systems.
temproot = None
# network timeout value
timeout = 30
# FTP data connection type
ftp_connection = 'passive'
# Protocol for webdav
webdav_proto = 'http'
# Asynchronous put/get concurrency limit
# (default of 0 disables asynchronicity).
async_concurrency = 0
# Whether to use "new-style" subdomain addressing for S3 buckets. Such
# use is not backwards-compatible with upper-case buckets, or buckets
# that are otherwise not expressible in a valid hostname.
s3_use_new_style = False
# Whether to create European buckets (sorry, hard-coded to only
# support european for now).
s3_european_buckets = False
# File owner uid keeps number from tar file. Like same option in GNU tar.
numeric_owner = False
# Whether to use plain HTTP (without SSL) to send data to S3
# See <https://bugs.launchpad.net/duplicity/+bug/433970>.
s3_unencrypted_connection = False
# Whether to use S3 Reduced Redundancy Storage
s3_use_rrs = False
# True if we should use boto multiprocessing version
s3_use_multiprocessing = False
# Chunk size used for S3 multipart uploads.The number of parallel uploads to
# S3 be given by chunk size / volume size. Use this to maximize the use of
# your bandwidth. Defaults to 25MB
s3_multipart_chunk_size = 25*1024*1024
# Minimum chunk size accepted by S3
s3_multipart_minimum_chunk_size = 5*1024*1024
# Whether to use the full email address as the user name when
# logging into an imap server. If false just the user name
# part of the email address is used.
imap_full_address = False
# Name of the imap folder where we want to store backups.
# Can be changed with a command line argument.
imap_mailbox = "INBOX"
# Whether the old filename format is in effect.
old_filenames = False
# Whether to specify --use-agent in GnuPG options
use_agent = False
# ssh commands to use, used by ssh_pexpect (defaults to sftp, scp)
scp_command = None
sftp_command = None
# default to batch mode using public-key encryption
ssh_askpass = False
# default ssh backend is paramiko
ssh_backend = "paramiko"
# user added ssh options
ssh_options = ""
# whether to use scp for put/get, sftp is default
use_scp = False
# HTTPS SSL options (currently only webdav)
ssl_cacert_file = None
ssl_no_check_certificate = False
# user added rsync options
rsync_options = ""
# will be a Restart object if restarting
restart = None
# used in testing only - raises exception after volume
fail_on_volume = 0
# used in testing only - skips uploading a particular volume
skip_volume = 0
# ignore (some) errors during operations; supposed to make it more
# likely that you are able to restore data under problematic
# circumstances. the default should absolutely always be True unless
# you know what you are doing.
ignore_errors = False
# If we should be particularly aggressive when cleaning up
extra_clean = False
# Renames (--rename)
rename = {}
| gpl-2.0 |
barachka/odoo | addons/account_analytic_plans/wizard/account_crossovered_analytic.py | 341 | 2972 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
    """Wizard that prints the crossovered analytic report for an
    analytic account (and its children) over a date range."""
    _name = "account.crossovered.analytic"
    _description = "Print Crossovered Analytic"
    _columns = {
        'date1': fields.date('Start Date', required=True),
        'date2': fields.date('End Date', required=True),
        'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
        'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean('Dont show empty lines'),
    }
    _defaults = {
        # Default period: start of the current year up to today.
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Check that the selected analytic account (or one of its
        children) has analytic lines, then launch the report action.

        :raises osv.except_osv: if no analytic lines exist for the account.
        """
        cr.execute('SELECT account_id FROM account_analytic_line')
        # Use a set for O(1) membership tests instead of a list scan.
        acc_ids = set(x[0] for x in cr.fetchall())
        data = self.read(cr, uid, ids, context=context)[0]
        # read() returns many2one fields as (id, name); keep only the id.
        data['ref'] = data['ref'][0]
        obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
        name = obj_acc.name
        account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)
        if not any(acc in acc_ids for acc in account_ids):
            # BUGFIX: interpolate *after* the _() lookup, otherwise the
            # already-formatted string never matches a translation entry.
            raise osv.except_osv(_('User Error!'), _('There are no analytic lines related to account %s.') % name)
        datas = {
            'ids': [],
            'model': 'account.analytic.account',
            'form': data
        }
        return self.pool['report'].get_action(cr, uid, [], 'account_analytic_plans.report_crossoveredanalyticplans', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eeshangarg/oh-mainline | vendor/packages/Django/django/contrib/gis/db/backends/spatialite/base.py | 113 | 3465 | from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (
_sqlite_extract, _sqlite_date_trunc, _sqlite_regexp, _sqlite_format_dtdelta,
connection_created, Database, DatabaseWrapper as SQLiteDatabaseWrapper,
SQLiteCursorWrapper)
from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient
from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation
from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection
from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations
class DatabaseWrapper(SQLiteDatabaseWrapper):
    """SQLite backend wrapper that loads the SpatiaLite extension and
    installs the GeoDjango-aware operations/creation/introspection
    helpers."""

    def __init__(self, *args, **kwargs):
        # SpatiaLite support requires pysqlite 2.5+; bail out early.
        if Database.version_info < (2, 5, 0):
            raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
                                       'compatible with SpatiaLite and GeoDjango.')
        # Resolve the path of the SpatiaLite shared library: an explicit
        # SPATIALITE_LIBRARY_PATH setting wins, otherwise fall back to a
        # system library path search.
        self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
                                      find_library('spatialite'))
        if not self.spatialite_lib:
            raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
                                       'Make sure it is in your library path, or set '
                                       'SPATIALITE_LIBRARY_PATH in your settings.'
                                       )
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Swap the stock SQLite helpers for their spatial variants.
        self.ops = SpatiaLiteOperations(self)
        self.client = SpatiaLiteClient(self)
        self.creation = SpatiaLiteCreation(self)
        self.introspection = SpatiaLiteIntrospection(self)

    def _cursor(self):
        # Fast path: connection already initialized, nothing spatial to do.
        if self.connection is not None:
            return self.connection.cursor(factory=SQLiteCursorWrapper)
        self._sqlite_create_connection()
        ## From here on, customized for GeoDjango ##
        # SpatiaLite is loaded as a SQLite extension, so extension loading
        # must first be enabled on the connection.
        try:
            self.connection.enable_load_extension(True)
        except AttributeError:
            raise ImproperlyConfigured('The pysqlite library does not support C extension loading. '
                                       'Both SQLite and pysqlite must be configured to allow '
                                       'the loading of extensions to use SpatiaLite.'
                                       )
        # Load the SpatiaLite library extension on the connection and
        # return the resulting cursor.
        cursor = self.connection.cursor(factory=SQLiteCursorWrapper)
        try:
            cursor.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
        except Exception as msg:
            raise ImproperlyConfigured('Unable to load the SpatiaLite library extension '
                                       '"%s" because: %s' % (self.spatialite_lib, msg))
        return cursor
| agpl-3.0 |
fabioticconi/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils build configuration for sklearn.utils.

    Declares the C/Cython extension modules of the subpackage and wires
    the CBLAS headers/libraries into the extensions that need them.
    """
    import numpy
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('utils', parent_package, top_path)
    cfg.add_subpackage('sparsetools')

    # BLAS linkage shared by the CBLAS-backed extensions.
    cblas_libs, blas_info = get_blas_info()
    extra_args = blas_info.pop('extra_compile_args', [])
    include_dirs = [join('..', 'src', 'cblas'),
                    numpy.get_include(),
                    blas_info.pop('include_dirs', [])]

    libraries = []
    if os.name == 'posix':
        # Link against libm on POSIX platforms.
        libraries.append('m')
        cblas_libs.append('m')

    cfg.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
                      libraries=libraries)
    cfg.add_extension('arrayfuncs',
                      sources=['arrayfuncs.c'],
                      depends=[join('src', 'cholesky_delete.h')],
                      libraries=cblas_libs,
                      include_dirs=include_dirs,
                      extra_compile_args=extra_args,
                      **blas_info)
    cfg.add_extension('murmurhash',
                      sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
                      include_dirs=['src'])
    cfg.add_extension('lgamma',
                      sources=['lgamma.c', join('src', 'gamma.c')],
                      include_dirs=['src'],
                      libraries=libraries)
    cfg.add_extension('graph_shortest_path',
                      sources=['graph_shortest_path.c'],
                      include_dirs=[numpy.get_include()])
    cfg.add_extension('fast_dict',
                      sources=['fast_dict.cpp'],
                      language="c++",
                      include_dirs=[numpy.get_include()],
                      libraries=libraries)
    cfg.add_extension('seq_dataset',
                      sources=['seq_dataset.c'],
                      include_dirs=[numpy.get_include()])
    cfg.add_extension('weight_vector',
                      sources=['weight_vector.c'],
                      include_dirs=include_dirs,
                      libraries=cblas_libs,
                      **blas_info)
    cfg.add_extension("_random",
                      sources=["_random.c"],
                      include_dirs=[numpy.get_include()],
                      libraries=libraries)
    cfg.add_extension("_logistic_sigmoid",
                      sources=["_logistic_sigmoid.c"],
                      include_dirs=[numpy.get_include()],
                      libraries=libraries)
    return cfg


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
mezgani/gomoz | Gomoz/gui/start.py | 2 | 1269 | import wx, interwin, time
class SplashApp(wx.App):
    """wx application bootstrap that creates and shows the main Gomoz
    window.

    The splash-screen animation that gave the class its name was left
    behind only as commented-out/dead code; it has been removed, so
    OnInit now just builds the main frame.
    """

    def OnInit(self):
        """Create the main frame and register it as the top window.

        Returns True so wx continues starting the application.
        """
        # Retained for compatibility with the old splash animation, which
        # used it as a progress counter; nothing increments it any more.
        self.count = 0
        # Let any pending UI events run before building the main frame.
        wx.Yield()
        frame = interwin.InterGomoz(None, -1, "")
        frame.SetSize((900, 550))
        frame.SetPosition((100, 100))
        frame.Show(True)
        self.SetTopWindow(frame)
        frame.Show()
        return True
| gpl-3.0 |
andaag/scikit-learn | sklearn/externals/joblib/format_stack.py | 238 | 15536 | """
Represent an exception with a lot of information.
Provides 2 useful functions:
format_exc: format an exception into a complete traceback, with full
debugging instruction.
format_outer_frames: format the current position in the stack call.
Adapted from IPython's VerboseTB.
"""
# Authors: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Nathaniel Gray <n8gray@caltech.edu>
# Fernando Perez <fperez@colorado.edu>
# Copyright: 2010, Gael Varoquaux
# 2001-2004, Fernando Perez
# 2001 Nathaniel Gray
# License: BSD 3 clause
import inspect
import keyword
import linecache
import os
import pydoc
import sys
import time
import tokenize
import traceback
import types
# Python 2 exposes the tokenizer entry point as tokenize.generate_tokens;
# Python 3 renamed it to tokenize.tokenize.  Bind a common alias.
try: # Python 2
    generate_tokens = tokenize.generate_tokens
except AttributeError: # Python 3
    generate_tokens = tokenize.tokenize
# True when running under Python 3 (sys.version begins with the major number).
PY3 = (sys.version[0] == '3')
# Indentation prefix used when rendering local-variable values under a frame.
INDENT = ' ' * 8
from ._compat import _basestring
###############################################################################
# some internal-use functions
def safe_repr(value):
"""Hopefully pretty robust repr equivalent."""
# this is pretty horrible but should always return *something*
try:
return pydoc.text.repr(value)
except KeyboardInterrupt:
raise
except:
try:
return repr(value)
except KeyboardInterrupt:
raise
except:
try:
# all still in an except block so we catch
# getattr raising
name = getattr(value, '__name__', None)
if name:
# ick, recursion
return safe_repr(name)
klass = getattr(value, '__class__', None)
if klass:
return '%s instance' % safe_repr(klass)
except KeyboardInterrupt:
raise
except:
return 'UNRECOVERABLE REPR FAILURE'
def eq_repr(value, repr=safe_repr):
    """Render *value* as '=<repr(value)>' for use in argument listings."""
    rendered = repr(value)
    return '=%s' % rendered
###############################################################################
def uniq_stable(elems):
    """uniq_stable(elems) -> list

    Return a list of the unique elements of the iterable `elems`,
    preserving the order in which each element first appears.  A naive
    dict/set-only solution would not satisfy the stability requirement,
    so membership is tracked in a set while order is kept in a list.

    Note: All elements in the input must be hashable.
    """
    seen = set()
    out = []
    for item in elems:
        if item in seen:
            continue
        seen.add(item)
        out.append(item)
    return out
###############################################################################
def fix_frame_records_filenames(records):
    """Try to fix the filenames in each record from inspect.getinnerframes().

    Particularly, modules loaded from within zip files have useless filenames
    attached to their code object, and inspect.getinnerframes() just uses it.
    """

    def _with_better_filename(record):
        frame, filename, line_no, func_name, lines, index = record
        # Prefer the module's __file__ global over the code object's
        # filename; only adopt it when it really is a string (it can be
        # None or something odd if the error happened during import).
        candidate = frame.f_globals.get('__file__', None)
        if isinstance(candidate, str):
            filename = candidate
        return (frame, filename, line_no, func_name, lines, index)

    return [_with_better_filename(record) for record in records]
def _fixed_getframes(etb, context=1, tb_offset=0):
    """Extract frame records from a traceback, with context recomputed.

    Parameters
    ----------
    etb: traceback object, as returned by sys.exc_info()[2]
    context: int, number of source lines to capture around each frame's line
    tb_offset: int, number of topmost frames to drop from the result

    Returns
    -------
    List of inspect-style 6-tuples (frame, filename, lnum, func_name, lines,
    index) with filenames repaired and lines/index rebuilt from linecache.
    """
    # Positions of the fields rewritten inside each 6-tuple record.
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
    # If the error is at the console, don't build any context, since it would
    # otherwise produce 5 blank lines printed out (there is no file at the
    # console)
    rec_check = records[tb_offset:]
    try:
        rname = rec_check[0][1]
        if rname == '<ipython console>' or rname.endswith('<string>'):
            return rec_check
    except IndexError:
        pass
    aux = traceback.extract_tb(etb)
    assert len(records) == len(aux)
    for i, (file, lnum, _, _) in enumerate(aux):
        # Center a `context`-line window on the error line, clamped at the
        # top of the file.
        maybeStart = lnum - 1 - context // 2
        start = max(maybeStart, 0)
        end = start + context
        lines = linecache.getlines(file)[start:end]
        # pad with empty lines if necessary
        if maybeStart < 0:
            lines = (['\n'] * -maybeStart) + lines
        if len(lines) < context:
            lines += ['\n'] * (context - len(lines))
        buf = list(records[i])
        buf[LNUM_POS] = lnum
        buf[INDEX_POS] = lnum - 1 - start
        buf[LINES_POS] = lines
        records[i] = tuple(buf)
    return records[tb_offset:]
def _format_traceback_lines(lnum, index, lines, lvals=None):
numbers_width = 7
res = []
i = lnum - index
for line in lines:
if i == lnum:
# This is the line with the error
pad = numbers_width - len(str(i))
if pad >= 3:
marker = '-' * (pad - 3) + '-> '
elif pad == 2:
marker = '> '
elif pad == 1:
marker = '>'
else:
marker = ''
num = marker + str(i)
else:
num = '%*s' % (numbers_width, i)
line = '%s %s' % (num, line)
res.append(line)
if lvals and i == lnum:
res.append(lvals + '\n')
i = i + 1
return res
def format_records(records):  # , print_globals=False):
    """Render a list of frame records into verbose traceback sections.

    For each record this builds a '....' separator line, the file path and
    a formatted call signature, the values of the local names appearing on
    the error line (found by re-tokenizing that line), and the numbered
    source context produced by _format_traceback_lines.

    Returns a list of strings, one per frame.
    """
    # Loop over all records printing context and info
    frames = []
    abspath = os.path.abspath
    for frame, file, lnum, func, lines, index in records:
        try:
            # Normalize to an absolute path; falsy filenames become '?'.
            file = file and abspath(file) or '?'
        except OSError:
            # if file is '<console>' or something not in the filesystem,
            # the abspath call will throw an OSError.  Just ignore it and
            # keep the original file string.
            pass
        link = file
        try:
            args, varargs, varkw, locals = inspect.getargvalues(frame)
        except:
            # This can happen due to a bug in python2.3.  We should be
            # able to remove this try/except when 2.4 becomes a
            # requirement.  Bug details at http://python.org/sf/1005466
            print("\nJoblib's exception reporting continues...\n")
        if func == '?':
            call = ''
        else:
            # Decide whether to include variable details or not
            try:
                call = 'in %s%s' % (func, inspect.formatargvalues(args,
                                    varargs, varkw, locals,
                                    formatvalue=eq_repr))
            except KeyError:
                # Very odd crash from inspect.formatargvalues().  The
                # scenario under which it appeared was a call to
                # view(array,scale) in NumTut.view.view(), where scale had
                # been defined as a scalar (it should be a tuple). Somehow
                # inspect messes up resolving the argument list of view()
                # and barfs out. At some point I should dig into this one
                # and file a bug report about it.
                print("\nJoblib's exception reporting continues...\n")
                call = 'in %s(***failed resolving arguments***)' % func
        # Initialize a list of names on the current line, which the
        # tokenizer below will populate.
        names = []

        def tokeneater(token_type, token, start, end, line):
            """Stateful tokeneater which builds dotted names.

            The list of names it appends to (from the enclosing scope) can
            contain repeated composite names.  This is unavoidable, since
            there is no way to disambiguate partial dotted structures until
            the full list is known.  The caller is responsible for pruning
            the final list of duplicates before using it."""
            # build composite names
            if token == '.':
                try:
                    names[-1] += '.'
                    # store state so the next token is added for x.y.z names
                    tokeneater.name_cont = True
                    return
                except IndexError:
                    pass
            if token_type == tokenize.NAME and token not in keyword.kwlist:
                if tokeneater.name_cont:
                    # Dotted names
                    names[-1] += token
                    tokeneater.name_cont = False
                else:
                    # Regular new names.  We append everything, the caller
                    # will be responsible for pruning the list later.  It's
                    # very tricky to try to prune as we go, b/c composite
                    # names can fool us.  The pruning at the end is easy
                    # to do (or the caller can print a list with repeated
                    # names if so desired.
                    names.append(token)
            elif token_type == tokenize.NEWLINE:
                # End of the logical line: abort tokenization via IndexError,
                # which the caller below treats as normal termination.
                raise IndexError
        # we need to store a bit of state in the tokenizer to build
        # dotted names
        tokeneater.name_cont = False

        def linereader(file=file, lnum=[lnum], getline=linecache.getline):
            # Feeds the tokenizer one source line at a time, starting at
            # the error line (lnum is a 1-element list so it can mutate).
            line = getline(file, lnum[0])
            lnum[0] += 1
            return line

        # Build the list of names on this line of code where the exception
        # occurred.
        try:
            # This builds the names list in-place by capturing it from the
            # enclosing scope.
            for token in generate_tokens(linereader):
                tokeneater(*token)
        except (IndexError, UnicodeDecodeError):
            # signals exit of tokenizer
            pass
        except tokenize.TokenError as msg:
            _m = ("An unexpected error occurred while tokenizing input file %s\n"
                  "The following traceback may be corrupted or invalid\n"
                  "The error message is: %s\n" % (file, msg))
            print(_m)
        # prune names list of duplicates, but keep the right order
        unique_names = uniq_stable(names)
        # Start loop over vars
        lvals = []
        for name_full in unique_names:
            name_base = name_full.split('.', 1)[0]
            if name_base in frame.f_code.co_varnames:
                if name_base in locals.keys():
                    try:
                        # eval resolves dotted names like obj.attr against
                        # the frame's locals.
                        value = safe_repr(eval(name_full, locals))
                    except:
                        value = "undefined"
                else:
                    value = "undefined"
                name = name_full
                lvals.append('%s = %s' % (name, value))
            #elif print_globals:
            #    if frame.f_globals.has_key(name_base):
            #        try:
            #            value = safe_repr(eval(name_full,frame.f_globals))
            #        except:
            #            value = "undefined"
            #        else:
            #            value = "undefined"
            #        name = 'global %s' % name_full
            #        lvals.append('%s = %s' % (name,value))
        if lvals:
            lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
        else:
            lvals = ''
        level = '%s\n%s %s\n' % (75 * '.', link, call)
        if index is None:
            frames.append(level)
        else:
            frames.append('%s%s' % (level, ''.join(
                _format_traceback_lines(lnum, index, lines, lvals))))
    return frames
###############################################################################
def format_exc(etype, evalue, etb, context=5, tb_offset=0):
    """ Return a nice text document describing the traceback.

    Parameters
    -----------
    etype, evalue, etb: as returned by sys.exc_info
    context: number of lines of the source file to plot
    tb_offset: the number of stack frame not to use (0 = use all)
    """
    # Normalize the exception type to its bare class name when possible.
    try:
        etype = etype.__name__
    except AttributeError:
        pass
    # Header with the exception type, python version, and date
    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    pid = 'PID: %i' % os.getpid()
    head = '%s%s%s\n%s%s%s' % (etype, ' ' * (75 - len(str(etype)) - len(date)),
                               date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),
                               pyver)
    # Flush cache before calling inspect.  This helps alleviate some of the
    # problems with python 2.3's inspect.py.
    linecache.checkcache()
    # Drop topmost frames if requested.
    # NOTE(review): an earlier version wrapped this call in
    # ``try: ... except: raise`` followed by a print-and-return-'' fallback
    # that sat *after* the bare ``raise`` and was therefore unreachable dead
    # code.  The dead wrapper is removed; behavior is unchanged (extraction
    # errors still propagate to the caller).
    records = _fixed_getframes(etb, context, tb_offset)
    # Get (safely) a string form of the exception info
    try:
        etype_str, evalue_str = map(str, (etype, evalue))
    except:
        # User exception is improperly defined.
        etype, evalue = str, sys.exc_info()[:2]
        etype_str, evalue_str = map(str, (etype, evalue))
    # ... and format it.  (The original wrapped the message in a one-element
    # list and re-joined it character-wise via ''.join(exception[0]), which
    # is an identity operation on a string; simplified to the plain string.)
    exception = '%s: %s' % (etype_str, evalue_str)
    frames = format_records(records)
    return '%s\n%s\n%s' % (head, '\n'.join(frames), exception)
###############################################################################
def format_outer_frames(context=5, stack_start=None, stack_end=None,
                        ignore_ipython=True):
    """Format the current call stack (outer frames) as traceback text.

    Parameters
    ----------
    context: int, number of source lines shown around each frame's line
    stack_start, stack_end: optional bounds for the reversed slice of
        collected frames rendered at the end (note the [-1] step below)
    ignore_ipython: bool, stop collecting once IPython internals
        (iplib.py's safe_execfile/runcode) are reached

    Returns a single newline-joined string built by format_records.
    """
    # Positions of the fields rewritten inside each 6-tuple frame record.
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
    records = inspect.getouterframes(inspect.currentframe())
    output = list()
    for i, (frame, filename, line_no, func_name, lines, index) \
            in enumerate(records):
        # Look inside the frame's globals dictionary for __file__, which should
        # be better.
        better_fn = frame.f_globals.get('__file__', None)
        if isinstance(better_fn, str):
            # Check the type just in case someone did something weird with
            # __file__. It might also be None if the error occurred during
            # import.
            filename = better_fn
            if filename.endswith('.pyc'):
                # Point at the .py source rather than the compiled file.
                filename = filename[:-4] + '.py'
        if ignore_ipython:
            # Hack to avoid printing the internals of IPython
            if (os.path.basename(filename) == 'iplib.py'
                    and func_name in ('safe_execfile', 'runcode')):
                break
        # Center a `context`-line window on the current line, clamped at the
        # top of the file.
        maybeStart = line_no - 1 - context // 2
        start = max(maybeStart, 0)
        end = start + context
        lines = linecache.getlines(filename)[start:end]
        # pad with empty lines if necessary
        if maybeStart < 0:
            lines = (['\n'] * -maybeStart) + lines
        if len(lines) < context:
            lines += ['\n'] * (context - len(lines))
        buf = list(records[i])
        buf[LNUM_POS] = line_no
        buf[INDEX_POS] = line_no - 1 - start
        buf[LINES_POS] = lines
        output.append(tuple(buf))
    return '\n'.join(format_records(output[stack_end:stack_start:-1]))
| bsd-3-clause |
redhat-openstack/glance | glance/db/registry/api.py | 1 | 15384 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This is the Registry's Driver API.
This API relies on the registry RPC client (version >= 2). The functions below
work as a proxy for the database back-end configured in the registry service,
which means that everything returned by that back-end will be also returned by
this API.
This API exists for supporting deployments not willing to put database
credentials in glance-api. Those deployments can rely on this registry driver
that will talk to a remote registry service, which will then access the
database back-end.
"""
import functools
import glance.openstack.common.log as logging
from glance.registry.client.v2 import api
LOG = logging.getLogger(__name__)
def configure():
    """Configure the registry RPC client used by every function in this API."""
    api.configure_registry_client()
def _get_client(func):
    """Injects a client instance into each decorated function.

    This decorator creates an instance of the Registry
    client and passes it as an argument to each function
    in this API.

    Callers therefore invoke the decorated function with a request
    ``context`` as the first argument; the wrapper swaps it for a registry
    client bound to that context before calling ``func``.
    """
    @functools.wraps(func)
    def wrapper(context, *args, **kwargs):
        client = api.get_registry_client(context)
        return func(client, *args, **kwargs)
    return wrapper
@_get_client
def image_create(client, values):
    """Create an image from the values dictionary."""
    return client.image_create(values=values)
@_get_client
def image_update(client, image_id, values, purge_props=False, from_state=None):
    """
    Set the given properties on an image and update it.

    :raises NotFound if image does not exist.
    """
    return client.image_update(values=values,
                               image_id=image_id,
                               purge_props=purge_props, from_state=from_state)
@_get_client
def image_destroy(client, image_id):
    """Destroy the image or raise if it does not exist."""
    return client.image_destroy(image_id=image_id)
@_get_client
def image_get(client, image_id, force_show_deleted=False):
    """Fetch a single image, optionally including soft-deleted ones."""
    return client.image_get(image_id=image_id,
                            force_show_deleted=force_show_deleted)
def is_image_visible(context, image, status=None):
    """Return True if the image is visible in this context.

    Visibility rules, in order: admins see everything; ownerless and
    public images are visible to all; an owner sees their own images;
    otherwise the image is visible only if it has been shared with the
    context's tenant (optionally restricted to a membership ``status``).
    """
    # Admins see everything.
    if context.is_admin:
        return True
    # Ownerless and public images are visible to everyone.
    if image['owner'] is None or image['is_public']:
        return True
    tenant = context.owner
    if tenant is not None:
        # The owner always sees their own image.
        if tenant == image['owner']:
            return True
        # Otherwise, visible only if shared with this tenant.
        shared = image_member_find(context,
                                   image_id=image['id'],
                                   member=tenant,
                                   status=status)
        if shared:
            return True
    # Private image
    return False
@_get_client
def image_get_all(client, filters=None, marker=None, limit=None,
                  sort_key='created_at', sort_dir='desc',
                  member_status='accepted', is_public=None,
                  admin_as_user=False, return_tag=False):
    """
    Get all images that match zero or more filters.

    :param filters: dict of filter keys and values. If a 'properties'
                    key is present, it is treated as a dict of key/value
                    filters on the image properties attribute
    :param marker: image id after which to start page
    :param limit: maximum number of images to return
    :param sort_key: image attribute by which results should be sorted
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param member_status: only return shared images that have this membership
                          status
    :param is_public: If true, return only public images. If false, return
                      only private and shared images.
    :param admin_as_user: For backwards compatibility. If true, then return to
                          an admin the equivalent set of images which it would
                          see if it were a regular user
    :param return_tag: To indicates whether image entry in result includes it
                       relevant tag entries. This could improve upper-layer
                       query performance, to prevent using separated calls

    All arguments are forwarded verbatim to the registry back-end; the
    return value is whatever that back-end produces for the query.
    """
    return client.image_get_all(filters=filters, marker=marker, limit=limit,
                                sort_key=sort_key, sort_dir=sort_dir,
                                member_status=member_status,
                                is_public=is_public,
                                admin_as_user=admin_as_user,
                                return_tag=return_tag)
# NOTE: the ``session`` parameters below exist only for signature parity
# with the direct-DB driver; they are accepted and ignored here (the bodies
# never forward them).
@_get_client
def image_property_create(client, values, session=None):
    """Create an ImageProperty object"""
    return client.image_property_create(values=values)
@_get_client
def image_property_delete(client, prop_ref, image_ref, session=None):
    """
    Used internally by _image_property_create and image_property_update
    """
    return client.image_property_delete(prop_ref=prop_ref, image_ref=image_ref)
@_get_client
def image_member_create(client, values, session=None):
    """Create an ImageMember object"""
    return client.image_member_create(values=values)
@_get_client
def image_member_update(client, memb_id, values):
    """Update an ImageMember object"""
    return client.image_member_update(memb_id=memb_id, values=values)
@_get_client
def image_member_delete(client, memb_id, session=None):
    """Delete an ImageMember object"""
    client.image_member_delete(memb_id=memb_id)
@_get_client
def image_member_find(client, image_id=None, member=None, status=None):
    """Find all members that meet the given criteria

    :param image_id: identifier of image entity
    :param member: tenant to which membership has been granted
    :param status: optional membership status to match
    """
    return client.image_member_find(image_id=image_id,
                                    member=member,
                                    status=status)
@_get_client
def image_member_count(client, image_id):
    """Return the number of image members for this image

    :param image_id: identifier of image entity
    """
    return client.image_member_count(image_id=image_id)
@_get_client
def image_tag_set_all(client, image_id, tags):
    """Set the full tag list of an image (delegates to the registry)."""
    client.image_tag_set_all(image_id=image_id, tags=tags)
@_get_client
def image_tag_create(client, image_id, value, session=None):
    """Create an image tag."""
    return client.image_tag_create(image_id=image_id, value=value)
@_get_client
def image_tag_delete(client, image_id, value, session=None):
    """Delete an image tag."""
    client.image_tag_delete(image_id=image_id, value=value)
@_get_client
def image_tag_get_all(client, image_id, session=None):
    """Get a list of tags for a specific image."""
    return client.image_tag_get_all(image_id=image_id)
@_get_client
def image_location_delete(client, image_id, location_id, status, session=None):
    """Delete an image location."""
    client.image_location_delete(image_id=image_id, location_id=location_id,
                                 status=status)
@_get_client
def user_get_storage_usage(client, owner_id, image_id=None, session=None):
    """Delegate user_get_storage_usage to the registry back-end."""
    return client.user_get_storage_usage(owner_id=owner_id, image_id=image_id)
@_get_client
def task_get(client, task_id, session=None, force_show_deleted=False):
    """Get a single task object

    :return: task dictionary
    """
    return client.task_get(task_id=task_id, session=session,
                           force_show_deleted=force_show_deleted)
@_get_client
def task_get_all(client, filters=None, marker=None, limit=None,
                 sort_key='created_at', sort_dir='desc', admin_as_user=False):
    """Get all tasks that match zero or more filters.

    :param filters: dict of filter keys and values.
    :param marker: task id after which to start page
    :param limit: maximum number of tasks to return
    :param sort_key: task attribute by which results should be sorted
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param admin_as_user: For backwards compatibility. If true, then return to
                          an admin the equivalent set of tasks which it would
                          see if it were a regular user
    :return: tasks set
    """
    return client.task_get_all(filters=filters, marker=marker, limit=limit,
                               sort_key=sort_key, sort_dir=sort_dir,
                               admin_as_user=admin_as_user)
@_get_client
def task_create(client, values, session=None):
    """Create a task object"""
    return client.task_create(values=values, session=session)
@_get_client
def task_delete(client, task_id, session=None):
    """Delete a task object"""
    return client.task_delete(task_id=task_id, session=session)
@_get_client
def task_update(client, task_id, values, session=None):
    """Update a task object (delegates to the registry client)."""
    return client.task_update(task_id=task_id, values=values, session=session)
# Metadef namespace proxies: thin delegations to the registry client.
@_get_client
def metadef_namespace_get_all(
        client, marker=None, limit=None, sort_key='created_at',
        sort_dir=None, filters=None, session=None):
    """Delegate metadef_namespace_get_all (paged/sorted/filtered listing)."""
    return client.metadef_namespace_get_all(
        marker=marker, limit=limit,
        sort_key=sort_key, sort_dir=sort_dir, filters=filters)
@_get_client
def metadef_namespace_get(client, namespace_name, session=None):
    """Delegate metadef_namespace_get for the named namespace."""
    return client.metadef_namespace_get(namespace_name=namespace_name)
@_get_client
def metadef_namespace_create(client, values, session=None):
    """Delegate metadef_namespace_create with the given values dict."""
    return client.metadef_namespace_create(values=values)
@_get_client
def metadef_namespace_update(
        client, namespace_id, namespace_dict,
        session=None):
    """Delegate metadef_namespace_update for the namespace id."""
    return client.metadef_namespace_update(
        namespace_id=namespace_id, namespace_dict=namespace_dict)
@_get_client
def metadef_namespace_delete(client, namespace_name, session=None):
    """Delegate metadef_namespace_delete for the named namespace."""
    return client.metadef_namespace_delete(
        namespace_name=namespace_name)
# Metadef object proxies: thin delegations to the registry client.
@_get_client
def metadef_object_get_all(client, namespace_name, session=None):
    """Delegate metadef_object_get_all for the named namespace."""
    return client.metadef_object_get_all(
        namespace_name=namespace_name)
@_get_client
def metadef_object_get(
        client,
        namespace_name, object_name, session=None):
    """Delegate metadef_object_get for (namespace, object name)."""
    return client.metadef_object_get(
        namespace_name=namespace_name, object_name=object_name)
@_get_client
def metadef_object_create(
        client,
        namespace_name, object_dict, session=None):
    """Delegate metadef_object_create in the named namespace."""
    return client.metadef_object_create(
        namespace_name=namespace_name, object_dict=object_dict)
@_get_client
def metadef_object_update(
        client,
        namespace_name, object_id,
        object_dict, session=None):
    """Delegate metadef_object_update for (namespace, object id)."""
    return client.metadef_object_update(
        namespace_name=namespace_name, object_id=object_id,
        object_dict=object_dict)
@_get_client
def metadef_object_delete(
        client,
        namespace_name, object_name,
        session=None):
    """Delegate metadef_object_delete for (namespace, object name)."""
    return client.metadef_object_delete(
        namespace_name=namespace_name, object_name=object_name)
@_get_client
def metadef_object_delete_namespace_content(
        client,
        namespace_name, session=None):
    """Delegate metadef_object_delete_namespace_content for a namespace."""
    return client.metadef_object_delete_namespace_content(
        namespace_name=namespace_name)
@_get_client
def metadef_object_count(
        client,
        namespace_name, session=None):
    """Delegate metadef_object_count for the named namespace."""
    return client.metadef_object_count(
        namespace_name=namespace_name)
# Metadef property proxies: thin delegations to the registry client.
@_get_client
def metadef_property_get_all(
        client,
        namespace_name, session=None):
    """Delegate metadef_property_get_all for the named namespace."""
    return client.metadef_property_get_all(
        namespace_name=namespace_name)
@_get_client
def metadef_property_get(
        client,
        namespace_name, property_name,
        session=None):
    """Delegate metadef_property_get for (namespace, property name)."""
    return client.metadef_property_get(
        namespace_name=namespace_name, property_name=property_name)
@_get_client
def metadef_property_create(
        client,
        namespace_name, property_dict,
        session=None):
    """Delegate metadef_property_create in the named namespace."""
    return client.metadef_property_create(
        namespace_name=namespace_name, property_dict=property_dict)
@_get_client
def metadef_property_update(
        client,
        namespace_name, property_id,
        property_dict, session=None):
    """Delegate metadef_property_update for (namespace, property id)."""
    return client.metadef_property_update(
        namespace_name=namespace_name, property_id=property_id,
        property_dict=property_dict)
@_get_client
def metadef_property_delete(
        client,
        namespace_name, property_name,
        session=None):
    """Delegate metadef_property_delete for (namespace, property name)."""
    return client.metadef_property_delete(
        namespace_name=namespace_name, property_name=property_name)
@_get_client
def metadef_property_delete_namespace_content(
        client,
        namespace_name, session=None):
    """Delegate metadef_property_delete_namespace_content for a namespace."""
    return client.metadef_property_delete_namespace_content(
        namespace_name=namespace_name)
@_get_client
def metadef_property_count(
        client,
        namespace_name, session=None):
    """Delegate metadef_property_count for the named namespace."""
    return client.metadef_property_count(
        namespace_name=namespace_name)
# Metadef resource-type proxies: thin delegations to the registry client.
@_get_client
def metadef_resource_type_create(client, values, session=None):
    """Delegate metadef_resource_type_create with the given values dict."""
    return client.metadef_resource_type_create(values=values)
@_get_client
def metadef_resource_type_get(
        client,
        resource_type_name, session=None):
    """Delegate metadef_resource_type_get for the named resource type."""
    return client.metadef_resource_type_get(
        resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_get_all(client, session=None):
    """Delegate metadef_resource_type_get_all (full listing)."""
    return client.metadef_resource_type_get_all()
@_get_client
def metadef_resource_type_delete(
        client,
        resource_type_name, session=None):
    """Delegate metadef_resource_type_delete for the named resource type."""
    return client.metadef_resource_type_delete(
        resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_association_get(
        client,
        namespace_name, resource_type_name,
        session=None):
    """Delegate metadef_resource_type_association_get."""
    return client.metadef_resource_type_association_get(
        namespace_name=namespace_name, resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_association_create(
        client,
        namespace_name, values, session=None):
    """Delegate metadef_resource_type_association_create."""
    return client.metadef_resource_type_association_create(
        namespace_name=namespace_name, values=values)
@_get_client
def metadef_resource_type_association_delete(
        client,
        namespace_name, resource_type_name, session=None):
    """Delegate metadef_resource_type_association_delete."""
    return client.metadef_resource_type_association_delete(
        namespace_name=namespace_name, resource_type_name=resource_type_name)
@_get_client
def metadef_resource_type_association_get_all_by_namespace(
        client,
        namespace_name, session=None):
    """Delegate metadef_resource_type_association_get_all_by_namespace."""
    return client.metadef_resource_type_association_get_all_by_namespace(
        namespace_name=namespace_name)
| apache-2.0 |
tensorflow/lingvo | lingvo/core/base_layer.py | 1 | 41699 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for all layers."""
import abc
import collections
import contextlib
import copy
import enum
import itertools
import re
from typing import Callable, List, Mapping, Optional, Type, TypeVar, Union
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import hyperparams
from lingvo.core import py_utils
FLAGS = tf.flags.FLAGS
# Thread-local stack of layers currently executing __init__; it lets a parent
# layer auto-register sub-layers created inside its constructor (see
# _BaseLayerInitWrapper below).
_LAYER_STACK = py_utils.ThreadLocalStack()
# Thread-local stack presumably tracking layers during variable creation;
# its consumers are not visible in this chunk — confirm against the rest of
# the module.
_CREATE_VARIABLES_STACK = py_utils.ThreadLocalStack()
class Accumulator:
  """Container for step-level state that layers register and persist.

  Subclasses must override DefaultValue() to build a fresh default Tensor of
  a fixed shape on every call; recreating the default each time avoids
  accumulators leaking between different graphs.

  An accumulator starts out enabled.  Disable()/Enable() calls nest: while
  disabled, GetValue() always yields the default and SetValue() is silently
  dropped.  Computations (such as gradients) that may touch accumulators
  should be bracketed with Disable()/Enable().

  When crossing Defun boundaries, accumulator values must typically be
  marshalled explicitly: fetch them with the layer's GetAccumulatorValues(),
  pass them into the Defun, restore them with SetAccumulatorValues(), and do
  the reverse on return.
  """

  def __init__(self):
    # Latest value set via SetValue(), or None while still at the default.
    self._value = None
    # Nesting depth of Disable() calls; the accumulator is active only at 0.
    self._disable_count = 0

  @property
  def is_disabled(self):
    """Whether the accumulator is disabled."""
    return self._disable_count > 0

  def Disable(self):
    """Disables the accumulator (must be balanced with Enable)."""
    self._disable_count += 1

  def Enable(self):
    """Enables the accumulator (must balance a Disable)."""
    assert self._disable_count > 0, 'Unbalanced Accumulator Enable/Disable'
    self._disable_count -= 1

  def GetValue(self):
    """Returns the current value, or the default when disabled or unset."""
    if not self.is_disabled and self._value is not None:
      return self._value
    return self.DefaultValue()

  def SetValue(self, value):
    """Records `value` as the current value; dropped while disabled."""
    if self.is_disabled:
      return
    self._value = value

  def Reset(self):
    """Reverts to the default value; a no-op while disabled."""
    if self.is_disabled:
      return
    self._value = None

  def DefaultValue(self):
    raise NotImplementedError('DefaultValue must be implemented')
def _BaseLayerInitWrapper(func):  # pylint: disable=invalid-name
  """A decorator for layer's __init__.

  Args:
    func: The __init__ method of `BaseLayer`'s subclasses.

  Returns:
    A decorator wrapper for layer's initializer. Note that this wrapper can
    be called multiple times for the same layer instance, once for each
    __init__() for classes on the class hierarchy.
  """

  def Wrapper(self, *args, **kwargs):
    """Decorator wrapper fn."""
    stack = _LAYER_STACK.stack
    if stack and stack[-1] is self:
      # Short circuit if called multiple times (eg. super() chain).
      func(self, *args, **kwargs)
      return
    # Push back self (the current layer) to the stack.
    stack_size = len(stack)
    stack.append(self)
    try:
      # Calls the layer's real __init__ method.
      func(self, *args, **kwargs)
      if len(stack) > 1:
        # Records the fact stack[-2] just created a sub-layer self.
        stack[-2]._AutoAddChild(self)  # pylint: disable=protected-access
    finally:
      # Pop out self (the current layer).  The finally block guarantees the
      # stack is restored even when the wrapped __init__ raises.
      assert stack[-1] is self
      stack.pop()
      assert len(stack) == stack_size
    if not stack:
      # Outermost layer just finished __init__.
      # NOTE(review): `self.cluster` and the meaning of
      # immediately_instantiate_variables come from BaseLayer /
      # cluster_factory, outside this function — eager variable creation is
      # presumably gated on the cluster configuration.
      if self.cluster.immediately_instantiate_variables:
        self.InstantiateVariables()

  return Wrapper
def RecursiveFindLayerParams(params):
  """Returns all params that define a layer.

  Recursively walks `params` (a hyperparams.Params), collecting every nested
  Params whose `cls` is a BaseLayer subclass.  Lists/tuples and dict values
  stored in params are searched as well; non-Params inputs yield [].
  """
  if not isinstance(params, hyperparams.Params):
    return []
  layer_params = []
  if hasattr(params, 'cls') and issubclass(params.cls, BaseLayer):
    layer_params.append(params)
  for _, p in params.IterParams():
    if isinstance(p, (list, tuple)):
      for item in p:
        layer_params.extend(RecursiveFindLayerParams(item))
    elif isinstance(p, dict):
      # Recurse into the *values*.  A previous version iterated p.items()
      # and recursed on each (key, value) tuple, which the non-Params guard
      # at the top immediately rejected — so layer params stored as dict
      # values were silently missed.
      for item in p.values():
        layer_params.extend(RecursiveFindLayerParams(item))
    else:
      layer_params.extend(RecursiveFindLayerParams(p))
  return layer_params
class BaseLayerMeta(type):
  """Metaclass tracking child layers and variable initialization."""

  # pylint: disable=bad-mcs-classmethod-argument
  def __new__(mcs, name, bases, dct):
    # Every subclass gets an __init__ (a trivial delegating one is
    # synthesized when the class body did not define its own) which is then
    # wrapped so that parent/child layer bookkeeping happens around every
    # constructor call (see _BaseLayerInitWrapper).
    cls = super(BaseLayerMeta, mcs).__new__(mcs, name, bases, dct)
    if '__init__' not in dct:

      def TrivialInit(self, params):
        super(cls, self).__init__(params)  # pylint: disable=bad-super-call

      cls.__init__ = TrivialInit
    cls.__init__ = _BaseLayerInitWrapper(cls.__init__)
    return cls

  # pylint: enable=bad-mcs-classmethod-argument

  def __call__(cls, *args, **kwargs):
    self = super().__call__(*args, **kwargs)
    # This happens after self.__init__()
    # pylint: disable=protected-access
    # _disable_create_child presumably blocks further child creation after
    # construction (enforced elsewhere); _VerifyChildren validates the
    # children declared during __init__.
    self._disable_create_child = True
    self._VerifyChildren()
    # pylint: enable=protected-access
    return self
class ABCLayerMeta(BaseLayerMeta, abc.ABCMeta):
  # Combines BaseLayerMeta with abc.ABCMeta so abstract BaseLayer
  # subclasses can declare @abstractmethod members while keeping the
  # layer bookkeeping behavior.
  pass
# NamedTuple that records the metadata for creating a variable.
# For internal use only. Subclasses of BaseLayer should use
# self.CreateVariable() to create variables.
CreateVariableMeta = collections.namedtuple(
    'CreateVariableMeta', ['var_params', 'theta_fn', 'kwargs'])
class _CreateLayerVariablesStatus(enum.Enum):
  """Lifecycle states of a layer's variable instantiation."""
  NOT_CALLED = 1
  IN_PROGRESS = 2
  COMPLETED = 3
  PER_SPLIT_COMPLETED = 4
# String tag for layer weight variables; its consumers are outside this
# chunk — confirm usage against the rest of the module.
LAYER_WT = 'layer_weight_variable'
# Type variables for annotating BaseLayer subclasses and their Params.
BaseLayerS = TypeVar('BaseLayerS', bound='BaseLayer')
BaseLayerT = TypeVar('BaseLayerT', bound='BaseLayer')
BaseLayerParamsT = hyperparams.InstantiableParams[BaseLayerT]
class BaseLayer(tf.Module, metaclass=BaseLayerMeta):
  r"""Base class for all the layer object.

  As this BaseLayer is a proper sub-class of tf.Module, it supports proper
  tracking and reflection of key constituents such as variables and submodules.

  self.submodules returns a list of submodules that are reachable through
  recursive member access from self.

  self.variables returns a list of Variables that are reachable through
  recursive member access from self.

  self(\*args, \*\*kwargs) carries out computation on the input args and kwargs.
  """
  # Set to an inference driver name if this is an inference specialization
  # class. Propagated into Params() as `inference_driver_name`.
  _INFERENCE_DRIVER_NAME = None
  @classmethod
  def Params(cls: Type[BaseLayerT]) -> BaseLayerParamsT:
    """Returns the layer params.

    The returned params are bound to `cls` and pre-populated with the
    hyperparameters shared by all layers: dtypes, random seed, variational
    noise, weight initialization, and SPMD sharding settings.
    """
    p = hyperparams.InstantiableParams(cls)
    p.Define('inference_driver_name', cls._INFERENCE_DRIVER_NAME,
             'Name of the inference driver used to construct this layer.')
    p.Define('name', '', 'Name of this layer object.')
    p.Define('dtype', tf.float32, 'Datatype to use.')
    # None value will make FProp use dtype instead of fprop_dtype.
    # TODO(lepikhin): all @tf.Defun should use p.fprop_dtype if it is set.
    p.Define('fprop_dtype', None, 'Activations datatype to use.')
    p.Define(
        'random_seed', None, 'Random seed for deterministic unittests. This '
        'is inherited by child layers if they do not set a random_seed.')
    p.Define('vn', py_utils.DefaultVN(),
             'How variational noise should be applied.')
    p.Define(
        'params_init', py_utils.DefaultParamInit(),
        'How model weights should be initialized. Not to be confused with '
        'hyperparams.')
    p.Define('add_name_to_theta', False,
             'Wrap theta with tf.identity(var_name).')
    # Makes additional alterations for graphs being used for inference.
    p.Define('is_inference', None, 'True if in inference mode.')
    # In addition to is_inference, indicate that the inference graph is
    # for a single step.
    p.Define(
        'allow_implicit_capture', None,
        'When using Defuns, code often asserts that the Defun does not '
        'capture undeclared inputs. This eliminates a source of bugs '
        'at the expense of making some kinds of models or utilities '
        'hard/impossible to use. Setting this to True/False (versus None) '
        'causes the setting to apply to this layer and its children.')
    p.Define(
        'skip_lp_regularization', None,
        'If True, all variables in this layer will skip Lp regularization. '
        'If None/False, only variables explicitly in the '
        'SKIP_LP_REGULARIZATION collection will skip Lp regularization. '
        'Also propagated to child layers with default settings (None).')
    # SPMD partition related params.
    p.Define(
        'device_mesh', None,
        'A numpy.ndarray specifying the topology of a device mesh to place the'
        ' computations onto. If device_mesh is None, it is assumed to be a'
        ' single device. Here are some examples:'
        ' np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices,'
        ' np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8'
        ' devices.')
    p.Define(
        'weight_split_dims_mapping', None,
        'Relevant only if device_mesh above is not None. If not None, it '
        'specifies how weight of this layer or those of the sublayers should '
        'be sharded over device mesh. ')
    p.Define(
        'activation_split_dims_mapping', None,
        'Relevant only if device_mesh above is not None. If not None, it '
        'specifies how activation of this layer or those of the sublayers '
        'should be sharded over device mesh. ')
    return p
  @staticmethod
  def CopyBaseParams(from_params: hyperparams.InstantiableParams[BaseLayerS],
                     to_params: BaseLayerParamsT) -> BaseLayerParamsT:
    """Copies BaseLayer params from `from_params` to `to_params`.

    Each shared BaseLayer hyperparameter is propagated only when `to_params`
    still holds its default value, so explicit child settings win.

    Args:
      from_params: Params of the parent layer.
      to_params: Params of the child layer; mutated in place.

    Returns:
      The mutated `to_params`.
    """
    assert issubclass(from_params.cls, BaseLayer)
    assert issubclass(to_params.cls, BaseLayer)
    # Copy-over the BaseLayer params.
    if to_params.dtype == tf.float32:
      # tf.float32 is the Params() default, so treat it as "unset".
      to_params.dtype = from_params.dtype
    if to_params.fprop_dtype is None:
      to_params.fprop_dtype = from_params.fprop_dtype
    if to_params.random_seed is None:
      to_params.random_seed = from_params.random_seed
    if to_params.is_inference is None:
      to_params.is_inference = from_params.is_inference
    if to_params.allow_implicit_capture is None:
      to_params.allow_implicit_capture = from_params.allow_implicit_capture
    if to_params.skip_lp_regularization is None:
      to_params.skip_lp_regularization = from_params.skip_lp_regularization
    if to_params.device_mesh is None:
      # deepcopy: device_mesh is a numpy array and must not be shared.
      to_params.device_mesh = copy.deepcopy(from_params.device_mesh)
    # Only copy from base when vn config is using the default setting.
    if to_params.vn == py_utils.DefaultVN():
      to_params.vn = from_params.vn.Copy()
    # TODO(rpang): derive to_params.params_init.seed from
    # from_params.params_init.seed if it is specified in 'from_params' and not
    # in 'to_params'.
    if py_utils.IsDefaultParamInit(to_params.params_init):
      # Copy over params_init as well.
      to_params.params_init = from_params.params_init.Copy()
    return to_params
  def __init__(self: BaseLayerT, params: BaseLayerParamsT) -> None:
    """Layer constructor.

    Args:
      params: A params used to construct this layer.
    """
    assert params.name, (
        'Layer params for %s must have a "name"' % self.__class__.__name__)
    # Derive a tf.Module-safe name: sanitize, then prefix so it never
    # collides with a NestedMap key created elsewhere.
    tf_module_name = params.name
    tf_module_name = re.sub('[^a-zA-Z0-9_]+', '_', tf_module_name)
    tf_module_name = 'bbf_' + self.__class__.__name__ + '_' + tf_module_name
    py_utils.NestedMap.CheckKey(tf_module_name)
    # initialize the base class.
    super().__init__(tf_module_name)
    # Note AutoTracking doesn't work properly due to its inability to walk
    # through py_utils.NestedMap data structures which are used widely
    # throughout the Lingvo codebase. Also there seems to be some performance
    # hit in turning on auto-tracking in constructing graphs. For now, we
    # disable auto-tracking.
    # TODO(lingvo): Re-enable auto-tracking when fuller support is
    # added for key data structures used in Lingvo, and performance issue is
    # debugged more and understood better.
    self._setattr_tracking = False
    # The innermost layer on the construction stack (other than self, which
    # was pushed by _BaseLayerInitWrapper) is this layer's parent.
    self._parent = None
    for parent in reversed(_LAYER_STACK.stack):
      if parent is not self:
        self._parent = parent
        break
    self._params = params.Copy()
    tf.logging.debug('Creating layer %s with params: \n %s \n',
                     self.__class__.__name__, str(params))
    # Vars created by this layer.
    self._private_vars = py_utils.NestedMap()
    # Utility for TransformVarsTempContext() to restore self._private_vars.
    self._private_vars_transform_restore_stack = []
    # Theta derived from this layer's vars.
    self._private_theta = py_utils.NestedMap()
    # A simple transformation before used by the forward computation. Its
    # signature must be (tf.Tensor) -> (tf.Tensor).
    self._private_theta_fn = py_utils.NestedMap()
    # Child layers created by this layer through CreateChild/CreateChildren.
    self._private_children = py_utils.NestedMap()
    # Child layers created by this layer. A well-formed layer should
    # have self._private_children equals to self._children_list. I.e.,
    # all child layers are created using CreateChild/CreateChildren.
    self._children_list = []
    # Extra theta's not directly correspond to any underlying vars. For example,
    # the concatenated sharded variables.
    self._extra_theta = py_utils.NestedMap()
    # All registered accumulators.
    self._private_accumulators = py_utils.NestedMap()
    # Layer-private functions. Add with AddFunction.
    self._private_fns = dict()
    # Mapping from variable names to its symbolic shape.
    # self._var_symbolic_shape_map['var_name'] will be a tuple of integers or
    # symbolic expressions, one for each dimension of the variable.
    self._var_symbolic_shape_map = {}
    # Set via SetVariableFree(); a variable-free layer may not create vars.
    self._is_variable_free = False
    # Variable-creation requests cached until InstantiateVariables() runs.
    self._variables_to_create = {}
    self._create_variables_status = _CreateLayerVariablesStatus.NOT_CALLED
    # Keep track of the tf.variable_scope(p.name) this layer creates so we can
    # reenter it without creating a new one.
    self._self_variable_scope = None
  def SetVariableFree(self, value: bool = True) -> None:
    """Marks this layer as having no variables.

    Note that this status affects sublayers and child layers too.

    Args:
      value: True to set layer as variable free.

    Raises:
      ValueError: If variables were already instantiated or already requested
        via CreateVariable().
    """
    if self._create_variables_status != _CreateLayerVariablesStatus.NOT_CALLED:
      raise ValueError(
          'Variable free status for %s must be set before InstantiateVariables().'
          % self.params.cls)
    if self._variables_to_create:
      raise ValueError('Cannot set layer %s with variables as variable free.' %
                       self.params.cls)
    self._is_variable_free = value
  def FPropDefaultTheta(self, *args, **kwargs):
    """Calls `FProp` with this layer's own `self.theta` as the weights."""
    return self.FProp(self.theta, *args, **kwargs)
  def __call__(self, *args, **kwargs):
    """Forwards call to FPropDefaultTheta (i.e. FProp with self.theta)."""
    return self.FPropDefaultTheta(*args, **kwargs)
  def FProp(self, theta, *args, **kwargs):
    """Forward propagation.

    The central interface that subclasses should implement. The caller
    calls `FProp` with a `theta` dictionary. E.g.::

        foo = InstanceOfASubClassOfFoo(params)
        y = foo.FProp(foo.theta, x)

    The implementation of `FProp()` computes a function given
    the theta and the inputs. E.g.::

        subs = self.children
        inputs = args[0]
        a0 = subs.linear.FProp(theta.linear, inputs)
        a1 = subs.softmax.FProp(theta.softmax, a0)
        # The same layer applied twice.
        a2 = subs.linear.FProp(theta.linear, a1)
        return a2

    Args:
      theta: A `.NestedMap` object containing weights' values of this
        layer and its children layers.
      *args: List args.
      **kwargs: Keyward args.

    Raises:
      NotImplementedError: Always, in this base class.
    """
    del theta
    del args
    del kwargs
    raise NotImplementedError('Abstract method of %s' % self)
  @classmethod
  def FPropMeta(cls, params, *args, **kwargs):
    """Returns metadata about the `FProp` computation for this layer.

    **Experimental feature.**
    Don't use or depend on it without consulting Lingvo authors.

    E.g.::

        p = SomeComplexLayer.Params()
        meta = p.cls.FPropMeta(p, tshape.Shape([128, 20, 50, 'channels']))

    `meta.flops` gives an estimate count of floating point operations done by
    one `FProp` given an input tensor of shape [128, 20, 50, channels].
    `meta.out_shapes` is a tuple of TShape, which tells you what shape
    of tensors this layer will return.

    Args:
      params: The param of a layer of this layer type.
      *args: Corresponds to FProp with Tensors replaced by `TensorShape`.
      **kwargs: Corresponds to FProp with Tensors replaced by `TensorShape`.

    Returns:
      A `.NestedMap` with

      - flops - The estimated number of floating point operations incurred by
        this fprop.
      - out_shapes - A tuple of `TShape`. I.e., `out_shapes[i]`
        represents the shape of the `i`-th returned tensor of the fprop.

    Raises:
      NotImplementedError: Always, in this base class.
    """
    raise NotImplementedError('FPropMeta of %s' % cls)
  @property
  def params(self) -> BaseLayerParamsT:
    """Returns the params upon which this layer is built (a private copy)."""
    return self._params
  @property
  def cluster(self):
    """Returns the current cluster configuration (cluster_factory.Current())."""
    return cluster_factory.Current()
  @property
  def do_eval(self) -> bool:
    """True if the current cluster is in evaluation mode."""
    return self.cluster.do_eval
  @property
  def parent(self) -> Optional[BaseLayerT]:
    """None if self is the root layer, otherwise the parent layer of self."""
    return self._parent
@property
def path(self) -> str:
"""Returns a '.'-separated string with all layer names from the root."""
if self.parent:
return self.parent.path + '.' + self.params.name
else:
return self.params.name
  @property
  def layer_type(self) -> str:
    """Returns layer type prefixed with 'lingvo.'."""
    return 'lingvo.' + self.__class__.__name__
  @property
  def children(self) -> py_utils.NestedMap:
    """Returns children layers of this layer in a `.NestedMap`."""
    return self._private_children
  def __getattr__(self, name: str):
    """Returns the child layer of the given name.

    Only invoked when normal attribute lookup fails, so this resolves child
    layers by name (e.g. ``self.foo`` for ``CreateChild('foo', ...)``).
    """
    if name == '_private_children':
      # Guard against infinite recursion during __init__ (before the attribute
      # exists).
      # Raising AttributeError without custom message triggers normal python
      # handling of __getattr__ AttributeError.
      raise AttributeError()
    if name in self._private_children:
      return self._private_children[name]
    elif (hasattr(type(self), name) and
          isinstance(getattr(type(self), name), property)):
      # There was an AttributeError raised by a property getter.
      # Call property getter again directly to raise the same error.
      return getattr(type(self), name).fget(self)
    else:
      raise AttributeError('%s is not a sub-layer of %s.' % (name, self))
def GetDescendant(self, path: str) -> BaseLayerT:
"""Returns a descendant layer given the path.
NOTE(yonghui): This GetDescendant is not complete. It is not able to descent
into list/tuple substructures.
Args:
path: a comma separated string denoting a descendant of this layer.
Returns:
The descendant layer.
Raises:
KeyError: if the descendant is not found.
"""
sub = self
if path:
for k in path.split('.'):
if k not in sub.children:
raise KeyError('%s not found in %s' % (k, list(sub.children.keys())))
sub = sub.children[k]
return sub
  @property
  def vars(self):
    """Returns variables of this layer and its children in a `.NestedMap`."""
    if self._is_variable_free:
      # Variable-free layers expose an empty map per child (structure only).
      return self._private_children.Transform(lambda _: py_utils.NestedMap())
    if self._create_variables_status == _CreateLayerVariablesStatus.NOT_CALLED:
      raise ValueError(
          'Cannot access vars for layer %s before they have been created.' %
          self.params.cls)
    ret = self._private_children.Transform(lambda x: x.vars)
    for k in self._private_vars.keys():
      ret[k] = self._private_vars[k]
    return ret
def _TransformVarsInternal(self, fn):
"""Internal: replaces each variable v in self._private_vars with fn(v).
Also recursively invokes _TransformVarsInternal() on self._private_children.
Args:
fn: A function that takes a variable and returns a variable or a wrapper
of the variable.
"""
self._private_vars_transform_restore_stack.append(self._private_vars)
self._private_vars = self._private_vars.Transform(fn)
if self._create_variables_status == _CreateLayerVariablesStatus.NOT_CALLED:
raise ValueError(
'Cannot access vars for layer %s before they have been created.' %
self.params.cls)
tf.nest.map_structure(
lambda c: c._TransformVarsInternal(fn), # pylint: disable=protected-access
self._private_children)
  def _UndoTransformVarsInternal(self):
    """Internal. Undoes _TransformVarsInternal().

    Restores self._private_vars from the restore stack, recursively for all
    children.
    """
    self._private_vars = self._private_vars_transform_restore_stack.pop()
    tf.nest.map_structure(
        lambda c: c._UndoTransformVarsInternal(),  # pylint: disable=protected-access
        self._private_children)
  @contextlib.contextmanager
  def TransformVarsTempContext(self, fn):
    """Enters a context that temporarily transforms each variable v to fn(v).

    The original variables are restored on exit, even if the body raises.
    """
    self._TransformVarsInternal(fn)
    try:
      yield
    finally:
      self._UndoTransformVarsInternal()
  @property
  def theta(self):
    """Returns theta of this layer and its children in a `.NestedMap`.

    Theta values are the per-variable values (possibly transformed by each
    variable's theta_fn and cast to fprop_dtype) used by FProp.
    """
    if self._is_variable_free:
      return self._private_children.Transform(lambda _: py_utils.NestedMap())
    if self._create_variables_status == _CreateLayerVariablesStatus.NOT_CALLED:
      raise ValueError(
          'Cannot access theta for layer %s before they have been created.' %
          self.params.cls)
    ret = self._private_children.Transform(lambda x: x.theta)
    # DeepCopy so that the theta_fn transforms below do not mutate
    # self._private_theta.
    private_theta = self._private_theta.DeepCopy()
    for name, theta_fn in self._private_theta_fn.FlattenItems():
      private_theta[name] = theta_fn(private_theta[name])
    if (self._params.fprop_dtype is not None and
        self._params.fprop_dtype != self._params.dtype):
      def MaybeCastToFPropDtype(x):
        # Need to check `.base_dtype` as x.dtype may be tf.float32_ref.
        if x is not None and x.dtype.base_dtype == self._params.dtype:
          return tf.cast(x, self._params.fprop_dtype)
        else:
          return x
      private_theta = private_theta.Transform(MaybeCastToFPropDtype)
    ret.update(private_theta)
    return ret
@property
def accumulators(self):
"""Returns `.NestedMap` of `Accumulator` instances for this and children."""
ret = self._private_children.Transform(lambda x: x.accumulators)
for k, acc in self._private_accumulators.items():
ret[k] = acc
return ret
  @property
  def fns(self):
    """Returns a read-only view of layer local functions.

    Functions can be accessed by index (['name']) or attribute notation
    (`fns.foo`).

    Returns:
      Read-only attribute accessible dict view of the layer's function library.
    """
    return py_utils.ReadOnlyAttrDictView(self._private_fns)
def AddFunction(self, name, f, replace=False):
"""Adds a function to the layer's `fns` collection.
This should be used to add op-like functions specific to the operation
of the layer and its children. Such functions should be added in `__init__`
and may either be raw python functions or TensorFlow Defuns. This
facility is just a mechanism for organizing them and having basic checks
on name collisions.
Args:
name: The function name. It will be accessible as `self.fns.{name}`.
f: The function body.
replace: Whether to replace an existing function (default False).
Raises:
AttributeError: If the function already exists and replace == False.
"""
py_utils.NestedMap.CheckKey(name)
if not replace:
if name in self._private_fns:
raise AttributeError(
'Function "%s" is already defined on layer "%r"' % (name, self))
self._private_fns[name] = f
def _CheckName(self, name: str) -> None:
"""Asserts name's validity."""
py_utils.NestedMap.CheckKey(name)
assert name not in self._private_vars, (
'%s exists in vars, %s' % (name, list(self._private_vars.keys())))
assert name not in self._private_theta, (
'%s exists in theta, %s' % (name, list(self._private_theta.keys())))
assert name not in self._private_children, ('%s exists in children, %s' % (
name, list(self._private_children.keys())))
assert name not in self._private_accumulators, (
'%s exists in global_accumulator: %s' %
(name, list(self._private_accumulators.keys())))
  def _VariableCollections(self) -> List[str]:
    """Returns the variable collections for this layer's variables."""
    return [LAYER_WT, '%s_vars' % self.__class__.__name__]
  def RegisterAccumulator(self, name, acc):
    """Registers an accumulator for this layer.

    An accumulator is used to propagate some state to a future point,
    where it is acted on (typically as part of `PostTrainingStepUpdate`). This
    mechanism allows for arbitrarily nested parts of a model to export state
    back to the global scope. Accumulators must be specially handled
    when crossing into `Defun` or recurrent scopes. By abstracting the
    mechanism, it allows all such state to be handled uniformly and generically.

    Example (typically from `__init__`)::

        class MyAccumulator(base_layer.Accumulator):
          def DefaultValue(self):
            # [count, min, max]
            return tf.convert_to_tensor([0.0, 0.0, 0.0])
          def Update(self, state1):
            state0 = self.GetValue()
            self.SetValue(tf.stack([
                state0[0] + state1[0],
                tf.minimum(state0[1], state1[1]),
                tf.maximum(state0[2], state1[2])]))

        self.RegisterAccumulator('mytracker', acc)

    Later, access the current value and update it::

        acc = self.accumulators.mytracker
        acc.Update(tf.convert_to_tensor([1.0, batch_min, batch_max]))

    Then, typically in `PostTrainingStepUpdate`::

        acc = self.accumulator.mytracker
        acc_value = acc.GetValue()
        # Do something with the value.
        acc.Reset()

    Args:
      name: The accumulator name. Shares a namespace with children, vars and
        extra theta.
      acc: An `Accumulator` instance.
    """
    self._CheckName(name)
    self._private_accumulators[name] = acc
  def GetAccumulatorValues(self):
    """Recursively gets values of all accumulators.

    Returns:
      `.NestedMap` of Tensors for each registered accumulator.
    """
    return self.accumulators.Transform(lambda acc: acc.GetValue())
def SetAccumulatorValues(self, new_values_nmap):
"""Recursively sets the values of all accumulators from a map.
Args:
new_values_nmap: `.NestedMap` of accumulator name:Tensor.
"""
accumulator_list = self.accumulators.Flatten()
value_list = new_values_nmap.Flatten()
for acc, value in zip(accumulator_list, value_list):
acc.SetValue(value)
  def GetVariableSymbolicShape(self, var_name):
    """Returns the variable's symbolic shape, or None if unknown."""
    return self._var_symbolic_shape_map.get(var_name, None)
  def CreateVariable(self,
                     name: str,
                     var_params: py_utils.WeightParams,
                     theta_fn: Optional[Callable[[tf.Tensor],
                                                 tf.Tensor]] = None,
                     **kwargs) -> None:
    """Create a variable of this layer according to the parameter `var_params`.

    E.g.::

        def __init__(self, ...):    # A layer's constructor
          self.CreateVariable(
              'weight', py_utils.WeightParams(shape=[100, 100]))

    `theta_fn` is used to apply a simple transformation on the created
    variable's value before used by the forward computation. E.g., to
    add the global variational noise according to this layer's
    parameter, one can do::

        def __init__(self, ...):    # A layer's constructor
          self.CreateVariable(
            name='weight',
            var_params=py_utils.WeightParams(shape=[100, 100]),
            theta_fn=self.AddVN)

    In some contexts, eg. TPU training, variables may not be created immediately
    but rather the creation request will be cached and created later via a call
    to layer.InstantiateVariables().

    Args:
      name: Variable name which is used as the key into vars/theta.
      var_params: `Params` used to create the variable.
      theta_fn: A python function that takes a variable's value and returns a
        new value to be used later for computation. Its signature must be
        (tf.Tensor) -> (tf.Tensor).
      **kwargs: Keyword args passed to `.py_utils.CreateVariable`.

    Raises:
      ValueError: If this layer is variable-free, or if variable creation has
        already completed.
    """
    if self.params.device_mesh is not None:
      # Warn (not fail) when a multi-dim weight lacks a sharding annotation.
      if (len([dim for dim in var_params.shape if dim > 1]) > 1 and
          var_params.tensor_split_dims_mapping is None):
        tf.logging.warning(
            'tensor_split_dims_mapping missing for %s.%s: shape=%s', self.path,
            name, var_params.shape)
    if self._is_variable_free:
      raise ValueError('Cannot create variable in variable free layer.')
    if self._create_variables_status == _CreateLayerVariablesStatus.COMPLETED:
      raise ValueError(
          'CreateVariable call after variable creation has completed! '
          'CreateVariable should be called in __init__ or _CreateLayerVariables.'
      )
    self._CheckName(name)
    if (self.params.skip_lp_regularization and
        py_utils.SKIP_LP_REGULARIZATION not in var_params.collections):
      # Rebuild var_params with the SKIP_LP_REGULARIZATION collection added.
      var_params = py_utils.WeightParams(
          shape=var_params.shape,
          dtype=var_params.dtype,
          init=var_params.init,
          collections=(var_params.collections +
                       [py_utils.SKIP_LP_REGULARIZATION]))
    self._var_symbolic_shape_map[name] = var_params.shape
    meta = CreateVariableMeta(
        var_params=var_params.Copy(),
        theta_fn=theta_fn,
        kwargs=kwargs)
    if self._create_variables_status == _CreateLayerVariablesStatus.IN_PROGRESS:
      # If InstantiateVariables has been called, create variable immediately.
      self._CreateVariableInternal(name, meta)
    else:
      # Otherwise cache the variable to be created.
      self._variables_to_create[name] = meta
  def _CreateVariableInternal(self, name: str,
                              meta: CreateVariableMeta) -> None:
    """Immediately creates the variable described by `meta`.

    DO NOT OVERRIDE. For internal use only. Subclasses of BaseLayer should use
    self.CreateVariable() to create variables.

    Args:
      name: The variable name.
      meta: A CreateVariableMeta describing the variable to be created.
    """
    # Fall back to the layer's random_seed unless the caller supplied one.
    meta.kwargs.setdefault('default_seed', self.params.random_seed)
    var = py_utils.CreateVariable(name, meta.var_params, **meta.kwargs)
    self._private_vars[name] = var
    if py_utils.IsEagerMode():
      # With eager trainer, always use the variable directly.
      value = var
    else:
      if self.cluster.params.worker.gpus_per_replica > 0:
        # On GPU (which always trains a single step per session.run()),
        # reference a tensor in FProp to cache it on device and avoid extraneous
        # sends from reading variables from ps multiple times.
        with tf.device(var.device):
          value = tf.identity(var, name=name)
      elif self.params.add_name_to_theta:
        value = tf.identity(var, name=name)
      else:
        value = var
    if meta.theta_fn is not None:
      self._private_theta_fn[name] = meta.theta_fn
    self._private_theta[name] = value
  @contextlib.contextmanager
  def _SelfVariableScope(self):
    """Internal. Used to ensure the same variable & name scopes are used."""
    if not self._self_variable_scope:
      # First entry: create and memoize the scope for p.name.
      with tf.variable_scope(py_utils.SanitizeScopeKey(
          self.params.name)) as scope:
        self._self_variable_scope = scope
    with contextlib.ExitStack() as stack:
      # Re-enter the memoized scope; auxiliary_name_scope=False plus the
      # explicit original_name_scope re-entry avoids creating fresh
      # "scope_1"-style name scopes on repeated entries.
      stack.enter_context(
          tf.variable_scope(
              self._self_variable_scope, auxiliary_name_scope=False))
      stack.enter_context(
          tf.name_scope(self._self_variable_scope.original_name_scope))
      yield stack
  def InstantiateVariables(self) -> None:
    """Create variables for this layer and child layers.

    DO NOT OVERRIDE. Override self._CreateLayerVariables instead.

    Idempotent: calls after the first are no-ops.
    """
    if self._create_variables_status != _CreateLayerVariablesStatus.NOT_CALLED:
      return
    self._create_variables_status = _CreateLayerVariablesStatus.IN_PROGRESS
    # Track this layer on the global creation stack so nesting can be
    # verified and so we know when the outermost call finishes.
    stack_size = len(_CREATE_VARIABLES_STACK.stack)
    _CREATE_VARIABLES_STACK.stack.append(self)
    try:
      self._CreateChildrenVariables()
      if not self._is_variable_free:
        with self._SelfVariableScope():
          # Create all cached variable requests, then let the subclass hook
          # create any additional ones.
          for name, meta in list(self._variables_to_create.items()):
            self._CreateVariableInternal(name, meta)
          self._CreateLayerVariables()
    finally:
      assert _CREATE_VARIABLES_STACK.stack[-1] is self
      _CREATE_VARIABLES_STACK.stack.pop()
      assert len(_CREATE_VARIABLES_STACK.stack) == stack_size
    self._create_variables_status = _CreateLayerVariablesStatus.COMPLETED
    if not _CREATE_VARIABLES_STACK.stack:
      # Outermost layer just finished InstantiateVariables.
      self._VerifyVarsAndTheta()
  def _CreateChildrenVariables(self) -> None:
    """Create variables for child layers.

    Should be rarely overridden, only in cases when control over the context of
    children InstantiateVariables calls are needed. eg, if children variables
    need to be created inside of a specific context manager.

    There are a few cases of this in the codebase marked as for backwards
    compability. This is only to ensure that variable scopes remain compatible
    through the code migration. New layers should not copy that pattern, and
    instead follow the standard pattern of self.CreateChild() in __init__() and
    self.CreateVariable() in _CreateLayerVariables(). If you are okay with
    breaking old checkpoints, you can go ahead and delete those functions.
    """
    with self._SelfVariableScope():
      for child in self._children_list:
        # A variable-free parent must not contain children with variables.
        if self._is_variable_free and not child._is_variable_free:  # pylint: disable=protected-access
          raise ValueError(
              'Variable free layer %s(%s) child %s(%s) has variables.' %
              (self.params.name, self.params.cls, child.params.name,
               child.params.cls))
        child.InstantiateVariables()
  def _CreateLayerVariables(self) -> None:
    """Actually create variables for this layer.

    Subclasses should override this function.

    Variables are created inside of tf.variable_scope(self.params.name).
    """
    pass
  def AddExtraTheta(self, theta_name: str, theta_value) -> None:
    """Add extra `theta` that doesn't directly correspond to `vars`.

    The value appears in both `self.theta` and `self._extra_theta`.
    """
    self._CheckName(theta_name)
    self._private_theta[theta_name] = theta_value
    self._extra_theta[theta_name] = theta_value
  def AddVN(self, value):
    """Returns `value` with variational noise applied per this layer's params."""
    return py_utils.AddVN(self.params, value)
  def CreateChild(self, name: str, params: BaseLayerParamsT) -> None:
    """Create a sub layer.

    The created sub layer can be accessed by `name`. E.g.::

        self.CreateChild('foo', foo_params)
        self.foo.FProp...

    or::

        self.children['foo'].Fprop...
        self.children.foo.Fprop...

    If the layer does not have a name set, i.e. foo_params.name is None, then
    its name will be set to `name`. The layer's name is used as a variable_scope
    for its variables.

    Args:
      name: Sub layer name which is used as the key into vars/theta.
      params: `Hyperparams` object to instantiate a layer.

    Raises:
      ValueError: If called after __init__ has finished (the metaclass sets
        _disable_create_child post-construction).
    """
    if hasattr(self, '_disable_create_child') and self._disable_create_child:
      raise ValueError('Attempting to call CreateChild outside of __init__.')
    self._CheckName(name)
    # Inherit the shared BaseLayer hyperparameters from this (parent) layer.
    p = self.CopyBaseParams(self.params, params.Copy())
    if not p.name:
      p.name = name
    child = p.Instantiate()
    self._private_children[name] = child
  def CreateChildren(
      self, name: str, params: Union[List[BaseLayerParamsT],
                                     Mapping[str, BaseLayerParamsT]]
  ) -> None:
    """Create a list or dict of sub layers.

    The created sub layer list can be accessed by `name`. E.g.::

        self.CreateChildren('foo', ...)
        self.foo[10].FProp...

    or::

        self.children['foo'][10].Fprop...
        self.children.foo[10].Fprop...

    Args:
      name: The name for the sub layers, which is used as the key into
        vars/theta.
      params: a list or dict of `Hyperparams` objects to create.

    Raises:
      ValueError: If called after __init__ has finished.
    """
    if hasattr(self, '_disable_create_child') and self._disable_create_child:
      raise ValueError('Attempting to call CreateChildren outside of __init__.')
    self._CheckName(name)
    # Shared counter so unnamed children get distinct names name_0, name_1, ...
    uid = itertools.count()
    def Instantiate(p):
      p = self.CopyBaseParams(self.params, p.Copy())
      if not p.name:
        p.name = '%s_%d' % (name, next(uid))
      return p.Instantiate()
    # Wrap in a NestedMap so Transform works uniformly on list or dict input.
    self._private_children[name] = py_utils.NestedMap(
        sub=params).Transform(Instantiate).sub
def AddChild(self, name: str, children: BaseLayerT) -> None:
"""Add existing layer or layers as sublayer."""
for child in py_utils.Flatten(children):
assert isinstance(child, BaseLayer)
self._CheckName(name)
self._private_children[name] = children
  def _AutoAddChild(self, child: BaseLayerT) -> None:
    """Record that a layer `child` is instantiated by this layer.

    This method should only be called internally by BaseLayerMeta.

    Args:
      child: A sub-layer of this layer.
    """
    self._children_list.append(child)
def _RemoveChild(self, name: str) -> None:
"""Remove a sublayer instantiated earlier from this layer.
This method should only be called by subclasses, and is usually used to
remove unused layers instantiated by the super class.
Args:
name: the name of an existing sublayer.
"""
self._children_list.remove(self._private_children[name])
del self._private_children[name]
  def _VerifyChildren(self) -> None:
    """Verify all children created by this layer are via `CreateChild(ren)`."""
    created_children = self._private_children.Flatten()
    for v in self._children_list:
      if v not in created_children:
        # Log the registered children to help locate the stray layer.
        tf.logging.info([
            (child.params.name, type(child)) for child in created_children
        ])
        raise ValueError(
            '%s is not created by BaseLayer.CreateChild(ren) in %r.' %
            (v.params.name, self))
  def _VerifyVarsAndTheta(self) -> None:
    """Verify that vars and theta have the same nested structure."""
    for child in self._children_list:
      child._VerifyVarsAndTheta()  # pylint: disable=protected-access
    def MatchKeys(x, y):
      # Every key of x must exist in y, recursively (x may be a subset of y
      # because theta can contain extra_theta entries).
      assert len(x) <= len(y)
      for k in x.keys():
        assert k in y, '%s not in %s.' % (k, y)
        if isinstance(x[k], py_utils.NestedMap):
          assert isinstance(y[k], py_utils.NestedMap), '%s is not a map' % y[k]
          MatchKeys(x[k], y[k])
    # NOTE: this check can be quadratically expensive. Maybe only
    # enable this in unittests.
    MatchKeys(self.vars, self.theta)
    # Make sure whatever not in self.vars are in self._extra_theta
    for k in self.theta.keys():
      assert k in self.vars or k in self._extra_theta
def PostTrainingStepUpdate(self):
"""Returns a TF op which will be invoked at each training step.
Subclasses of `BaseLayer` can implement this method. The method should
return a TF op to be invoked during training after gradients are applied.
"""
update_ops = [
child.PostTrainingStepUpdate()
for child in self._private_children.Flatten()
]
return tf.group(*update_ops)
def _CastToFPropDtype(self, value):
def _Cast(x):
if x is None:
return None
x = tf.convert_to_tensor(x)
if not x.dtype.is_floating:
return x
return tf.cast(x, py_utils.FPropDtype(self.params))
return tf.nest.map_structure(_Cast, value)
def IsLayerParams(x):
  """Returns True iff `x` is an InstantiableParams for a BaseLayer subclass."""
  if not isinstance(x, hyperparams.InstantiableParams):
    return False
  return issubclass(x.cls, BaseLayer)
| apache-2.0 |
Danisan/odoo-1 | addons/account_anglo_saxon/__init__.py | 436 | 1090 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product
import stock
import purchase
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vismantic-ohtuprojekti/qualipy | qualipy/filters/pattern.py | 2 | 4606 | """
Filter for detecting pattern-like images.
The image is first turned into grayscale, after which discrete
fast fourier transformation is applied to construct the magnitude
spectrum of the image. Then frequencies which have intermediate
or low intensities are removed from the magnitude spectrum and
all frequencies with high intensity are intensified to the max
value. After this the distance from the center for each high
intensity frequency is calculated. From this set of distances
anomalies are removed by using the local outlier factor method.
The max from the set of distances is taken. This max distance is
then used as a radius for a circle, and all points outside this
circle in the magnitude spectrum are excluded and the density of
high frequencies is calculated. This density is used to estimate
how pattern-like the image is. Pattern-like images usually exhibit
smaller density than non-pattern-like images.
"""
import cv2
import numpy
from ..utils.image_utils import *
from ..utils.statistic_common import *
from filter import Filter
def distances_from_center(height, width):
    """Returns a matrix of squared Euclidean distances from each element
    to the center of a matrix of certain size.

    :param height: height of the matrix
    :type height: int
    :param width: width of the matrix
    :type width: int
    :returns: numpy.ndarray -- the squared-distance matrix
    """
    center_y = height / 2.0
    center_x = width / 2.0
    rows, cols = numpy.mgrid[:height, :width]
    return (cols - center_x) ** 2 + (rows - center_y) ** 2
def pattern_recognition(magnitude_spectrum):
    """Returns a prediction of how pattern-like an image is

    :param magnitude_spectrum: magnitude spectrum of a two-color image
    :type magnitude_spectrum: numpy.ndarray
    :returns: float
    """
    sq_distances = distances_from_center(*magnitude_spectrum.shape)
    high_intensity = magnitude_spectrum > 0.7

    # Distances from the center of every high-intensity frequency,
    # with outliers removed, averaged over the 20 largest values.
    frequency_distances = numpy.sqrt(sq_distances[high_intensity].flatten())
    frequency_distances = remove_anomalies(frequency_distances, 0.4)
    radius = numpy.mean(get_max_values(frequency_distances, 20))

    # Exclude all points outside a circle of the computed radius and
    # measure the density of high-intensity points within it.
    outside_circle = sq_distances >= radius ** 2
    intense_inside = numpy.sum(high_intensity & numpy.logical_not(outside_circle))
    total_inside = magnitude_spectrum.size - numpy.sum(outside_circle)
    return intense_inside / float(total_inside)
def scaled_prediction(prediction):
    """Scales the prediction

    :param prediction: the prediction to scale
    :type prediction: float
    :returns: float
    """
    # Clamp clearly pattern-like / clearly non-pattern-like densities.
    if prediction > 0.4:
        return 0.0
    if prediction < 0.05:
        return 1.0
    # Linearly map intermediate densities onto (0, 1].
    return 1 - linear_normalize(prediction, 0.0, 0.4).item(0)
class Pattern(Filter):

    """Filter for detecting pattern-like images"""

    name = 'pattern'
    speed = 3

    def __init__(self, threshold=0.5, invert_threshold=False):
        """Initializes a pattern filter

        :param threshold: threshold at which the given prediction is changed
                          from negative to positive
        :type threshold: float
        :param invert_threshold: whether the result should be greater than
                                 the given threshold (default) or lower
                                 for an image to be considered positive
        :type invert_threshold: bool
        """
        super(Pattern, self).__init__(threshold, invert_threshold)

    def predict(self, image_path, return_boolean=True, ROI=None):
        """Predict if a given image is pattern-like

        :param image_path: path to the image
        :type image_path: str
        :param return_boolean: whether to return the result as a
                               float between 0 and 1 or as a boolean
                               (threshold is given to the class)
        :type return_boolean: bool
        :param ROI: possible region of interest as a 4-tuple
                    (x0, y0, width, height), None if not needed
        :returns: the prediction as a bool or float depending on the
                  return_boolean parameter
        """
        image = read_color_image(image_path, ROI)
        # Reduce the image to two colors and grayscale it before the
        # frequency-domain analysis.
        grayscale = cv2.cvtColor(reduce_colors(image, 2),
                                 cv2.COLOR_BGR2GRAY)
        spectrum = count_magnitude_spectrum(grayscale)
        prediction = scaled_prediction(pattern_recognition(spectrum))

        if return_boolean:
            return self.boolean_result(prediction)
        return prediction
| mit |
duducosmos/pgs4a | python-install/lib/python2.7/test/test_xmlrpc.py | 16 | 38637 | import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import mimetools
import httplib
import socket
import StringIO
import os
import re
from test import test_support
# threading is optional on some builds; fall back to None so threaded
# tests can be skipped via skipUnless(threading, ...).
try:
    import threading
except ImportError:
    threading = None

# Probe for the Python 2 `unicode` builtin so tests can branch on it.
try:
    unicode
except NameError:
    have_unicode = False
else:
    have_unicode = True
# Sample payload exercising every marshallable XML-RPC type: string, float,
# int, long, nested list, base64 Binary, boolean, unicode keys/values, and
# the three ways of constructing a DateTime.
alist = [{'astring': 'foo@bar.baz.spam',
          'afloat': 7283.43,
          'anint': 2**20,
          'ashortlong': 2L,
          'anotherlist': ['.zyx.41'],
          'abase64': xmlrpclib.Binary("my dog has fleas"),
          'boolean': xmlrpclib.False,
          'unicode': u'\u4000\u6000\u8000',
          u'ukey\u4000': 'regular value',
          'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
          'datetime2': xmlrpclib.DateTime(
                        (2005, 02, 10, 11, 41, 23, 0, 1, -1)),
          'datetime3': xmlrpclib.DateTime(
                        datetime.datetime(2005, 02, 10, 11, 41, 23)),
          }]
class XMLRPCTestCase(unittest.TestCase):
    # Round-trip marshalling tests for xmlrpclib.dumps()/loads().

    def test_dump_load(self):
        # The sample payload must survive a dumps()/loads() round trip.
        self.assertEqual(alist,
                         xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])

    def test_dump_bare_datetime(self):
        # This checks that an unwrapped datetime.date object can be handled
        # by the marshalling code. This can't be done via test_dump_load()
        # since with use_datetime set to 1 the unmarshaller would create
        # datetime objects for the 'datetime[123]' keys as well
        dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
        s = xmlrpclib.dumps((dt,))
        (newdt,), m = xmlrpclib.loads(s, use_datetime=1)
        self.assertEqual(newdt, dt)
        self.assertEqual(m, None)

        (newdt,), m = xmlrpclib.loads(s, use_datetime=0)
        self.assertEqual(newdt, xmlrpclib.DateTime('20050210T11:41:23'))

    def test_datetime_before_1900(self):
        # same as before but with a date before 1900
        dt = datetime.datetime(1, 02, 10, 11, 41, 23)
        s = xmlrpclib.dumps((dt,))
        (newdt,), m = xmlrpclib.loads(s, use_datetime=1)
        self.assertEqual(newdt, dt)
        self.assertEqual(m, None)

        (newdt,), m = xmlrpclib.loads(s, use_datetime=0)
        self.assertEqual(newdt, xmlrpclib.DateTime('00010210T11:41:23'))

    def test_cmp_datetime_DateTime(self):
        # DateTime and datetime.datetime must compare equal in both orders.
        now = datetime.datetime.now()
        dt = xmlrpclib.DateTime(now.timetuple())
        self.assertTrue(dt == now)
        self.assertTrue(now == dt)
        then = now + datetime.timedelta(seconds=4)
        self.assertTrue(then >= dt)
        self.assertTrue(dt < then)

    def test_bug_1164912 (self):
        d = xmlrpclib.DateTime()
        ((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
                                            methodresponse=True))
        self.assertIsInstance(new_d.value, str)

        # Check that the output of dumps() is still an 8-bit string
        s = xmlrpclib.dumps((new_d,), methodresponse=True)
        self.assertIsInstance(s, str)

    def test_newstyle_class(self):
        # A new-style class instance marshals as its __dict__.
        class T(object):
            pass
        t = T()
        t.x = 100
        t.y = "Hello"
        ((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
        self.assertEqual(t2, t.__dict__)

    def test_dump_big_long(self):
        # Longs beyond the 32-bit XML-RPC range are rejected.
        self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))

    def test_dump_bad_dict(self):
        # Non-string dict keys cannot be marshalled.
        self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))

    def test_dump_recursive_seq(self):
        # Self-referential sequences must be detected, not recursed forever.
        l = [1,2,3]
        t = [3,4,5,l]
        l.append(t)
        self.assertRaises(TypeError, xmlrpclib.dumps, (l,))

    def test_dump_recursive_dict(self):
        # Same for self-referential dicts.
        d = {'1':1, '2':1}
        t = {'3':3, 'd':d}
        d['t'] = t
        self.assertRaises(TypeError, xmlrpclib.dumps, (d,))

    def test_dump_big_int(self):
        # MAXINT/MININT are the inclusive bounds; one past either must raise.
        if sys.maxint > 2L**31-1:
            self.assertRaises(OverflowError, xmlrpclib.dumps,
                              (int(2L**34),))

        xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
        self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
        self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))

        def dummy_write(s):
            pass

        # The same bounds apply to the Marshaller's dump_int directly.
        m = xmlrpclib.Marshaller()
        m.dump_int(xmlrpclib.MAXINT, dummy_write)
        m.dump_int(xmlrpclib.MININT, dummy_write)
        self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
        self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)

    def test_dump_none(self):
        # None round-trips only with allow_none=True; otherwise TypeError.
        value = alist + [None]
        arg1 = (alist + [None],)
        strg = xmlrpclib.dumps(arg1, allow_none=True)
        self.assertEqual(value,
                         xmlrpclib.loads(strg)[0][0])
        self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))

    def test_default_encoding_issues(self):
        # SF bug #1115989: wrong decoding in '_stringify'
        utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
                  <params>
                    <param><value>
                      <string>abc \x95</string>
                      </value></param>
                    <param><value>
                      <struct>
                        <member>
                          <name>def \x96</name>
                          <value><string>ghi \x97</string></value>
                          </member>
                        </struct>
                      </value></param>
                  </params>
                  """

        # sys.setdefaultencoding() normally doesn't exist after site.py is
        # loaded. Import a temporary fresh copy to get access to it
        # but then restore the original copy to avoid messing with
        # other potentially modified sys module attributes
        old_encoding = sys.getdefaultencoding()
        with test_support.CleanImport('sys'):
            import sys as temp_sys
            temp_sys.setdefaultencoding("iso-8859-1")
            try:
                (s, d), m = xmlrpclib.loads(utf8)
            finally:
                temp_sys.setdefaultencoding(old_encoding)

        items = d.items()
        if have_unicode:
            self.assertEqual(s, u"abc \x95")
            self.assertIsInstance(s, unicode)
            self.assertEqual(items, [(u"def \x96", u"ghi \x97")])
            self.assertIsInstance(items[0][0], unicode)
            self.assertIsInstance(items[0][1], unicode)
        else:
            self.assertEqual(s, "abc \xc2\x95")
            self.assertEqual(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
    # Tests for the xmlrpclib.escape() helper.

    def test_escape(self):
        # escape() encodes the three XML-special characters as entities.
        # (The previous expected values had been HTML-unescaped by a
        # processing pass and could never match: escape("a&b") returns
        # "a&amp;b", not "a&b".)
        self.assertEqual(xmlrpclib.escape("a&b"), "a&amp;b")
        self.assertEqual(xmlrpclib.escape("a<b"), "a&lt;b")
        self.assertEqual(xmlrpclib.escape("a>b"), "a&gt;b")
class FaultTestCase(unittest.TestCase):
    # Representation and marshalling tests for xmlrpclib.Fault.

    def test_repr(self):
        fault = xmlrpclib.Fault(42, 'Test Fault')
        # repr() and str() must agree and use the canonical format.
        self.assertEqual(repr(fault), "<Fault 42: 'Test Fault'>")
        self.assertEqual(repr(fault), str(fault))

    def test_dump_fault(self):
        fault = xmlrpclib.Fault(42, 'Test Fault')
        dumped = xmlrpclib.dumps((fault,))
        (loaded,), method = xmlrpclib.loads(dumped)
        self.assertEqual(loaded, {'faultCode': 42, 'faultString': 'Test Fault'})
        self.assertEqual(method, None)

        # A Fault serialized via the Marshaller re-raises on loads().
        serialized = xmlrpclib.Marshaller().dumps(fault)
        self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, serialized)
class DateTimeTestCase(unittest.TestCase):
    # Construction, formatting, and decoding tests for xmlrpclib.DateTime.

    def test_default(self):
        t = xmlrpclib.DateTime()

    def test_time(self):
        d = 1181399930.036952
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))

    def test_time_tuple(self):
        d = (2007,6,9,10,38,50,5,160,0)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070609T10:38:50')

    def test_time_struct(self):
        d = time.localtime(1181399930.036952)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))

    def test_datetime_datetime(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070102T03:04:05')

    def test_repr(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
        self.assertEqual(repr(t), val)

    def test_decode(self):
        # Leading/trailing whitespace must be tolerated by decode().
        d = ' 20070908T07:11:13  '
        t1 = xmlrpclib.DateTime()
        t1.decode(d)
        tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
        self.assertEqual(t1, tref)

        t2 = xmlrpclib._datetime(d)
        # Bug fix: this assertion previously re-checked t1, so the
        # _datetime() result was never actually verified.
        self.assertEqual(t2, tref)
class BinaryTestCase(unittest.TestCase):
    # Construction and base64-decoding tests for xmlrpclib.Binary.

    def test_default(self):
        # An empty Binary stringifies to the empty string.
        self.assertEqual(str(xmlrpclib.Binary()), '')

    def test_string(self):
        payload = '\x01\x02\x03abc123\xff\xfe'
        self.assertEqual(str(xmlrpclib.Binary(payload)), payload)

    def test_decode(self):
        payload = '\x01\x02\x03abc123\xff\xfe'
        encoded = base64.encodestring(payload)

        # Decoding via the instance method...
        via_method = xmlrpclib.Binary()
        via_method.decode(encoded)
        self.assertEqual(str(via_method), payload)

        # ...and via the module-level helper must agree.
        via_helper = xmlrpclib._binary(encoded)
        self.assertEqual(str(via_helper), payload)
# Bound address/port/URL of the background test server; filled in by
# http_server()/http_multi_server() once the socket is bound.
ADDR = PORT = URL = None

# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None):
    """Serve up to `numrequests` XML-RPC requests on localhost, then set `evt`.

    Publishes the bound address via the ADDR/PORT/URL globals and sets `evt`
    once when ready to serve and again after shutdown.
    """
    class TestInstanceClass:
        def div(self, x, y):
            return x // y

        def _methodHelp(self, name):
            if name == 'div':
                return 'This is the div function'

    def my_function():
        '''This is my function'''
        return True

    class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
        def get_request(self):
            # Ensure the socket is always non-blocking.  On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
            s, port = self.socket.accept()
            s.setblocking(True)
            return s, port

    if not requestHandler:
        requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
    serv = MyXMLRPCServer(("localhost", 0), requestHandler,
                          logRequests=False, bind_and_activate=False)
    try:
        # 3-second timeout bounds the serving loop if a request never comes.
        serv.socket.settimeout(3)
        serv.server_bind()
        global ADDR, PORT, URL
        ADDR, PORT = serv.socket.getsockname()
        #connect to IP address directly. This avoids socket.create_connection()
        #trying to connect to to "localhost" using all address families, which
        #causes slowdown e.g. on vista which supports AF_INET6. The server listens
        #on AF_INET only.
        URL = "http://%s:%d"%(ADDR, PORT)
        serv.server_activate()
        serv.register_introspection_functions()
        serv.register_multicall_functions()
        serv.register_function(pow)
        serv.register_function(lambda x,y: x+y, 'add')
        serv.register_function(my_function)
        serv.register_instance(TestInstanceClass())
        evt.set()

        # handle up to 'numrequests' requests
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1
    except socket.timeout:
        pass
    finally:
        serv.socket.close()
        # PORT = None signals clients that the server has shut down.
        PORT = None
        evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
    """Like http_server(), but serves a MultiPathXMLRPCServer with two
    dispatch paths: "/foo" (registers pow) and "/foo/bar" (registers add).
    """
    class TestInstanceClass:
        def div(self, x, y):
            return x // y

        def _methodHelp(self, name):
            if name == 'div':
                return 'This is the div function'

    def my_function():
        '''This is my function'''
        return True

    class MyXMLRPCServer(SimpleXMLRPCServer.MultiPathXMLRPCServer):
        def get_request(self):
            # Ensure the socket is always non-blocking.  On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
            s, port = self.socket.accept()
            s.setblocking(True)
            return s, port

    if not requestHandler:
        requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler

    class MyRequestHandler(requestHandler):
        # Empty rpc_paths disables path checking in the handler itself;
        # dispatch is done per-path by the MultiPath server.
        rpc_paths = []

    serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
                          logRequests=False, bind_and_activate=False)
    serv.socket.settimeout(3)
    serv.server_bind()
    try:
        global ADDR, PORT, URL
        ADDR, PORT = serv.socket.getsockname()
        #connect to IP address directly. This avoids socket.create_connection()
        #trying to connect to to "localhost" using all address families, which
        #causes slowdown e.g. on vista which supports AF_INET6. The server listens
        #on AF_INET only.
        URL = "http://%s:%d"%(ADDR, PORT)
        serv.server_activate()
        paths = ["/foo", "/foo/bar"]
        for path in paths:
            d = serv.add_dispatcher(path, SimpleXMLRPCServer.SimpleXMLRPCDispatcher())
            d.register_introspection_functions()
            d.register_multicall_functions()
        serv.get_dispatcher(paths[0]).register_function(pow)
        serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
        evt.set()

        # handle up to 'numrequests' requests
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1
    except socket.timeout:
        pass
    finally:
        serv.socket.close()
        PORT = None
        evt.set()
# This function prevents errors like:
#    <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
    '''Returns True if the given ProtocolError is the product of a server-side
    exception caused by the 'temporarily unavailable' response sometimes
    given by operations on non-blocking sockets.'''
    try:
        # A -1 error code or missing headers also counts as unavailable.
        if e.errcode == -1 or e.headers is None:
            return True
        exc_mess = e.headers.get('X-exception')
    except AttributeError:
        # Not a ProtocolError (e.g. a socket.error): inspect its message.
        exc_mess = str(e)

    return bool(exc_mess and 'temporarily unavailable' in exc_mess.lower())
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
    # Subclasses override these to choose the request handler class, the
    # number of requests the server thread will serve, and the server loop.
    requestHandler = None
    request_count = 1
    threadFunc = staticmethod(http_server)

    def setUp(self):
        # enable traceback reporting
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True

        self.evt = threading.Event()
        # start server thread to handle requests
        serv_args = (self.evt, self.request_count, self.requestHandler)
        threading.Thread(target=self.threadFunc, args=serv_args).start()

        # wait for the server to be ready
        self.evt.wait(10)
        self.evt.clear()

    def tearDown(self):
        # wait on the server thread to terminate
        self.evt.wait(10)

        # disable traceback reporting
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(BaseServerTestCase):
    # End-to-end tests against the background SimpleXMLRPCServer thread.

    def test_simple1(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.pow(6,8), 6**8)
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_nonascii(self):
        start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
        end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'

        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.add(start_string, end_string),
                             start_string + end_string)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket unavailable errors.
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    # [ch] The test 404 is causing lots of false alarms.
    def XXXtest_404(self):
        # send POST with httplib, it should return 404 header and
        # 'Not Found' message.
        conn = httplib.HTTPConnection(ADDR, PORT)
        conn.request('POST', '/this-is-not-valid')
        response = conn.getresponse()
        conn.close()

        self.assertEqual(response.status, 404)
        self.assertEqual(response.reason, 'Not Found')

    def test_introspection1(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            meth = p.system.listMethods()
            expected_methods = set(['pow', 'div', 'my_function', 'add',
                                    'system.listMethods', 'system.methodHelp',
                                    'system.methodSignature', 'system.multicall'])
            self.assertEqual(set(meth), expected_methods)
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_introspection2(self):
        try:
            # test _methodHelp()
            p = xmlrpclib.ServerProxy(URL)
            divhelp = p.system.methodHelp('div')
            self.assertEqual(divhelp, 'This is the div function')
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_introspection3(self):
        try:
            # test native doc
            p = xmlrpclib.ServerProxy(URL)
            myfunction = p.system.methodHelp('my_function')
            self.assertEqual(myfunction, 'This is my function')
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_introspection4(self):
        # the SimpleXMLRPCServer doesn't support signatures, but
        # at least check that we can try making the call
        try:
            p = xmlrpclib.ServerProxy(URL)
            divsig = p.system.methodSignature('div')
            self.assertEqual(divsig, 'signatures not supported')
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_multicall(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            multicall = xmlrpclib.MultiCall(p)
            multicall.add(2,3)
            multicall.pow(6,8)
            multicall.div(127,42)
            add_result, pow_result, div_result = multicall()
            self.assertEqual(add_result, 2+3)
            self.assertEqual(pow_result, 6**8)
            self.assertEqual(div_result, 127//42)
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_non_existing_multicall(self):
        try:
            p = xmlrpclib.ServerProxy(URL)
            multicall = xmlrpclib.MultiCall(p)
            multicall.this_is_not_exists()
            result = multicall()

            # result.results contains;
            # [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
            #   'method "this_is_not_exists" is not supported'>}]

            self.assertEqual(result.results[0]['faultCode'], 1)
            self.assertEqual(result.results[0]['faultString'],
                '<type \'exceptions.Exception\'>:method "this_is_not_exists" '
                'is not supported')
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_dotted_attribute(self):
        # Raises an AttributeError because private methods are not allowed.
        self.assertRaises(AttributeError,
                          SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')

        self.assertTrue(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
        # Get the test to run faster by sending a request with test_simple1.
        # This avoids waiting for the socket timeout.
        self.test_simple1()
class MultiPathServerTestCase(BaseServerTestCase):
    # Verifies per-path dispatch on a MultiPathXMLRPCServer.
    threadFunc = staticmethod(http_multi_server)
    request_count = 2

    def test_path1(self):
        # "/foo" only registers pow(); add() must come back as a Fault.
        proxy = xmlrpclib.ServerProxy(URL + "/foo")
        self.assertEqual(proxy.pow(6, 8), 6 ** 8)
        self.assertRaises(xmlrpclib.Fault, proxy.add, 6, 8)

    def test_path2(self):
        # "/foo/bar" only registers add(); pow() must come back as a Fault.
        proxy = xmlrpclib.ServerProxy(URL + "/foo/bar")
        self.assertEqual(proxy.add(6, 8), 6 + 8)
        self.assertRaises(xmlrpclib.Fault, proxy.pow, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and logs requests into a
    #class variable
    class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
        parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
        protocol_version = 'HTTP/1.1'
        # myRequests is a list of per-connection lists of request lines.
        myRequests = []

        def handle(self):
            # Start a fresh log for this connection.
            self.myRequests.append([])
            self.reqidx = len(self.myRequests)-1
            return self.parentClass.handle(self)

        def handle_one_request(self):
            result = self.parentClass.handle_one_request(self)
            # Record the raw request line under this connection's log.
            self.myRequests[self.reqidx].append(self.raw_requestline)
            return result

    requestHandler = RequestHandler

    def setUp(self):
        #clear request log
        self.RequestHandler.myRequests = []
        return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
    def test_two(self):
        p = xmlrpclib.ServerProxy(URL)
        #do three requests.
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)

        #they should have all been handled by a single request handler
        self.assertEqual(len(self.RequestHandler.myRequests), 1)

        #check that we did at least two (the third may be pending append
        #due to thread scheduling)
        self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
    #ask for two keepalive requests to be handled.
    request_count=2

    def test_close(self):
        p = xmlrpclib.ServerProxy(URL)
        #do some requests with close.
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        p("close")() #this should trigger a new keep-alive request
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(p.pow(6,8), 6**8)

        #they should have all been two request handlers, each having logged at least
        #two complete requests
        self.assertEqual(len(self.RequestHandler.myRequests), 2)
        self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
        self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)

    def test_transport(self):
        p = xmlrpclib.ServerProxy(URL)
        #do some requests with close.
        self.assertEqual(p.pow(6,8), 6**8)
        p("transport").close() #same as above, really.
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
class GzipServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and logs requests into a
    #class variable
    class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
        parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
        protocol_version = 'HTTP/1.1'

        def do_POST(self):
            #store content of last request in class
            self.__class__.content_length = int(self.headers["content-length"])
            return self.parentClass.do_POST(self)
    requestHandler = RequestHandler

    class Transport(xmlrpclib.Transport):
        #custom transport, stores the response length for our perusal
        fake_gzip = False

        def parse_response(self, response):
            self.response_length=int(response.getheader("content-length", 0))
            return xmlrpclib.Transport.parse_response(self, response)

        def send_content(self, connection, body):
            if self.fake_gzip:
                #add a lone gzip header to induce decode error remotely
                connection.putheader("Content-Encoding", "gzip")
            return xmlrpclib.Transport.send_content(self, connection, body)

    def setUp(self):
        BaseServerTestCase.setUp(self)

    def test_gzip_request(self):
        # An encoded request must be smaller than the unencoded one.
        t = self.Transport()
        t.encode_threshold = None
        p = xmlrpclib.ServerProxy(URL, transport=t)
        self.assertEqual(p.pow(6,8), 6**8)
        a = self.RequestHandler.content_length
        t.encode_threshold = 0 #turn on request encoding
        self.assertEqual(p.pow(6,8), 6**8)
        b = self.RequestHandler.content_length
        self.assertTrue(a>b)

    def test_bad_gzip_request(self):
        # A gzip header without gzipped content must yield a 400 error.
        t = self.Transport()
        t.encode_threshold = None
        t.fake_gzip = True
        p = xmlrpclib.ServerProxy(URL, transport=t)
        cm = self.assertRaisesRegexp(xmlrpclib.ProtocolError,
                                     re.compile(r"\b400\b"))
        with cm:
            p.pow(6, 8)

    def test_gsip_response(self):
        # NOTE(review): "gsip" looks like a typo for "gzip"; the name is
        # kept unchanged so the test id stays stable.
        # An encoded response must be smaller than the unencoded one.
        t = self.Transport()
        p = xmlrpclib.ServerProxy(URL, transport=t)
        old = self.requestHandler.encode_threshold
        self.requestHandler.encode_threshold = None #no encoding
        self.assertEqual(p.pow(6,8), 6**8)
        a = t.response_length
        self.requestHandler.encode_threshold = 0 #always encode
        self.assertEqual(p.pow(6,8), 6**8)
        b = t.response_length
        self.requestHandler.encode_threshold = old
        self.assertTrue(a>b)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
    def setUp(self):
        unittest.TestCase.setUp(self)
        if threading:
            self.url = URL
        else:
            # Without threading, http_server() and http_multi_server() will not
            # be executed and URL is still equal to None. 'http://' is a just
            # enough to choose the scheme (HTTP)
            self.url = 'http://'

    def test_close(self):
        # p('close')() closes the underlying transport and returns None.
        p = xmlrpclib.ServerProxy(self.url)
        self.assertEqual(p('close')(), None)

    def test_transport(self):
        # p('transport') exposes the transport instance passed in.
        t = xmlrpclib.Transport()
        p = xmlrpclib.ServerProxy(self.url, transport=t)
        self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
    def __getitem__(self, key):
        key = key.lower()
        if key != 'content-length':
            return mimetools.Message.__getitem__(self, key)
        # Return a non-numeric length so the server's int() call fails.
        return 'I am broken'
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
    # Verifies the X-exception/X-traceback headers controlled by
    # _send_traceback_header when the server-side handler fails.

    def setUp(self):
        self.evt = threading.Event()
        # start server thread to handle requests
        serv_args = (self.evt, 1)
        threading.Thread(target=http_server, args=serv_args).start()

        # wait for the server to be ready
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        # wait on the server thread to terminate
        self.evt.wait()
        # reset flag
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
        # reset message class
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message

    def test_basic(self):
        # check that flag is false by default
        flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
        self.assertEqual(flagval, False)

        # enable traceback reporting
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True

        # test a call that shouldn't fail just as a smoke test
        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.pow(6,8), 6**8)
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))

    def test_fail_no_info(self):
        # use the broken message class
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass

        try:
            p = xmlrpclib.ServerProxy(URL)
            p.pow(6,8)
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e) and hasattr(e, "headers"):
                # The two server-side error headers shouldn't be sent back in this case
                self.assertTrue(e.headers.get("X-exception") is None)
                self.assertTrue(e.headers.get("X-traceback") is None)
        else:
            self.fail('ProtocolError not raised')

    def test_fail_with_info(self):
        # use the broken message class
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass

        # Check that errors in the server send back exception/traceback
        # info when flag is set
        SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True

        try:
            p = xmlrpclib.ServerProxy(URL)
            p.pow(6,8)
        except (xmlrpclib.ProtocolError, socket.error), e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e) and hasattr(e, "headers"):
                # We should get error info in the response
                expected_err = "invalid literal for int() with base 10: 'I am broken'"
                self.assertEqual(e.headers.get("x-exception"), expected_err)
                self.assertTrue(e.headers.get("x-traceback") is not None)
        else:
            self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
    # Exercises CGIXMLRPCRequestHandler through captured stdin/stdout.

    def setUp(self):
        self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()

    def tearDown(self):
        self.cgi = None

    def test_cgi_get(self):
        with test_support.EnvironmentVarGuard() as env:
            env['REQUEST_METHOD'] = 'GET'
            # if the method is GET and no request_text is given, it runs handle_get
            # get sysout output
            with test_support.captured_stdout() as data_out:
                self.cgi.handle_request()

            # parse Status header
            data_out.seek(0)
            handle = data_out.read()
            status = handle.split()[1]
            message = ' '.join(handle.split()[2:4])

            self.assertEqual(status, '400')
            self.assertEqual(message, 'Bad Request')

    def test_cgi_xmlrpc_response(self):
        data = """<?xml version='1.0'?>
<methodCall>
    <methodName>test_method</methodName>
    <params>
        <param>
            <value><string>foo</string></value>
        </param>
        <param>
            <value><string>bar</string></value>
        </param>
    </params>
</methodCall>
"""

        with test_support.EnvironmentVarGuard() as env, \
             test_support.captured_stdout() as data_out, \
             test_support.captured_stdin() as data_in:
            data_in.write(data)
            data_in.seek(0)
            env['CONTENT_LENGTH'] = str(len(data))
            self.cgi.handle_request()
        data_out.seek(0)

        # will respond exception, if so, our goal is achieved ;)
        handle = data_out.read()

        # start with 44th char so as not to get http header, we just need only xml
        self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])

        # Also test the content-length returned by handle_request
        # Using the same test method inorder to avoid all the datapassing
        # boilerplate code.
        # Test for bug: http://bugs.python.org/issue5040
        content = handle[handle.find("<?xml"):]
        self.assertEqual(
            int(re.search('Content-Length: (\d+)', handle).group(1)),
            len(content))
class FakeSocket:
    """In-memory stand-in for a socket: records everything written to it."""

    def __init__(self):
        # Captured request bytes accumulate here; read back via getvalue().
        self.data = StringIO.StringIO()

    def send(self, buf):
        """Record *buf* and report it as fully sent."""
        self.data.write(buf)
        return len(buf)

    def sendall(self, buf):
        """Record *buf*; like socket.sendall, returns None."""
        self.send(buf)

    def getvalue(self):
        """Return everything written to this fake socket so far."""
        return self.data.getvalue()

    def makefile(self, x='r', y=-1):
        # There is no response stream; reading must abort the request.
        raise RuntimeError

    def close(self):
        pass
class FakeTransport(xmlrpclib.Transport):
    """A Transport instance that records instead of sending a request.

    This class replaces the actual socket used by httplib with a
    FakeSocket object that records the request. It doesn't provide a
    response.
    """

    def make_connection(self, host):
        conn = xmlrpclib.Transport.make_connection(self, host)
        # Swap in the recorder and keep a handle on it so tests can read
        # back the raw request bytes via self.fake_socket.getvalue().
        conn.sock = self.fake_socket = FakeSocket()
        return conn
class TransportSubclassTestCase(unittest.TestCase):
    """Verify that Transport subclass hook methods are actually invoked
    and can add extra headers to the outgoing request."""

    def issue_request(self, transport_class):
        """Return an HTTP request made via transport_class."""
        transport = transport_class()
        proxy = xmlrpclib.ServerProxy("http://example.com/",
                                      transport=transport)
        try:
            proxy.pow(6, 8)
        except RuntimeError:
            # FakeSocket.makefile raises RuntimeError once the request has
            # been written; at that point the recorded bytes are complete.
            return transport.fake_socket.getvalue()
        return None

    def test_custom_user_agent(self):
        class TestTransport(FakeTransport):
            def send_user_agent(self, conn):
                xmlrpclib.Transport.send_user_agent(self, conn)
                conn.putheader("X-Test", "test_custom_user_agent")
        req = self.issue_request(TestTransport)
        self.assertIn("X-Test: test_custom_user_agent\r\n", req)

    def test_send_host(self):
        class TestTransport(FakeTransport):
            def send_host(self, conn, host):
                xmlrpclib.Transport.send_host(self, conn, host)
                conn.putheader("X-Test", "test_send_host")
        req = self.issue_request(TestTransport)
        self.assertIn("X-Test: test_send_host\r\n", req)

    def test_send_request(self):
        class TestTransport(FakeTransport):
            def send_request(self, conn, url, body):
                xmlrpclib.Transport.send_request(self, conn, url, body)
                conn.putheader("X-Test", "test_send_request")
        req = self.issue_request(TestTransport)
        self.assertIn("X-Test: test_send_request\r\n", req)

    def test_send_content(self):
        class TestTransport(FakeTransport):
            def send_content(self, conn, body):
                # Header must be added *before* the body is written out.
                conn.putheader("X-Test", "test_send_content")
                xmlrpclib.Transport.send_content(self, conn, body)
        req = self.issue_request(TestTransport)
        self.assertIn("X-Test: test_send_content\r\n", req)
@test_support.reap_threads
def test_main():
    """Collect every test case in this module and run the full suite."""
    xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
                    BinaryTestCase, FaultTestCase, TransportSubclassTestCase]
    xmlrpc_tests.append(SimpleServerTestCase)
    xmlrpc_tests.append(KeepaliveServerTestCase1)
    xmlrpc_tests.append(KeepaliveServerTestCase2)
    # The gzip tests only make sense when zlib/gzip were compiled in.
    try:
        import gzip
        xmlrpc_tests.append(GzipServerTestCase)
    except ImportError:
        pass #gzip not supported in this build
    xmlrpc_tests.append(MultiPathServerTestCase)
    xmlrpc_tests.append(ServerProxyTestCase)
    xmlrpc_tests.append(FailingServerTestCase)
    xmlrpc_tests.append(CGIHandlerTestCase)
    test_support.run_unittest(*xmlrpc_tests)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| lgpl-2.1 |
sanjuro/RCJK | vendor/gdata/docs/service.py | 16 | 22871 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
DocumentAclQuery: Queries a Google Document Acl feed.
"""
__author__ = ('api.jfisher (Jeff Fisher), '
'e.bidelman (Eric Bidelman)')
import re
import atom
import gdata.service
import gdata.docs
import urllib
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
# Kind labels distinguishing the document types in the Documents List feed.
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'
# Category terms used by the server to tag entries with state/visibility.
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
# Prefix for per-user folder category schemes (completed with the owner's
# email address, see DocumentQuery.AddNamedFolder).
FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'
# File extensions of documents that are permitted to be uploaded or downloaded,
# mapped (upper-case extension) -> MIME content type.
SUPPORTED_FILETYPES = {
    'CSV': 'text/csv',
    'TSV': 'text/tab-separated-values',
    'TAB': 'text/tab-separated-values',
    'DOC': 'application/msword',
    'DOCX': ('application/vnd.openxmlformats-officedocument.'
             'wordprocessingml.document'),
    'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
    'ODT': 'application/vnd.oasis.opendocument.text',
    'RTF': 'application/rtf',
    'SXW': 'application/vnd.sun.xml.writer',
    'TXT': 'text/plain',
    'XLS': 'application/vnd.ms-excel',
    'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'PDF': 'application/pdf',
    'PNG': 'image/png',
    'PPT': 'application/vnd.ms-powerpoint',
    'PPS': 'application/vnd.ms-powerpoint',
    'HTM': 'text/html',
    'HTML': 'text/html',
    'ZIP': 'application/zip',
    'SWF': 'application/x-shockwave-flash'
}
class DocsService(gdata.service.GDataService):
    """Client extension for the Google Documents service Document List feed."""

    # Captures a trailing file extension of 3+ letters (group 1).
    # NOTE(review): non-raw string relies on Python passing the unknown
    # '\.' escape through unchanged; a raw string would be cleaner.
    __FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
    # Matches resource ids such as 'document:<id>' or 'document%3A<id>';
    # group 1 is the kind label, group 3 is the bare document id.
    __RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$')

    def __init__(self, email=None, password=None, source=None,
                 server='docs.google.com', additional_headers=None, **kwargs):
        """Creates a client for the Google Documents service.

        Args:
          email: string (optional) The user's email address, used for
              authentication.
          password: string (optional) The user's password.
          source: string (optional) The name of the user's application.
          server: string (optional) The name of the server to which a connection
              will be opened. Default value: 'docs.google.com'.
          **kwargs: The other parameters to pass to gdata.service.GDataService
              constructor.
        """
        # 'writely' is the legacy ClientLogin service code for Google Docs.
        gdata.service.GDataService.__init__(
            self, email=email, password=password, service='writely', source=source,
            server=server, additional_headers=additional_headers, **kwargs)

    def _MakeKindCategory(self, label):
        """Build the atom:category for a document kind; None means no kind."""
        if label is None:
            return None
        return atom.Category(scheme=DATA_KIND_SCHEME,
            term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)

    def _MakeContentLinkFromId(self, resource_id):
        """Return the export/download URL for a '<label>:<id>' resource id.

        Raises:
          ValueError: if the label is not document/presentation/spreadsheet.
        """
        match = self.__RESOURCE_ID_PATTERN.match(resource_id)
        label = match.group(1)
        doc_id = match.group(3)
        if label == DOCUMENT_LABEL:
            return '/feeds/download/documents/Export?docId=%s' % doc_id
        if label == PRESENTATION_LABEL:
            return '/feeds/download/presentations/Export?docId=%s' % doc_id
        if label == SPREADSHEET_LABEL:
            # Spreadsheet exports are served from a different host.
            return ('http://spreadsheets.google.com/feeds/download/spreadsheets/'
                    'Export?key=%s' % doc_id)
        raise ValueError, 'Invalid resource id: %s' % resource_id

    def _UploadFile(self, media_source, title, category, folder_or_uri=None):
        """Uploads a file to the Document List feed.

        Args:
          media_source: A gdata.MediaSource object containing the file to be
              uploaded.
          title: string The title of the document on the server after being
              uploaded.
          category: An atom.Category object specifying the appropriate document
              type.
          folder_or_uri: DocumentListEntry or string (optional) An object with a
              link to a folder or a uri to a folder to upload to.
              Note: A valid uri for a folder is of the form:
                    /feeds/folders/private/full/folder%3Afolder_id

        Returns:
          A DocumentListEntry containing information about the document created on
          the Google Documents service.
        """
        if folder_or_uri:
            # Accept either a folder entry (use its content link) or a raw uri.
            try:
                uri = folder_or_uri.content.src
            except AttributeError:
                uri = folder_or_uri
        else:
            uri = '/feeds/documents/private/full'
        entry = gdata.docs.DocumentListEntry()
        entry.title = atom.Title(text=title)
        if category is not None:
            entry.category.append(category)
        # The Slug header tells the server the original file name.
        entry = self.Post(entry, uri, media_source=media_source,
                          extra_headers={'Slug': media_source.file_name},
                          converter=gdata.docs.DocumentListEntryFromString)
        return entry

    def _DownloadFile(self, uri, file_path):
        """Downloads a file.

        Args:
          uri: string The full Export URL to download the file from.
          file_path: string The full path to save the file to.

        Raises:
          RequestError: on error response from server.
        """
        server_response = self.request('GET', uri)
        response_body = server_response.read()
        if server_response.status != 200:
            raise gdata.service.RequestError, {'status': server_response.status,
                                               'reason': server_response.reason,
                                               'body': response_body}
        # Write the whole payload in binary mode so documents survive intact.
        f = open(file_path, 'wb')
        f.write(response_body)
        f.flush()
        f.close()

    def MoveIntoFolder(self, source_entry, folder_entry):
        """Moves a document into a folder in the Document List Feed.

        Args:
          source_entry: DocumentListEntry An object representing the source
              document/folder.
          folder_entry: DocumentListEntry An object with a link to the destination
              folder.

        Returns:
          A DocumentListEntry containing information about the document created on
          the Google Documents service.
        """
        # Posting an entry carrying only the source id to the folder's content
        # link performs the move on the server.
        entry = gdata.docs.DocumentListEntry()
        entry.id = source_entry.id
        entry = self.Post(entry, folder_entry.content.src,
                          converter=gdata.docs.DocumentListEntryFromString)
        return entry

    def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
        """Queries the Document List feed and returns the resulting feed of
        entries.

        Args:
          uri: string The full URI to be queried. This can contain query
              parameters, a hostname, or simply the relative path to a Document
              List feed. The DocumentQuery object is useful when constructing
              query parameters.
          converter: func (optional) A function which will be executed on the
              retrieved item, generally to render it into a Python object.
              By default the DocumentListFeedFromString function is used to
              return a DocumentListFeed object. This is because most feed
              queries will result in a feed and not a single entry.
        """
        return self.Get(uri, converter=converter)

    def QueryDocumentListFeed(self, uri):
        """Retrieves a DocumentListFeed by retrieving a URI based off the Document
        List feed, including any query parameters. A DocumentQuery object can
        be used to construct these parameters.

        Args:
          uri: string The URI of the feed being retrieved possibly with query
              parameters.

        Returns:
          A DocumentListFeed object representing the feed returned by the server.
        """
        return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)

    def GetDocumentListEntry(self, uri):
        """Retrieves a particular DocumentListEntry by its unique URI.

        Args:
          uri: string The unique URI of an entry in a Document List feed.

        Returns:
          A DocumentListEntry object representing the retrieved entry.
        """
        return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)

    def GetDocumentListFeed(self, uri=None):
        """Retrieves a feed containing all of a user's documents.

        Args:
          uri: string A full URI to query the Document List feed.
        """
        if not uri:
            # Default to the standard private/full Documents List feed.
            uri = gdata.docs.service.DocumentQuery().ToUri()
        return self.QueryDocumentListFeed(uri)

    def GetDocumentListAclEntry(self, uri):
        """Retrieves a particular DocumentListAclEntry by its unique URI.

        Args:
          uri: string The unique URI of an entry in a Document List feed.

        Returns:
          A DocumentListAclEntry object representing the retrieved entry.
        """
        return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)

    def GetDocumentListAclFeed(self, uri):
        """Retrieves a document's ACL feed.

        Args:
          uri: string The URI of a document's Acl feed to retrieve.

        Returns:
          A DocumentListAclFeed object representing the ACL feed
          returned by the server.
        """
        return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)

    def Upload(self, media_source, title, folder_or_uri=None, label=None):
        """Uploads a document inside of a MediaSource object to the Document List
        feed with the given title.

        Args:
          media_source: MediaSource The gdata.MediaSource object containing a
              document file to be uploaded.
          title: string The title of the document on the server after being
              uploaded.
          folder_or_uri: DocumentListEntry or string (optional) An object with a
              link to a folder or a uri to a folder to upload to.
              Note: A valid uri for a folder is of the form:
                    /feeds/folders/private/full/folder%3Afolder_id
          label: optional label describing the type of the document to be created.

        Returns:
          A DocumentListEntry containing information about the document created
          on the Google Documents service.
        """
        return self._UploadFile(media_source, title, self._MakeKindCategory(label),
                                folder_or_uri)

    def Download(self, entry_or_id_or_url, file_path, export_format=None,
                 gid=None, extra_params=None):
        """Downloads a document from the Document List.

        Args:
          entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
              or a url to download from (such as the content src).
          file_path: string The full path to save the file to.
          export_format: the format to convert to, if conversion is required.
          gid: grid id, for downloading a single grid of a spreadsheet
          extra_params: a map of any further parameters to control how the document
              is downloaded

        Raises:
          RequestError if the service does not respond with success
        """
        # Resolve the argument to a concrete download url.
        if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
            url = entry_or_id_or_url.content.src
        else:
            if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
                url = self._MakeContentLinkFromId(entry_or_id_or_url)
            else:
                url = entry_or_id_or_url
        if export_format is not None:
            # Only Export-style urls accept a conversion format.
            if url.find('/Export?') == -1:
                raise gdata.service.Error, ('This entry cannot be exported '
                                            'as a different format')
            url += '&exportFormat=%s' % export_format
        if gid is not None:
            # The gid parameter only applies to spreadsheets.
            if url.find('spreadsheets') == -1:
                raise gdata.service.Error, 'grid id param is not valid for this entry'
            url += '&gid=%s' % gid
        if extra_params:
            url += '&' + urllib.urlencode(extra_params)
        self._DownloadFile(url, file_path)

    def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
        """Downloads a document from the Document List in a different format.

        Args:
          entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
              or a url to download from (such as the content src).
          file_path: string The full path to save the file to.  The export
              format is inferred from the the file extension.
          gid: grid id, for downloading a single grid of a spreadsheet
          extra_params: a map of any further parameters to control how the document
              is downloaded

        Raises:
          RequestError if the service does not respond with success
        """
        # Infer the export format from the target file's extension, if any.
        ext = None
        match = self.__FILE_EXT_PATTERN.match(file_path)
        if match:
            ext = match.group(1)
        self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)

    def CreateFolder(self, title, folder_or_uri=None):
        """Creates a folder in the Document List feed.

        Args:
          title: string The title of the folder on the server after being created.
          folder_or_uri: DocumentListEntry or string (optional) An object with a
              link to a folder or a uri to a folder to upload to.
              Note: A valid uri for a folder is of the form:
                    /feeds/folders/private/full/folder%3Afolder_id

        Returns:
          A DocumentListEntry containing information about the folder created on
          the Google Documents service.
        """
        if folder_or_uri:
            # Accept either a folder entry (use its content link) or a raw uri.
            try:
                uri = folder_or_uri.content.src
            except AttributeError:
                uri = folder_or_uri
        else:
            uri = '/feeds/documents/private/full'
        folder_entry = gdata.docs.DocumentListEntry()
        folder_entry.title = atom.Title(text=title)
        folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
        folder_entry = self.Post(folder_entry, uri,
                                 converter=gdata.docs.DocumentListEntryFromString)
        return folder_entry

    def MoveOutOfFolder(self, source_entry):
        """Moves a document out of a folder in the Document List Feed.

        Args:
          source_entry: DocumentListEntry An object representing the source
              document/folder.

        Returns:
          True if the entry was moved out.
        """
        # Deleting the entry via its edit link removes it from the folder
        # (it does not delete the underlying document).
        return self.Delete(source_entry.GetEditLink().href)

    # Deprecated methods

    #@atom.deprecated('Please use Upload instead')
    def UploadPresentation(self, media_source, title, folder_or_uri=None):
        """Uploads a presentation inside of a MediaSource object to the Document
        List feed with the given title.

        This method is deprecated, use Upload instead.

        Args:
          media_source: MediaSource The MediaSource object containing a
              presentation file to be uploaded.
          title: string The title of the presentation on the server after being
              uploaded.
          folder_or_uri: DocumentListEntry or string (optional) An object with a
              link to a folder or a uri to a folder to upload to.
              Note: A valid uri for a folder is of the form:
                    /feeds/folders/private/full/folder%3Afolder_id

        Returns:
          A DocumentListEntry containing information about the presentation created
          on the Google Documents service.
        """
        return self._UploadFile(
            media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
            folder_or_uri=folder_or_uri)

    # Decorator applied by explicit call (instead of @-syntax) for
    # compatibility with pre-2.4 Python; emits a DeprecationWarning.
    UploadPresentation = atom.deprecated('Please use Upload instead')(
        UploadPresentation)

    #@atom.deprecated('Please use Upload instead')
    def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
        """Uploads a spreadsheet inside of a MediaSource object to the Document
        List feed with the given title.

        This method is deprecated, use Upload instead.

        Args:
          media_source: MediaSource The MediaSource object containing a spreadsheet
              file to be uploaded.
          title: string The title of the spreadsheet on the server after being
              uploaded.
          folder_or_uri: DocumentListEntry or string (optional) An object with a
              link to a folder or a uri to a folder to upload to.
              Note: A valid uri for a folder is of the form:
                    /feeds/folders/private/full/folder%3Afolder_id

        Returns:
          A DocumentListEntry containing information about the spreadsheet created
          on the Google Documents service.
        """
        return self._UploadFile(
            media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
            folder_or_uri=folder_or_uri)

    UploadSpreadsheet = atom.deprecated('Please use Upload instead')(
        UploadSpreadsheet)

    #@atom.deprecated('Please use Upload instead')
    def UploadDocument(self, media_source, title, folder_or_uri=None):
        """Uploads a document inside of a MediaSource object to the Document List
        feed with the given title.

        This method is deprecated, use Upload instead.

        Args:
          media_source: MediaSource The gdata.MediaSource object containing a
              document file to be uploaded.
          title: string The title of the document on the server after being
              uploaded.
          folder_or_uri: DocumentListEntry or string (optional) An object with a
              link to a folder or a uri to a folder to upload to.
              Note: A valid uri for a folder is of the form:
                    /feeds/folders/private/full/folder%3Afolder_id

        Returns:
          A DocumentListEntry containing information about the document created
          on the Google Documents service.
        """
        return self._UploadFile(
            media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
            folder_or_uri=folder_or_uri)

    UploadDocument = atom.deprecated('Please use Upload instead')(
        UploadDocument)

    """Calling any of these functions is the same as calling Export"""
    DownloadDocument = atom.deprecated('Please use Export instead')(Export)
    DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
    DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)

    """Calling any of these functions is the same as calling MoveIntoFolder"""
    MoveDocumentIntoFolder = atom.deprecated(
        'Please use MoveIntoFolder instead')(MoveIntoFolder)
    MovePresentationIntoFolder = atom.deprecated(
        'Please use MoveIntoFolder instead')(MoveIntoFolder)
    MoveSpreadsheetIntoFolder = atom.deprecated(
        'Please use MoveIntoFolder instead')(MoveIntoFolder)
    MoveFolderIntoFolder = atom.deprecated(
        'Please use MoveIntoFolder instead')(MoveIntoFolder)
class DocumentQuery(gdata.service.Query):
    """Object used to construct a URI to query the Google Document List feed"""

    def __init__(self, feed='/feeds/documents', visibility='private',
                 projection='full', text_query=None, params=None,
                 categories=None):
        """Constructor for Document List Query

        Args:
          feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
          visibility: string (optional) The visibility chosen for the current feed.
          projection: string (optional) The projection chosen for the current feed.
          text_query: string (optional) The contents of the q query parameter. This
              string is URL escaped upon conversion to a URI.
          params: dict (optional) Parameter value string pairs which become URL
              params when translated to a URI. These parameters are added to
              the query's items.
          categories: list (optional) List of category strings which should be
              included as query categories. See gdata.service.Query for
              additional documentation.

        Yields:
          A DocumentQuery object used to construct a URI based on the Document
          List feed.
        """
        self.visibility = visibility
        self.projection = projection
        gdata.service.Query.__init__(self, feed, text_query, params, categories)

    def ToUri(self):
        """Generates a URI from the query parameters set in the object.

        Returns:
          A string containing the URI used to retrieve entries from the Document
          List feed.
        """
        # Temporarily extend the feed path with visibility/projection so the
        # base class builds the complete URI, then restore the original feed.
        old_feed = self.feed
        self.feed = '/'.join([old_feed, self.visibility, self.projection])
        new_feed = gdata.service.Query.ToUri(self)
        self.feed = old_feed
        return new_feed

    def AddNamedFolder(self, email, folder_name):
        """Adds a named folder category, qualified by a schema.

        This function lets you query for documents that are contained inside a
        named folder without fear of collision with other categories.

        Args:
          email: string The email of the user who owns the folder.
          folder_name: string The name of the folder.

        Returns:
          The string of the category that was added to the object.
        """
        # '{scheme}term' form: the braces carry the per-user folder scheme.
        category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
        self.categories.append(category)
        return category

    def RemoveNamedFolder(self, email, folder_name):
        """Removes a named folder category, qualified by a schema.

        Args:
          email: string The email of the user who owns the folder.
          folder_name: string The name of the folder.

        Returns:
          The string of the category that was removed to the object.
        """
        # Raises ValueError (from list.remove) if the category is not present.
        category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
        self.categories.remove(category)
        return category
class DocumentAclQuery(gdata.service.Query):
    """Object used to construct a URI to query a Document's ACL feed"""

    def __init__(self, resource_id, feed='/feeds/acl/private/full'):
        """Constructor for Document ACL Query

        Args:
          resource_id: string The resource id. (e.g. 'document%3Adocument_id',
              'spreadsheet%3Aspreadsheet_id', etc.)
          feed: string (optional) The path for the feed.
              (e.g. '/feeds/acl/private/full')

        Yields:
          A DocumentAclQuery object used to construct a URI based on the Document
          ACL feed.
        """
        self.resource_id = resource_id
        gdata.service.Query.__init__(self, feed)

    def ToUri(self):
        """Generates a URI from the query parameters set in the object.

        Returns:
          A string containing the URI used to retrieve entries from the Document
          ACL feed.
        """
        # The ACL feed is addressed per resource: <feed>/<resource_id>.
        return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id)
| apache-2.0 |
vaygr/ansible | lib/ansible/modules/network/aci/aci_aep_to_domain.py | 26 | 8771 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Python 3 semantics under Python 2 (true division, print function, etc.).
from __future__ import absolute_import, division, print_function
# New-style classes on Python 2.
__metaclass__ = type

# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_aep_to_domain
short_description: Bind AEPs to Physical or Virtual Domains (infra:RsDomP)
description:
- Bind AEPs to Physical or Virtual Domains on Cisco ACI fabrics.
notes:
- The C(aep) and C(domain) parameters should exist before using this module.
The M(aci_aep) and M(aci_domain) can be used for these.
- More information about the internal APIC class B(infra:RsDomP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.5'
options:
aep:
description:
- The name of the Attachable Access Entity Profile.
aliases: [ aep_name ]
domain:
description:
- Name of the physical or virtual domain being associated with the AEP.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ fc, l2dom, l3dom, phys, vmm ]
aliases: [ type ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
vm_provider:
description:
- The VM platform for VMM Domains.
- Support for Kubernetes was added in ACI v3.0.
- Support for CloudFoundry, OpenShift and Red Hat was added in ACI v3.1.
choices: [ cloudfoundry, kubernetes, microsoft, openshift, openstack, redhat, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add AEP to domain binding
aci_aep_to_domain: &binding_present
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: present
- name: Remove AEP to domain binding
aci_aep_to_domain: &binding_absent
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: absent
- name: Query our AEP to domain binding
aci_aep_to_domain:
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: query
- name: Query all AEP to domain bindings
aci_aep_to_domain: &binding_query
host: apic
username: admin
password: SomeSecretPassword
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
# Maps the lower-case `vm_provider` module option to the capitalization the
# APIC uses in VMM domain distinguished names (uni/vmmp-<Provider>/dom-...).
VM_PROVIDER_MAPPING = {
    'cloudfoundry': 'CloudFoundry',
    'kubernetes': 'Kubernetes',
    'microsoft': 'Microsoft',
    'openshift': 'OpenShift',
    'openstack': 'OpenStack',
    'redhat': 'Redhat',
    'vmware': 'VMware',
}
def main():
    """Module entry point: validate options, build the APIC object URL for
    the AEP-to-domain binding, and create/delete/query it as requested."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        aep=dict(type='str', aliases=['aep_name']),  # Not required for querying all objects
        domain=dict(type='str', aliases=['domain_name', 'domain_profile']),  # Not required for querying all objects
        domain_type=dict(type='str', choices=['fc', 'l2dom', 'l3dom', 'phys', 'vmm'], aliases=['type']),  # Not required for querying all objects
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        vm_provider=dict(type='str', choices=['cloudfoundry', 'kubernetes', 'microsoft', 'openshift', 'openstack', 'redhat', 'vmware']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # vm_provider is mandatory for VMM domains; aep/domain/domain_type are
        # mandatory whenever we actually mutate state.
        required_if=[
            ['domain_type', 'vmm', ['vm_provider']],
            ['state', 'absent', ['aep', 'domain', 'domain_type']],
            ['state', 'present', ['aep', 'domain', 'domain_type']],
        ],
        required_together=[
            ['domain', 'domain_type'],
        ],
    )
    aep = module.params['aep']
    domain = module.params['domain']
    domain_type = module.params['domain_type']
    vm_provider = module.params['vm_provider']
    state = module.params['state']
    # Report when vm_provider is set when type is not virtual
    if domain_type != 'vmm' and vm_provider is not None:
        module.fail_json(msg="Domain type '{0}' cannot have a 'vm_provider'".format(domain_type))
    # Compile the full domain for URL building
    if domain_type == 'fc':
        domain_mo = 'uni/fc-{0}'.format(domain)
    elif domain_type == 'l2dom':
        domain_mo = 'uni/l2dom-{0}'.format(domain)
    elif domain_type == 'l3dom':
        domain_mo = 'uni/l3dom-{0}'.format(domain)
    elif domain_type == 'phys':
        domain_mo = 'uni/phys-{0}'.format(domain)
    elif domain_type == 'vmm':
        domain_mo = 'uni/vmmp-{0}/dom-{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
    else:
        # Reachable only when domain_type is omitted (query-all case).
        domain_mo = None
    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='infraAttEntityP',
            aci_rn='infra/attentp-{0}'.format(aep),
            filter_target='eq(infraAttEntityP.name, "{0}")'.format(aep),
            module_object=aep,
        ),
        subclass_1=dict(
            aci_class='infraRsDomP',
            aci_rn='rsdomP-[{0}]'.format(domain_mo),
            filter_target='eq(infraRsDomP.tDn, "{0}")'.format(domain_mo),
            module_object=domain_mo,
        ),
    )
    aci.get_existing()
    if state == 'present':
        # Diff against the existing object so the POST is a no-op when the
        # binding already matches (idempotency).
        aci.payload(
            aci_class='infraRsDomP',
            class_config=dict(tDn=domain_mo),
        )
        aci.get_diff(aci_class='infraRsDomP')
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    # exit_json reports changed/current state back to Ansible.
    aci.exit_json()
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| gpl-3.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/addons/product_margin/wizard/product_margin.py | 338 | 3457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_margin(osv.osv_memory):
    """Transient wizard model: collects a date range and invoice-state filter,
    then opens the product list with those values injected into the context
    so the product_margin report columns can use them."""
    _name = 'product.margin'
    _description = 'Product Margin'
    _columns = {
        'from_date': fields.date('From'),
        'to_date': fields.date('To'),
        'invoice_state': fields.selection([
            ('paid', 'Paid'),
            ('open_paid', 'Open and Paid'),
            ('draft_open_paid', 'Draft, Open and Paid'),
        ], 'Invoice State', select=True, required=True),
    }
    # Defaults span the current calendar year.
    # NOTE(review): evaluated once at import time, so the year is the one
    # the server process was started in — confirm this is acceptable.
    _defaults = {
        'from_date': time.strftime('%Y-01-01'),
        'to_date': time.strftime('%Y-12-31'),
        'invoice_state': "open_paid",
    }

    def action_open_window(self, cr, uid, ids, context=None):
        """Open the product margin views filtered by the wizard's settings.

        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: the ID or list of IDs if we want more than one
        @return: an ir.actions.act_window dict opening product.product
        """
        context = dict(context or {})

        def ref(module, xml_id):
            # Resolve a view's database id from its XML id.
            proxy = self.pool.get('ir.model.data')
            return proxy.get_object_reference(cr, uid, module, xml_id)

        model, search_view_id = ref('product', 'product_search_form_view')
        model, graph_view_id = ref('product_margin', 'view_product_margin_graph')
        model, form_view_id = ref('product_margin', 'view_product_margin_form')
        model, tree_view_id = ref('product_margin', 'view_product_margin_tree')
        #get the current product.margin object to obtain the values from it
        records = self.browse(cr, uid, ids, context=context)
        record = records[0]
        # Pass the wizard's filters to the report via the action context.
        context.update(invoice_state=record.invoice_state)
        if record.from_date:
            context.update(date_from=record.from_date)
        if record.to_date:
            context.update(date_to=record.to_date)
        views = [
            (tree_view_id, 'tree'),
            (form_view_id, 'form'),
            (graph_view_id, 'graph')
        ]
        return {
            'name': _('Product Margins'),
            'context': context,
            'view_type': 'form',
            "view_mode": 'tree,form,graph',
            'res_model': 'product.product',
            'type': 'ir.actions.act_window',
            'views': views,
            'view_id': False,
            'search_view_id': search_view_id,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
funson/rt-xen | tools/python/xen/xend/XendPIF.py | 44 | 11830 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (c) 2006 Xensource Inc.
#============================================================================
import commands
import logging
import os
import re
from xen.xend import uuid as genuuid
from xen.xend import XendAPIStore
from xen.xend.XendBase import XendBase
from xen.xend.XendPIFMetrics import XendPIFMetrics
from xen.xend.XendError import *
from xen.xend import Vifctl
from xen.util import auxbin
log = logging.getLogger("xend.XendPIF")
log.setLevel(logging.TRACE)
MAC_RE = re.compile(':'.join(['[0-9a-f]{2}'] * 6))
IP_IFACE_RE = re.compile(r'^\d+: (\w+):.*mtu (\d+) .* link/\w+ ([0-9a-f:]+)')
Vifctl.network('start')
def linux_phy_to_virt(pif_name):
    """Map a physical interface name (e.g. 'peth0') to its virtual
    counterpart (e.g. 'eth0') by swapping the alphabetic prefix for 'eth'."""
    numeric_suffix = re.sub(r'^[a-z]+', '', pif_name)
    return 'eth%s' % numeric_suffix
def linux_get_phy_ifaces():
    """Returns a list of physical interfaces.

    Identifies PIFs as those that have a interface name starting with
    'peth'.
    See /etc/xen/scripts/network-bridge for how the devices are renamed.

    @rtype: array of 3-element tuple (name, mtu, mac)
    """
    ip_cmd = 'ip -o link show'
    rc, output = commands.getstatusoutput(ip_cmd)
    ifaces = {}
    phy_ifaces = []
    if rc == 0:
        # parse all interfaces into (name, mtu, mac)
        for line in output.split('\n'):
            has_if = re.search(IP_IFACE_RE, line)
            if has_if:
                ifaces[has_if.group(1)] = has_if.groups()

        # resolve pifs' mac addresses: the MAC reported for a 'peth*'
        # device is taken from its bridged virtual counterpart ('eth*'),
        # not from the physical device itself; '' if no counterpart found.
        for name, mtu, mac in ifaces.values():
            if name.startswith('peth'):
                bridged_ifname = linux_phy_to_virt(name)
                bridged_if = ifaces.get(bridged_ifname)
                bridged_mac = ''
                if bridged_if:
                    bridged_mac = bridged_if[2]
                phy_ifaces.append((name, int(mtu), bridged_mac))
    # NOTE(review): a non-zero rc from `ip` silently yields an empty list.
    return phy_ifaces
def linux_set_mac(iface, mac):
    """Set the MAC address of the virtual counterpart of *iface*.

    Returns True on success, False for a malformed MAC or command failure.
    """
    if re.search(MAC_RE, mac) is None:
        return False

    cmd = 'ip link set %s addr %s' % (linux_phy_to_virt(iface), mac)
    rc, _ = commands.getstatusoutput(cmd)
    return rc == 0
def linux_set_mtu(iface, mtu):
    """Set the MTU of the virtual counterpart of *iface*.

    Returns False when *mtu* is not an integer or when the command fails.
    """
    try:
        mtu_value = int(mtu)
    except ValueError:
        return False

    cmd = 'ip link set %s mtu %d' % (linux_phy_to_virt(iface), mtu_value)
    rc, _ = commands.getstatusoutput(cmd)
    return rc == 0
def linux_get_mtu(device):
    # MTU of *device* as reported by `ip link show` ('' if unavailable).
    return _linux_get_pif_param(device, 'mtu')
def linux_get_mac(device):
    # MAC of *device*: the token following 'link/ether' in `ip link show`
    # output ('' if the device is unknown or has no ethernet link layer).
    return _linux_get_pif_param(device, 'link/ether')
def _linux_get_pif_param(device, param_name):
    """Return the value following *param_name* in `ip link show <device>`
    output, or '' when the device or parameter is not found.

    `ip` separates tokens with variable-width runs of whitespace, so we
    split on arbitrary whitespace (str.split() with no argument).  The
    original code split on a single space, which produced empty tokens on
    multi-space columns and could pair a parameter name with an empty
    "value".  The loop is also bounded at len - 1 so a parameter name
    appearing as the very last token cannot raise IndexError.
    """
    ip_get_dev_data = 'ip link show %s' % device
    rc, output = commands.getstatusoutput(ip_get_dev_data)
    if rc == 0:
        params = output.split()
        for i in xrange(len(params) - 1):
            if params[i] == param_name:
                return params[i + 1]
    return ''
def _create_VLAN(dev, vlan):
    """Create the VLAN interface '<dev>.<vlan>' and bring it up.

    Returns True only if both steps succeed.
    """
    rc, _ = commands.getstatusoutput('vconfig add %s %d' % (dev, vlan))
    if rc != 0:
        return False

    rc, _ = commands.getstatusoutput('ifconfig %s.%d up' % (dev, vlan))
    return rc == 0
def _destroy_VLAN(dev, vlan):
    """Bring the VLAN interface '<dev>.<vlan>' down and remove it.

    Returns True only if both steps succeed.
    """
    rc, _ = commands.getstatusoutput('ifconfig %s.%d down' % (dev, vlan))
    if rc != 0:
        return False

    rc, _ = commands.getstatusoutput('vconfig rem %s.%d' % (dev, vlan))
    return rc == 0
class XendPIF(XendBase):
    """Representation of a Physical Network Interface.

    Instances wrap a record dict (network, device, MAC, MTU, VLAN) and a
    XendPIFMetrics companion object.  A PIF with VLAN == -1 is a plain
    physical device; otherwise it is a VLAN on top of 'device'.
    """

    # --- Xen-API schema declarations -------------------------------------
    def getClass(self):
        return "PIF"

    def getAttrRO(self):
        attrRO = ['network',
                  'host',
                  'metrics',
                  'device',
                  'VLAN']
        return XendBase.getAttrRO() + attrRO

    def getAttrRW(self):
        attrRW = ['MAC',
                  'MTU']
        return XendBase.getAttrRW() + attrRW

    def getAttrInst(self):
        # Attributes persisted with the instance record.
        attrInst = ['network',
                    'device',
                    'MAC',
                    'MTU',
                    'VLAN']
        return attrInst

    def getMethods(self):
        methods = ['plug',
                   'unplug',
                   'destroy']
        return XendBase.getMethods() + methods

    def getFuncs(self):
        funcs = ['create_VLAN']
        return XendBase.getFuncs() + funcs

    # Pre-decorator-syntax classmethod wrapping (Python 2.3-era style).
    getClass = classmethod(getClass)
    getAttrRO = classmethod(getAttrRO)
    getAttrRW = classmethod(getAttrRW)
    getAttrInst = classmethod(getAttrInst)
    getMethods = classmethod(getMethods)
    getFuncs = classmethod(getFuncs)

    def create_phy(self, network_uuid, device,
                   MAC, MTU):
        """
        Called when a new physical PIF is found
        Could be a VLAN...

        Returns the uuid of the newly registered PIF.
        """
        # Create new uuids
        pif_uuid = genuuid.createString()
        metrics_uuid = genuuid.createString()

        # Create instances
        metrics = XendPIFMetrics(metrics_uuid, pif_uuid)

        # Is this a VLAN?  Both 'dev.N' and 'dev:N' spellings are accepted.
        # NOTE(review): VLAN stays a *string* here but -1 (int) for non-VLAN
        # devices -- downstream formatting with %d relies on get_VLAN()
        # callers handling both; confirm before changing.
        VLANdot = device.split(".")
        VLANcolon = device.split(":")
        if len(VLANdot) > 1:
            VLAN = VLANdot[1]
            device = VLANdot[0]
        elif len(VLANcolon) > 1:
            VLAN = VLANcolon[1]
            device = VLANcolon[0]
        else:
            VLAN = -1

        record = {
            'network': network_uuid,
            'device': device,
            'MAC': MAC,
            'MTU': MTU,
            'VLAN': VLAN
        }
        pif = XendPIF(record, pif_uuid, metrics_uuid)
        return pif_uuid

    def recreate(self, record, uuid):
        """Called on xend start / restart.

        Re-registers a persisted PIF; physical PIFs that no longer exist
        are destroyed, missing VLAN interfaces are re-created and plugged.
        Returns the PIF uuid, or None if the PIF was stale and destroyed.
        """
        pif_uuid = uuid
        metrics_uuid = record['metrics']

        # Create instances
        metrics = XendPIFMetrics(metrics_uuid, pif_uuid)
        pif = XendPIF(record, pif_uuid, metrics_uuid)

        # If physical PIF, check exists
        # If VLAN, create if not exist
        ifs = [dev for dev, _1, _2 in linux_get_phy_ifaces()]
        if pif.get_VLAN() == -1:
            if pif.get_device() not in ifs:
                XendBase.destroy(pif)
                metrics.destroy()
                return None
        else:
            if pif.get_interface_name() not in ifs:
                _create_VLAN(pif.get_device(), pif.get_VLAN())
            pif.plug()

        return pif_uuid

    def create_VLAN(self, device, network_uuid, host_ref, vlan):
        """Exposed via API - create a new VLAN from existing VIF"""
        ifs = [name for name, _, _ in linux_get_phy_ifaces()]

        vlan = int(vlan)
        # Check VLAN tag is valid (12-bit 802.1Q tag space)
        if vlan < 0 or vlan >= 4096:
            raise VLANTagInvalid(vlan)

        # Check device exists
        if device not in ifs:
            raise InvalidDeviceError(device)

        # Check VLAN doesn't already exist
        if "%s.%d" % (device, vlan) in ifs:
            raise DeviceExistsError("%s.%d" % (device, vlan))

        # Check network ref is valid
        from XendNetwork import XendNetwork
        if network_uuid not in XendNetwork.get_all():
            raise InvalidHandleError("Network", network_uuid)

        # Check host_ref is this host
        import XendNode
        if host_ref != XendNode.instance().get_uuid():
            raise InvalidHandleError("Host", host_ref)

        # Create the VLAN
        _create_VLAN(device, vlan)

        # Create new uuids
        pif_uuid = genuuid.createString()
        metrics_uuid = genuuid.createString()

        # Create the record
        record = {
            "device": device,
            "MAC": linux_get_mac('%s.%d' % (device, vlan)),
            "MTU": linux_get_mtu('%s.%d' % (device, vlan)),
            "network": network_uuid,
            "VLAN": vlan
        }

        # Create instances
        metrics = XendPIFMetrics(metrics_uuid, pif_uuid)
        pif = XendPIF(record, pif_uuid, metrics_uuid)

        # Not sure if they should be created plugged or not...
        pif.plug()

        XendNode.instance().save_PIFs()
        return pif_uuid

    create_phy = classmethod(create_phy)
    recreate = classmethod(recreate)
    create_VLAN = classmethod(create_VLAN)

    def __init__(self, record, uuid, metrics_uuid):
        XendBase.__init__(self, uuid, record)
        # uuid of the companion XendPIFMetrics object.
        self.metrics = metrics_uuid

    def plug(self):
        """Plug the PIF into the network"""
        network = XendAPIStore.get(self.network,
                                   "network")
        bridge_name = network.get_name_label()

        from xen.util import Brctl
        Brctl.vif_bridge_add({
            "bridge": bridge_name,
            "vif": self.get_interface_name()
        })

    def unplug(self):
        """Unplug the PIF from the network"""
        network = XendAPIStore.get(self.network,
                                   "network")
        bridge_name = network.get_name_label()

        from xen.util import Brctl
        Brctl.vif_bridge_rem({
            "bridge": bridge_name,
            "vif": self.get_interface_name()
        })

    def destroy(self):
        # Figure out if this is a physical device; physical PIFs cannot be
        # destroyed through the API, only VLANs can.
        if self.get_interface_name() == \
           self.get_device():
            raise PIFIsPhysical()

        self.unplug()

        if _destroy_VLAN(self.get_device(), self.get_VLAN()):
            XendBase.destroy(self)
            import XendNode
            XendNode.instance().save_PIFs()
        else:
            raise NetworkError("Unable to delete VLAN", self.get_uuid())

    def get_interface_name(self):
        # 'dev' for physical PIFs, 'dev.VLAN' for VLANs.
        if self.get_VLAN() == -1:
            return self.get_device()
        else:
            return "%s.%d" % (self.get_device(), self.get_VLAN())

    def get_device(self):
        """
        This is the base interface.
        For phy if (VLAN == -1) this is same as
        if name.
        For VLANs, this it the bit before the period
        """
        return self.device

    def get_network(self):
        return self.network

    def get_host(self):
        from xen.xend import XendNode
        return XendNode.instance().get_uuid()

    def get_metrics(self):
        return self.metrics

    def get_MAC(self):
        return self.MAC

    def set_MAC(self, new_mac):
        # Only updates the stored value if the OS-level change succeeded.
        success = linux_set_mac(self.device, new_mac)
        if success:
            self.MAC = new_mac
            import XendNode
            XendNode.instance().save_PIFs()
        return success

    def get_MTU(self):
        return self.MTU

    def set_MTU(self, new_mtu):
        # Only updates the stored value if the OS-level change succeeded.
        success = linux_set_mtu(self.device, new_mtu)
        if success:
            self.MTU = new_mtu
            import XendNode
            XendNode.instance().save_PIFs()
        return success

    def get_VLAN(self):
        return self.VLAN
| gpl-2.0 |
coreyoconnor/nixops | nixops/resources/azure_table.py | 6 | 4211 | # -*- coding: utf-8 -*-
# Automatic provisioning of Azure Tables.
import os
import azure
from nixops.util import attr_property
from nixops.azure_common import StorageResourceDefinition, StorageResourceState
from nixops.resources.azure_resource_group import AzureResourceGroupState
from nixops.resources.azure_storage import AzureStorageState
class AzureTableDefinition(StorageResourceDefinition):
    """Definition of an Azure Table."""

    @classmethod
    def get_type(cls):
        return "azure-table"

    @classmethod
    def get_resource_type(cls):
        return "azureTables"

    def __init__(self, xml):
        StorageResourceDefinition.__init__(self, xml)
        self.table_name = self.get_option_value(xml, 'name', str)
        # Azure table names may not contain dashes; fail early.
        if '-' in self.table_name:
            raise Exception("{0}: table name must not contain dashes"
                            .format(self.table_name))
        self.copy_option(xml, 'storage', 'resource')
        self.copy_signed_identifiers(xml.find("attrs/attr[@name='acl']"))

    def show_type(self):
        return "{0}".format(self.get_type())
class AzureTableState(StorageResourceState):
    """State of an Azure Table"""

    # Persisted state attributes.
    table_name = attr_property("azure.name", None)
    storage = attr_property("azure.storage", None)
    signed_identifiers = attr_property("azure.signedIdentifiers", {}, 'json')

    @classmethod
    def get_type(cls):
        return "azure-table"

    def show_type(self):
        s = super(AzureTableState, self).show_type()
        if self.state == self.UP: s = "{0}".format(s)
        return s

    @property
    def resource_id(self):
        return self.table_name

    @property
    def full_name(self):
        return "Azure table '{0}'".format(self.resource_id)

    def get_storage_name(self):
        return self.storage

    def get_key(self):
        """Return the storage access key, falling back to the key of the
        referenced storage resource; raise if neither is available."""
        storage = self.get_resource_state(AzureStorageState, self.storage)
        access_key = self.access_key or (storage and storage.access_key)

        if not access_key:
            raise Exception("Can't obtain the access key needed to manage {0}"
                            .format(self.full_name))
        return access_key

    def is_settled(self, resource):
        # Tables have no in-progress provisioning states.
        return True

    def get_resource_allow_exceptions(self):
        return self.ts().get_table_acl(self.resource_id)

    def destroy_resource(self):
        self.ts().delete_table(self.resource_id, fail_not_exist = True)

    def create(self, defn, check, allow_reboot, allow_recreate):
        """Create the table (and sync its ACL) to match *defn*."""
        self.no_property_change(defn, 'storage')

        self.table_name = defn.table_name
        self.access_key = defn.access_key
        self.storage = defn.storage

        if check:
            # Reconcile recorded state with what actually exists in Azure.
            table = self.get_settled_resource()
            if table is None:
                self.warn_missing_resource()
            elif self.state == self.UP:
                self.handle_changed_signed_identifiers(
                    self.ts().get_table_acl(self.table_name))
            else:
                self.warn_not_supposed_to_exist()
                self.confirm_destroy()

        if self.state != self.UP:
            if self.get_settled_resource() is not None:
                raise Exception("tried creating a table that already exists; "
                                "please run 'deploy --check' to fix this")
            self.log("creating {0} in {1}...".format(self.full_name, defn.storage))
            self.ts().create_table(defn.table_name, fail_on_exist = True)
            self.state = self.UP

        if self.signed_identifiers != defn.signed_identifiers:
            self.log("updating the ACL of {0}..."
                     .format(self.full_name))
            self.get_settled_resource_assert_exists()
            signed_identifiers = self._dict_to_signed_identifiers(defn.signed_identifiers)
            self.ts().set_table_acl(self.table_name,
                                    signed_identifiers = signed_identifiers)
            self.signed_identifiers = defn.signed_identifiers

    def create_after(self, resources, defn):
        # Ensure the resource group and storage account exist first.
        return {r for r in resources
                if isinstance(r, AzureResourceGroupState) or isinstance(r, AzureStorageState)}
| lgpl-3.0 |
hunch/hunch-gift-app | django/contrib/messages/storage/session.py | 12 | 1246 | from django.contrib.messages.storage.base import BaseStorage
class SessionStorage(BaseStorage):
    """
    Stores messages in the session (that is, django.contrib.sessions).
    """
    session_key = '_messages'

    def __init__(self, request, *args, **kwargs):
        assert hasattr(request, 'session'), (
            "The session-based temporary "
            "message storage requires session middleware to be installed, "
            "and come before the message middleware in the "
            "MIDDLEWARE_CLASSES list.")
        super(SessionStorage, self).__init__(request, *args, **kwargs)

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages from the request's session. This storage
        always stores everything it is given, so return True for the
        all_retrieved flag.
        """
        messages = self.request.session.get(self.session_key)
        return messages, True

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores a list of messages to the request's session.
        """
        session = self.request.session
        if not messages:
            session.pop(self.session_key, None)
        else:
            session[self.session_key] = messages
        return []
| mit |
liresearchgroup/submtr | submtr/lib/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte encoding."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # One prober instance per supported multi-byte charset, tried in
        # this order by the group prober machinery.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [prober() for prober in prober_classes]
        self.reset()
| mit |
hyiltiz/youtube-dl | youtube_dl/extractor/playvid.py | 115 | 2864 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
ExtractorError,
)
class PlayvidIE(InfoExtractor):
    """Extractor for playvid.com watch pages.

    Most metadata (title, duration, thumbnail, format URLs) is embedded in
    the player's URL-encoded ``flashvars`` attribute.
    """
    _VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
    _TEST = {
        'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
        'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
        'info_dict': {
            'id': 'RnmBNgtrrJu',
            'ext': 'mp4',
            'title': 'md5:9256d01c6317e3f703848b5906880dc8',
            'duration': 82,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Surface site-side error messages (removed video etc.) verbatim.
        m_error = re.search(
            r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
        if m_error:
            raise ExtractorError(clean_html(m_error.group('msg')), expected=True)

        video_title = None
        duration = None
        video_thumbnail = None
        formats = []

        # most of the information is stored in the flashvars
        flashvars = self._html_search_regex(
            r'flashvars="(.+?)"', webpage, 'flashvars')

        # flashvars is a URL-encoded '&'-separated key=value list.
        infos = compat_urllib_parse_unquote(flashvars).split(r'&')
        for info in infos:
            videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info)
            if videovars_match:
                key = videovars_match.group(1)
                val = videovars_match.group(2)

                if key == 'title':
                    video_title = compat_urllib_parse_unquote_plus(val)
                if key == 'duration':
                    try:
                        duration = int(val)
                    except ValueError:
                        pass
                if key == 'big_thumb':
                    video_thumbnail = val

                # Keys like 'video_urls][360p' carry one format per height.
                videourl_match = re.match(
                    r'^video_urls\]\[(?P<resolution>[0-9]+)p', key)
                if videourl_match:
                    height = int(videourl_match.group('resolution'))
                    formats.append({
                        'height': height,
                        'url': val,
                    })
        self._sort_formats(formats)

        # Extract title - should be in the flashvars; if not, look elsewhere
        if video_title is None:
            video_title = self._html_search_regex(
                r'<title>(.*?)</title', webpage, 'title')

        return {
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'duration': duration,
            'description': None,
            'age_limit': 18
        }
| unlicense |
AgataGibas/python101 | docs/pygame/pong/pong_z5.py | 4 | 5158 | # coding=utf-8
import pygame
import pygame.locals
class Board(object):
    """
    The game board.  Responsible for drawing the game window.
    """

    def __init__(self, width, height):
        """
        Board constructor.  Prepares the game window.

        :param width: window width in pixels
        :param height: window height in pixels
        """
        self.surface = pygame.display.set_mode((width, height), 0, 32)
        pygame.display.set_caption('Simple Pong')

    def draw(self, *args):
        """
        Draws the game window.

        :param args: list of drawable objects (each must expose draw_on)
        """
        background = (230, 255, 255)
        self.surface.fill(background)
        for drawable in args:
            drawable.draw_on(self.surface)

        # Only at this point does the actual drawing in the game window
        # happen; before this we were just setting up what to draw and how.
        pygame.display.update()
class PongGame(object):
    """
    Ties all the game elements together.
    """

    def __init__(self, width, height):
        pygame.init()
        self.board = Board(width, height)
        # Clock used to control how fast successive game frames are drawn.
        self.fps_clock = pygame.time.Clock()
        self.ball = Ball(width=20, height=20, x=width/2, y=height/2)
        self.player1 = Racket(width=80, height=20, x=width/2, y=height/2)

    def run(self):
        """
        Main program loop.
        """
        while not self.handle_events():
            # Keep looping until we receive the quit signal.
            self.ball.move(self.board, self.player1)
            self.board.draw(
                self.ball,
                self.player1,
            )
            # Cap the frame rate at 30 FPS.
            self.fps_clock.tick(30)

    def handle_events(self):
        """
        Handles system events, e.g. interpreting mouse movement.

        :return: True if pygame delivered a quit-game event
        """
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT:
                pygame.quit()
                return True

            if event.type == pygame.locals.MOUSEMOTION:
                # The mouse controls the first player's movement.
                x, y = event.pos
                self.player1.move(x)
class Drawable(object):
    """
    Base class for drawable objects.
    """

    def __init__(self, width, height, x, y, color=(0, 255, 0)):
        self.width = width
        self.height = height
        self.color = color
        # Per-object surface with alpha so shapes can have transparency.
        self.surface = pygame.Surface([width, height], pygame.SRCALPHA, 32).convert_alpha()
        self.rect = self.surface.get_rect(x=x, y=y)

    def draw_on(self, surface):
        # Blit this object's surface at its current rect position.
        surface.blit(self.surface, self.rect)
class Ball(Drawable):
    """
    The ball.  Controls its own speed and direction of movement.
    """

    def __init__(self, width, height, x, y, color=(255, 0, 0), x_speed=3, y_speed=3):
        super(Ball, self).__init__(width, height, x, y, color)
        pygame.draw.ellipse(self.surface, self.color, [0, 0, self.width, self.height])
        self.x_speed = x_speed
        self.y_speed = y_speed
        # Remember the spawn point so reset() can return the ball there.
        self.start_x = x
        self.start_y = y

    def bounce_y(self):
        """
        Reverses the velocity vector along the Y axis.
        """
        self.y_speed *= -1

    def bounce_x(self):
        """
        Reverses the velocity vector along the X axis.
        """
        self.x_speed *= -1

    def reset(self):
        """
        Puts the ball back at its starting position and reverses the
        velocity vector along the Y axis.

        Bug fix: the previous implementation called ``self.rect.move(...)``,
        which returns a *new* Rect (leaving ``self.rect`` unchanged) and
        would have applied an offset rather than setting a position anyway.
        Assign the coordinates directly instead.
        """
        self.rect.x = self.start_x
        self.rect.y = self.start_y
        self.bounce_y()

    def move(self, board, *args):
        """
        Moves the ball by its velocity vector, bouncing off the board's
        edges and off any rackets passed in ``args``.
        """
        self.rect.x += self.x_speed
        self.rect.y += self.y_speed

        if self.rect.x < 0 or self.rect.x > board.surface.get_width():
            self.bounce_x()

        if self.rect.y < 0 or self.rect.y > board.surface.get_height():
            self.bounce_y()

        for racket in args:
            if self.rect.colliderect(racket.rect):
                self.bounce_y()
class Racket(Drawable):
    """
    The racket.  Moves along the X axis with a speed limit.
    """

    def __init__(self, width, height, x, y, color=(0, 255, 0), max_speed=10):
        super(Racket, self).__init__(width, height, x, y, color)
        self.max_speed = max_speed
        self.surface.fill(color)

    def move(self, x):
        """
        Moves the racket toward the given position, clamping the per-frame
        displacement to max_speed.
        """
        delta = x - self.rect.x
        if abs(delta) > self.max_speed:
            delta = self.max_speed if delta > 0 else -self.max_speed
        self.rect.x += delta
# This part should always be at the end of the module (this file is a module);
# we want to start the game only after all the classes have been declared.
if __name__ == "__main__":
game = PongGame(800, 400)
game.run()
| mit |
baiyunping333/BurpSuite-Plugins | faraday/utils/pickled_dict.py | 2 | 2136 | #!/usr/bin/env python
'''
Faraday Penetration Test IDE - Community Version
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
'''
File: pickled_dict.py
Author: Daniel J. Foguelman
Description: A persist-to-disk picklebased dictionary with all the normal features.
'''
import cPickle as pickle
import IPython
import os
import threading
import unittest
class PickleBackedDict(dict):
    """A dict-like object whose contents are persisted to disk via pickle.

    Every __setitem__ rewrites the entire pickle file under a lock, so
    writes are O(size of dict).  Reads go through the in-memory copy and
    are NOT locked against concurrent writers.  Note that only the
    overridden methods below persist/observe self.dict; other inherited
    dict operations (del, update, len, iteration, ...) act on the empty
    base dict and are neither persisted nor reflected -- TODO confirm this
    limitation is intentional for its callers.
    """

    def __init__(self, path, filename = None):
        # Accept either a full path, or a directory plus separate filename.
        self.path = os.path.join(path, filename) if not filename is None else path
        self.lock = threading.Lock()
        # Load the previously persisted contents, if the file exists.
        if os.path.exists(self.path):
            with open(self.path, 'rb') as f:
                self.dict = pickle.load(f)
        else:
            self.dict = {}

    def cleanUp(self):
        """Empty the dictionary and persist the empty state to disk."""
        with self.lock:
            with open(self.path, 'wb', 0) as writer:
                self.dict = {}
                pickle.dump(self.dict, writer)

    def __setitem__(self, key, value):
        # Store the item and rewrite the whole pickle file atomically with
        # respect to other writers (buffering=0 forces unbuffered output).
        with self.lock:
            with open(self.path, 'wb', 0) as writer:
                self.dict.__setitem__(key, value)
                pickle.dump(self.dict, writer)

    def __getitem__(self, key):
        # In-memory read; raises KeyError like a normal dict.
        return self.dict.__getitem__(key)

    def __repr__(self):
        return self.dict.__repr__()

    def __str__(self):
        return self.dict.__str__()
class TestPickledDict(unittest.TestCase):
    """Smoke/performance tests for PickleBackedDict.

    NOTE(review): the assertions below are wall-clock based and therefore
    machine-dependent and potentially flaky on slow or loaded hosts.
    """

    def setUp(self):
        pass

    def tearDown(self):
        """docstring for tearDown"""
        pass

    def test_time_insert_and_retrieve(self):
        from time import time
        # os.tmpfile() creates the file; PickleBackedDict then reuses the
        # same path (Python 2 only -- os.tmpfile was removed in Python 3).
        d_file = os.tmpfile()
        d = PickleBackedDict(path = d_file.name)
        it = time() * 1000
        for i in range(10):
            d[i] = range(50)
        et = time() * 1000
        # NOTE(review): message says "a millon records" but only 10 keys of
        # 50 elements are inserted.
        self.assertTrue( et - it < 2500, "Inserting a millon records takes more than a 2.5sec")

        it = time() * 1000
        a = d[3]
        et = time() * 1000
        self.assertTrue( et - it < 500, "reading is a heavy task")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
napkindrawing/ansible | lib/ansible/modules/notification/flowdock.py | 16 | 6195 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <coddington@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: [ ]
'''
EXAMPLES = '''
- flowdock:
type: inbox
token: AAAAAA
from_address: user@example.com
source: my cool app
msg: test from ansible
subject: test subject
- flowdock:
type: chat
token: AAAAAA
external_user_name: testuser
msg: test from ansible
tags: tag1,tag2,tag3
'''
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
    """Validate module parameters and push a message to Flowdock's
    team-inbox or chat push API."""
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            msg=dict(required=True),
            type=dict(required=True, choices=["inbox", "chat"]),
            external_user_name=dict(required=False),
            from_address=dict(required=False),
            source=dict(required=False),
            subject=dict(required=False),
            from_name=dict(required=False),
            reply_to=dict(required=False),
            project=dict(required=False),
            tags=dict(required=False),
            link=dict(required=False),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Avoid shadowing the builtin `type`.
    msg_type = module.params["type"]
    token = module.params["token"]
    endpoint = "team_inbox" if msg_type == 'inbox' else "chat"
    url = "https://api.flowdock.com/v1/messages/%s/%s" % (endpoint, token)

    params = {}

    # required params
    params['content'] = module.params["msg"]

    # required params for the 'chat' type
    if module.params['external_user_name']:
        if msg_type == 'inbox':
            module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
        else:
            params['external_user_name'] = module.params["external_user_name"]
    elif msg_type == 'chat':
        module.fail_json(msg="external_user_name is required for the 'chat' type")

    # required params for the 'inbox' type
    for item in ('from_address', 'source', 'subject'):
        if module.params[item]:
            if msg_type == 'chat':
                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
            else:
                params[item] = module.params[item]
        elif msg_type == 'inbox':
            module.fail_json(msg="%s is required for the 'inbox' type" % item)

    # optional params
    if module.params["tags"]:
        params['tags'] = module.params["tags"]

    # optional params for the 'inbox' type
    for item in ('from_name', 'reply_to', 'project', 'link'):
        if module.params[item]:
            if msg_type == 'chat':
                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
            else:
                params[item] = module.params[item]

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=False)

    # Send the data to Flowdock
    data = urlencode(params)
    response, info = fetch_url(module, url, data=data)
    if info['status'] != 200:
        module.fail_json(msg="unable to send msg: %s" % info['msg'])

    module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
oberlin/django | tests/middleware/test_security.py | 291 | 7781 | from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(SimpleTestCase):
    """Exercise django.middleware.security.SecurityMiddleware directly.

    Each test toggles the relevant SECURE_* settings with override_settings
    and drives the middleware's process_request / process_response hooks
    against requests built by a RequestFactory.
    """

    # Shared factory; individual requests are built per test.
    request = RequestFactory()

    @property
    def middleware(self):
        # Imported lazily so each access sees the currently overridden
        # settings.
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        # Forcing the WSGI scheme to https makes request.is_secure() True.
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        """Build an HttpResponse, applying any extra headers passed via the
        ``headers`` keyword argument."""
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        """Run a full request/response cycle through the middleware.

        If process_request() short-circuits (e.g. with an SSL redirect),
        that response is returned; otherwise the response built from
        *args/**kwargs is passed through process_response().
        """
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    def process_request(self, method, *args, **kwargs):
        """Run a single request through the middleware's process_request()."""
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With SECURE_HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    # Fixed: the setting was misspelled HSTS_SECONDS, which the middleware
    # never reads, so the test passed vacuously (the header is never added
    # with the default SECURE_HSTS_SECONDS=0 anyway).
    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=False))

    # Fixed: misspelled HSTS_SECONDS -> SECURE_HSTS_SECONDS.
    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With SECURE_HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS
        True, the middleware adds a "strict-transport-security" header with
        the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS
        False, the middleware adds a "strict-transport-security" header
        without the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With SECURE_CONTENT_TYPE_NOSNIFF set to True, the middleware adds an
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")

    # Fixed: the setting was misspelled SECURE_CONTENT_TYPE_NO_SNIFF, so the
    # middleware never attempted to set the header and the test was vacuous.
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With SECURE_CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With SECURE_BROWSER_XSS_FILTER set to True, the middleware adds an
        "x-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    # Fixed: misspelled BROWSER_XSS_FILTER -> SECURE_BROWSER_XSS_FILTER.
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With SECURE_BROWSER_XSS_FILTER set to False, the middleware does not
        add an "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SECURE_SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SECURE_SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SECURE_SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
| bsd-3-clause |
ConPaaS-team/conpaas | cps-tools/src/cps_tools/htc.py | 1 | 9385 |
# from .service import ServiceCmd
# class HTCCmd(ServiceCmd):
# def __init__(self, htc_parser, client):
# ServiceCmd.__init__(self, htc_parser, client, "htc",
# ['node'], "HTC service sub-commands help")
# self._add_create_job()
# self._add_upload_file()
# self._add_add()
# self._add_sample()
# self._add_submit()
# self._add_get_config()
# self._add_throughput()
# self._add_select()
# # ========== remove_nodes
# def _add_remove_nodes(self):
# """Overrides ServiceCmd._add_remove_nodes(self)."""
# subparser = self.add_parser('remove_nodes',
# help="remove nodes from a service")
# subparser.set_defaults(run_cmd=self.remove_nodes, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('node_id', help="Identifier of node to remove")
# def remove_nodes(self, args):
# """Overrides ServiceCmd.remove_nodes(self, args)."""
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# data = {'node': 1, 'id': args.node_id}
# res = self.client.call_manager_post(app_id, service_id, "remove_nodes", data)
# if 'error' in res:
# self.client.error("Could not remove node %s from service %s: %s"
# % (args.node_id, service_id, res['error']))
# else:
# print("Node %s has been successfully removed from service %s."
# % (args.node_id, service_id))
# # ========== create_job
# def _add_create_job(self):
# subparser = self.add_parser('create_job', help="create a job")
# subparser.set_defaults(run_cmd=self.create_job, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('filename', help="path to job file")
# def create_job(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# with open(args.filename, 'r') as jobfile:
# contents = jobfile.read()
# files = [(args.filename, args.filename, contents)]
# params = {'method': 'create_job'}
# res = self.client.call_manager_post(app_id, service_id, "/", params, files)
# if 'error' in res:
# print res['error']
# else:
# print res['id']
# # ========== upload_file
# def _add_upload_file(self):
# subparser = self.add_parser('upload_file', help="upload a file")
# subparser.set_defaults(run_cmd=self.upload_file, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('filename', help="path to file")
# def upload_file(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# with open(args.filename, 'r') as jobfile:
# contents = jobfile.read()
# files = [(args.filename, args.filename, contents)]
# params = {'method': 'upload_file'}
# res = self.client.call_manager_post(app_id, service_id, "/", params, files)
# if 'error' in res:
# print res['error']
# else:
# print res['out']
# # ========== add
# def _add_add(self):
# subparser = self.add_parser('add', help="add tasks to a job")
# subparser.set_defaults(run_cmd=self.add, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('job_id', type=int, help="Job identifier")
# subparser.add_argument('filename', help="path to job file")
# def add(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# with open(args.filename, 'r') as jobfile:
# contents = jobfile.read()
# files = [(args.filename, args.filename, contents)]
# params = {'method': 'add', 'job_id': args.job_id}
# res = self.client.call_manager_post(app_id, service_id, "/", params, files)
# if 'error' in res:
# print res['error']
# else:
# print res["id"]
# # ========== sample
# def _add_sample(self):
# subparser = self.add_parser('sample', help="sample a job")
# subparser.set_defaults(run_cmd=self.sample, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('job_id', type=int, help="Job identifier")
# def sample(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# params = {'job_id': args.job_id}
# res = self.client.call_manager_post(app_id, service_id, "sample", params)
# if 'error' in res:
# print res['error']
# else:
# print res["out"]
# # ========== submit
# def _add_submit(self):
# subparser = self.add_parser('submit', help="execute a job")
# subparser.set_defaults(run_cmd=self.submit, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('job_id', type=int, help="Job identifier")
# def submit(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# params = {'job_id': args.job_id}
# res = self.client.call_manager_post(app_id, service_id, "execute", params)
# if 'error' in res:
# print res['error']
# else:
# print res['out']
# # ========== get_config
# def _add_get_config(self):
# subparser = self.add_parser('get_config',
# help="get configuration for a throughput")
# subparser.set_defaults(run_cmd=self.get_config, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('throughput', type=int, help="target throughput")
# def get_config(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# params = {'t': args.throughput}
# res = self.client.call_manager_post(app_id, service_id, "get_config", params)
# print "%s" % res
# # ========== throughput
# def _add_throughput(self):
# subparser = self.add_parser('throughput', help="throughput (??)")
# subparser.set_defaults(run_cmd=self.get_config, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('throughput', type=int, help="target throughput")
# def throughput(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# params = {'t': args.throughput}
# res = self.client.call_manager_post(app_id, service_id, "get_m", params)
# print "%s" % res
# # ========== select
# def _add_select(self):
# subparser = self.add_parser('select', help="select a throughput")
# subparser.set_defaults(run_cmd=self.get_config, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('throughput', type=int, help="target throughput")
# def select(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# params = {'t': args.throughput}
# res = self.client.call_manager_post(app_id, service_id, "select", params)
# print "%s" % res
| bsd-3-clause |
chop-dbhi/prov-extractor | prov_extractor/sources/mongodb.py | 1 | 3546 | import os
import pymongo
from bson.code import Code
from . import base
# Map-reduce functions for getting and counting the unique fields
# across documents in a collection.
map_fields = Code('''
function() {
for (var key in this) {
emit(key, 1);
}
}
''')
count_fields = Code('''
function(key, values) {
return Array.sum(values);
}
''')
class Client(base.Client):
    """Provenance extractor for a MongoDB database.

    Produces one entity for the database, one per collection, and one per
    distinct document field (discovered via a server-side map-reduce).
    """

    name = 'MongoDB'
    description = '''
    Generator for a MongoDB database. The database, collections, and
    document fields are extracted as entities.
    '''
    options = {
        'required': ['database'],
        'properties': {
            'database': {
                'description': 'Name of the database.',
                'type': 'string',
            },
            'host': {
                'description': 'Host of the server.',
                'type': 'string',
                'default': 'localhost',
            },
            'port': {
                'description': 'Port of the server.',
                'type': 'number',
                'default': 27017
            },
        }
    }

    def setup(self):
        """Open the client connection and select the target database."""
        host = self.options.host
        port = self.options.port
        self.conn = pymongo.MongoClient(host=host, port=port)
        self.db = self.conn[self.options.database]

    def get_collections(self):
        "Return a list of collection dicts in the database."
        collections = []
        for name in self.db.collection_names():
            # The internal index catalog is not user data; skip it.
            if name == 'system.indexes':
                continue
            collections.append({'name': name})
        return collections

    def get_fields(self, collection_name):
        """Return a list of field dicts in the collection.

        Runs an inline map-reduce to collect the union of field names
        across all documents in the collection.
        """
        output = self.db[collection_name].inline_map_reduce(
            map_fields, count_fields, full_response=True)
        # result['value'] / output['counts']['input'] would give each
        # field's occurrence rate across documents, if ever needed.
        return [{'name': result['_id']} for result in output['results']]

    def parse_database(self):
        """Build the entity attributes describing the database itself."""
        name = self.options.database
        return {
            'origins:ident': name,
            'prov:label': name,
            'prov:type': 'Database',
            'version': self.conn.server_info()['version']
        }

    def parse_collection(self, attrs, db):
        """Augment a collection dict with provenance attributes."""
        ident = os.path.join(db['origins:ident'], attrs['name'])
        attrs['origins:ident'] = ident
        attrs['prov:label'] = attrs['name']
        attrs['prov:type'] = 'Collection'
        attrs['database'] = db
        return attrs

    def parse_field(self, attrs, col):
        """Augment a field dict with provenance attributes."""
        ident = os.path.join(col['origins:ident'], attrs['name'])
        attrs['origins:ident'] = ident
        attrs['prov:label'] = attrs['name']
        attrs['prov:type'] = 'Field'
        # NOTE(review): the key is 'column' even though ``col`` is a
        # collection -- presumably mirroring the relational extractors;
        # confirm downstream consumers before renaming.
        attrs['column'] = col
        return attrs

    def parse(self):
        """Walk database -> collections -> fields, emitting each entity."""
        db = self.parse_database()
        self.document.add('entity', db)
        for col in self.get_collections():
            col = self.parse_collection(col, db)
            self.document.add('entity', col)
            for field in self.get_fields(col['name']):
                self.document.add('entity', self.parse_field(field, col))
| bsd-2-clause |
JVillella/tensorflow | tensorflow/tools/pip_package/simple_console_for_windows.py | 605 | 1028 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
  """Drop into an interactive Python interpreter session.

  Returns 0 as the process exit status once the console exits.
  """
  code.interact()
  return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| apache-2.0 |
ApuliaSoftware/odoo | addons/sale_analytic_plans/__openerp__.py | 262 | 1634 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sales Analytic Distribution',
'version': '1.0',
'category': 'Sales Management',
'description': """
The base module to manage analytic distribution and sales orders.
=================================================================
Using this module you will be able to link analytic accounts to sales orders.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'account_analytic_plans'],
'data': ['sale_analytic_plans_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
postfix/memorpy | memorpy/Locator.py | 2 | 3217 | # Author: Nicolas VERDIER
# This file is part of memorpy.
#
# memorpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# memorpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with memorpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import time
from Address import Address
class Locator(object):
    """
    take a memoryworker and a type to search
    then you can feed the locator with values and it will reduce the addresses possibilities

    Each call to feed() either performs a full scan (first time a type is
    seen) or re-reads the previously matching addresses, keeping only those
    that currently hold the fed value. State is Python 2 era (iteritems).
    """

    def __init__(self, mw, type = 'unknown', start = None, end = None):
        # mw: MemWorker used for scanning.
        # type: value type to search, or 'unknown' to try all scalar types.
        # start/end: optional scan bounds; default to the worker's offsets.
        self.mw = mw
        self.type = type
        self.last_iteration = {}  # maps type name -> list of candidate Address objects
        self.last_value = None    # last value passed to feed()
        self.start = start
        self.end = end

    def find(self, value, erase_last = True):
        """Alias for feed()."""
        return self.feed(value, erase_last)

    def feed(self, value, erase_last = True):
        """Narrow candidates to addresses currently holding ``value``.

        Returns the new {type: [Address, ...]} mapping; if erase_last is
        True it also replaces self.last_iteration with it.
        """
        self.last_value = value
        # Shallow copy: per-type lists stay shared with last_iteration
        # until they are replaced below.
        new_iter = copy.copy(self.last_iteration)
        if self.type == 'unknown':
            all_types = ['uint',
             'int',
             'long',
             'ulong',
             'float',
             'double',
             'short',
             'ushort']
        else:
            all_types = [self.type]
        for type in all_types:
            if type not in new_iter:
                # First pass for this type: full memory scan.
                if self.start is None:
                    self.start = self.mw.start_offset
                if self.end is None:
                    self.end = self.mw.end_offset
                new_iter[type] = [ Address(x, self.mw.process, type) for x in self.mw.mem_search(value, type, start_offset=self.start, end_offset=self.end) ]
            else:
                # Subsequent pass: re-read only the current candidates.
                l = []
                for address in new_iter[type]:
                    try:
                        found = self.mw.process.read(address, type)
                        if int(found) == int(value):
                            l.append(Address(address, self.mw.process, type))
                    except Exception as e:
                        # Best effort: addresses that became unreadable are
                        # silently dropped from the candidate list.
                        pass
                new_iter[type] = l
        if erase_last:
            del self.last_iteration
        self.last_iteration = new_iter
        return new_iter

    def get_addresses(self):
        """Return the current {type: [Address, ...]} candidate mapping."""
        return self.last_iteration

    def diff(self, erase_last = False):
        """Alias for get_modified_addr()."""
        return self.get_modified_addr(erase_last)

    def get_modified_addr(self, erase_last = False):
        """Return the candidates whose value changed since the last feed.

        Re-feeds the last value and reports addresses present before but
        absent from the new result, grouped by type. (Python 2: iteritems.)
        """
        last = self.last_iteration
        new = self.feed(self.last_value, erase_last=erase_last)
        ret = {}
        for type, l in last.iteritems():
            typeset = set(new[type])
            for addr in l:
                if addr not in typeset:
                    if type not in ret:
                        ret[type] = []
                    ret[type].append(addr)
        return ret
| gpl-3.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-256-transformer/dataset_preproc/data_generators/inspect_tfrecord.py | 7 | 3743 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Inspect a TFRecord file of tensorflow.Example and show tokenizations.
python data_generators/inspect_tfrecord.py \
--logtostderr \
--print_targets \
--subword_text_encoder_filename=$DATA_DIR/vocab.endefr.8192 \
--input_filename=$DATA_DIR/wmt_ende_tokens_8k-train-00000-of-00100
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensor2tensor.data_generators import text_encoder
import tensorflow as tf
# Command-line flags: at most one encoder option should be set (subword /
# token / byte); the print_* flags select which decoded fields are written
# to stdout while statistics are accumulated.
tf.flags.DEFINE_string("subword_text_encoder_filename", "",
                       "SubwordTextEncoder vocabulary file")
tf.flags.DEFINE_string("token_text_encoder_filename", "",
                       "TokenTextEncoder vocabulary file")
tf.flags.DEFINE_bool("byte_text_encoder", False, "use a ByteTextEncoder")
tf.flags.DEFINE_string("input_filename", "", "input filename")
tf.flags.DEFINE_bool("print_inputs", False, "Print decoded inputs to stdout")
tf.flags.DEFINE_bool("print_targets", False, "Print decoded targets to stdout")
tf.flags.DEFINE_bool("print_all", False, "Print all fields")
FLAGS = tf.flags.FLAGS
def main(_):
  """Iterate a TFRecord file of tf.train.Examples and print statistics.

  Optionally decodes and prints each record's "inputs"/"targets" fields
  using the encoder selected by the flags, then prints token, padding and
  length statistics over the whole file.
  """
  # Build the text encoder selected via flags (None = print raw token ids).
  if FLAGS.subword_text_encoder_filename:
    encoder = text_encoder.SubwordTextEncoder(
        FLAGS.subword_text_encoder_filename)
  elif FLAGS.token_text_encoder_filename:
    encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename)
  elif FLAGS.byte_text_encoder:
    encoder = text_encoder.ByteTextEncoder()
  else:
    encoder = None
  reader = tf.python_io.tf_record_iterator(FLAGS.input_filename)
  total_sequences = 0
  total_input_tokens = 0
  total_target_tokens = 0
  nonpadding_input_tokens = 0
  nonpadding_target_tokens = 0
  max_input_length = 0
  max_target_length = 0
  for record in reader:
    x = tf.train.Example()
    x.ParseFromString(record)
    inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value]
    targets = [int(i) for i in x.features.feature["targets"].int64_list.value]
    if FLAGS.print_inputs:
      # Fixed precedence bug: without parentheses the conditional applied
      # to the whole concatenation, so the "INPUTS:" label was dropped and
      # the raw list printed whenever no encoder was configured.
      print("INPUTS:\n" + (encoder.decode(inputs) if encoder else str(inputs)))
    if FLAGS.print_targets:
      # Same precedence fix as above for the "TARGETS:" label.
      print("TARGETS:\n" + (encoder.decode(targets) if encoder else str(targets)))
    # Token id 0 is padding; track padded and non-padded counts separately.
    nonpadding_input_tokens += len(inputs) - inputs.count(0)
    nonpadding_target_tokens += len(targets) - targets.count(0)
    total_input_tokens += len(inputs)
    total_target_tokens += len(targets)
    total_sequences += 1
    max_input_length = max(max_input_length, len(inputs))
    max_target_length = max(max_target_length, len(targets))
    if FLAGS.print_all:
      for k, v in six.iteritems(x.features.feature):
        print("%s: %s" % (k, v.int64_list.value))
  print("total_sequences: %d" % total_sequences)
  print("total_input_tokens: %d" % total_input_tokens)
  print("total_target_tokens: %d" % total_target_tokens)
  print("nonpadding_input_tokens: %d" % nonpadding_input_tokens)
  print("nonpadding_target_tokens: %d" % nonpadding_target_tokens)
  print("max_input_length: %d" % max_input_length)
  print("max_target_length: %d" % max_target_length)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
xsynergy510x/android_external_chromium_org | tools/perf/page_sets/mse_cases.py | 34 | 1969 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class MseCasesPage(page_module.Page):
  """A single MSE startup-test page.

  Navigation only completes once the in-page harness sets
  window.__testDone, so metrics cover the whole test run.
  """

  def __init__(self, url, page_set):
    super(MseCasesPage, self).__init__(url=url, page_set=page_set)

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # Block until the page's script signals completion.
    done_condition = 'window.__testDone == true'
    action_runner.WaitForJavaScriptCondition(done_condition)
class MseCasesPageSet(page_set_module.PageSet):
  """ Media source extensions perf benchmark """

  def __init__(self):
    super(MseCasesPageSet, self).__init__()
    # Startup test matrix: media type (AV / V / A) crossed with the
    # appendStream and doNotWaitForBodyOnLoad variants.
    test_urls = (
        'file://mse_cases/startup_test.html?testType=AV',
        'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true',
        # pylint: disable=C0301
        'file://mse_cases/startup_test.html?testType=AV&doNotWaitForBodyOnLoad=true',
        # pylint: disable=C0301
        'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true&doNotWaitForBodyOnLoad=true',
        'file://mse_cases/startup_test.html?testType=V',
        'file://mse_cases/startup_test.html?testType=V&useAppendStream=true',
        # pylint: disable=C0301
        'file://mse_cases/startup_test.html?testType=V&doNotWaitForBodyOnLoad=true',
        # pylint: disable=C0301
        'file://mse_cases/startup_test.html?testType=V&useAppendStream=true&doNotWaitForBodyOnLoad=true',
        'file://mse_cases/startup_test.html?testType=A',
        'file://mse_cases/startup_test.html?testType=A&useAppendStream=true',
        # pylint: disable=C0301
        'file://mse_cases/startup_test.html?testType=A&doNotWaitForBodyOnLoad=true',
        # pylint: disable=C0301
        'file://mse_cases/startup_test.html?testType=A&useAppendStream=true&doNotWaitForBodyOnLoad=true',
    )
    for url in test_urls:
      self.AddPage(MseCasesPage(url, self))
| bsd-3-clause |
chris-belcher/joinmarket-clientserver | jmdaemon/test/test_message_channel.py | 1 | 14248 | #! /usr/bin/env python
from __future__ import absolute_import
'''test messagechannel management code.'''
import pytest
from jmdaemon import (JMDaemonServerProtocolFactory, MessageChannelCollection)
from jmdaemon.message_channel import MChannelThread
from jmdaemon.orderbookwatch import OrderbookWatch
from jmdaemon.daemon_protocol import JMDaemonServerProtocol
from jmdaemon.protocol import (COMMAND_PREFIX, ORDER_KEYS, NICK_HASH_LENGTH,
NICK_MAX_ENCODED, JM_VERSION, JOINMARKET_NICK_HEADER)
from jmclient import get_log
import os
from jmbase.commands import *
from msgdata import *
import json
import time
import hashlib
import base64
import traceback
import threading
import jmbitcoin as bitcoin
from dummy_mc import DummyMessageChannel
from twisted.internet import reactor
jlog = get_log()
def make_valid_nick(i=0):
    """Build a syntactically valid joinmarket nick for test index ``i``."""
    # Deterministic private key derived from the index ('01' suffix marks
    # a compressed pubkey).
    priv = hashlib.sha256(chr(i)*16).hexdigest() + '01'
    pub = bitcoin.privtopub(priv)
    pkh_raw = hashlib.sha256(pub).digest()[:NICK_HASH_LENGTH]
    pkh = bitcoin.changebase(pkh_raw, 256, 58)
    # Right pad to maximum possible; b58 is not fixed length.
    # Use 'O' as one of the 4 not included chars in base58.
    pkh += 'O' * (NICK_MAX_ENCODED - len(pkh))
    # Constructed length is 1 (header) + 1 (version) + NICK_MAX_ENCODED.
    return JOINMARKET_NICK_HEADER + str(JM_VERSION) + pkh
class DummyBox(object):
    """Identity stand-in for a libsodium crypto box.

    Both directions are no-ops, so 'encrypted' traffic passes through the
    message channels unchanged during tests.
    """

    def encrypt(self, msg):
        # No-op encryption.
        return msg

    def decrypt(self, msg):
        # No-op decryption.
        return msg
class DaemonForSigns(object):
    """Stub daemon implementing the message channels' signing interface.

    The following functions handle requests and responses from client for
    message signing and verifying -- here the crypto is skipped entirely
    and messages are routed straight back into the channel collection.
    """

    def __init__(self, mcc):
        self.siglock = threading.Lock()
        self.mcc = mcc
        self.crypto_boxes = {}

    def request_signed_message(self, nick, cmd, msg, msg_to_be_signed, hostid):
        # Pretend the message was signed and privmsg it out immediately.
        with self.siglock:
            self.mcc.privmsg(nick, cmd, msg, mc=hostid)

    def request_signature_verify(self, msg, fullmsg, sig, pubkey, nick, hashlen,
                                 max_encoded, hostid):
        # Pretend verification succeeded and hand the message straight on.
        with self.siglock:
            self.mcc.on_verified_privmsg(nick, fullmsg, hostid)

    def get_crypto_box_from_nick(self, nick):
        """Return the libsodium-style encryption object stored for ``nick``,
        or None (with a debug log) when there is none."""
        entry = self.crypto_boxes.get(nick)
        if entry is not None:
            return entry[1]  # libsodium encryption object
        jlog.debug('something wrong, no crypto object, nick=' + nick +
                   ', message will be dropped')
        return None
def dummy_on_welcome():
    """Stub on_welcome callback for OrderbookWatch."""
    jlog.debug("On welcome called")

def don_error():
    """Stub taker on_error callback; logs the name of the calling frame."""
    jlog.debug("called: " + traceback.extract_stack(None, 2)[0][2])
def don_ioauth(nick, utxo_list, auth_pub, cj_addr,
               change_addr, btc_sig):
    """Stub taker on_ioauth callback: logs all arguments, str()-ified."""
    jlog.debug("onioauth callback")
    # Parenthesize the tuple: the bare "for x in a, b, ..." form is
    # Python 2-only and a SyntaxError on Python 3; the parenthesized form
    # behaves identically on both.
    jlog.debug("Args are: " + ",".join([str(x) for x in (
        nick, utxo_list, auth_pub, cj_addr, change_addr, btc_sig)]))
def don_sig(nick, sig):
    """Stub on_sig callback (reused as the on_pubkey callback below)."""
    jlog.debug("calledback on-sig")

# The pubkey callback has the same shape as the sig callback; reuse it.
don_pubkey = don_sig
def don_orderbook_requested(nick, mc):
    """Stub maker on_orderbook_requested callback."""
    jlog.debug("called oobr")

def don_commitment_seen(nick, cmt):
    """Stub maker on_commitment_seen callback; logs nick and commitment."""
    jlog.debug("called doncommitmentseen")
    jlog.debug("Nick, cmt was: " + str(nick) + " , " + str(cmt))

def don_seen_auth(nick, cr):
    """Stub maker on_seen_auth callback; logs the commitment reveal."""
    jlog.debug("called donseen auth")
    jlog.debug("Cr was: " + str(cr))

def don_push_tx(nick, txhex):
    """Stub maker on_push_tx callback."""
    jlog.debug("called donpushtx with thex: " + str(txhex))

def don_seen_tx(nick, txhex):
    """Stub maker on_seen_tx callback."""
    jlog.debug("called donseentx with txhex: " + str(txhex))

def don_commitment_transferred(nick, cmt):
    """Stub maker on_commitment_transferred callback."""
    jlog.debug("called doncommitmenttransferred")

def don_order_fill(nick, oid, amount, taker_pk, commit):
    """Stub maker on_order_fill callback; logs all arguments."""
    jlog.debug("donorderfill called with: " + ",".join(
        [str(x) for x in [nick, oid, amount, taker_pk, commit]]))
def test_setup_mc():
ob = OrderbookWatch()
ob.on_welcome = dummy_on_welcome
dmcs = [DummyMessageChannel(None, hostid="hostid"+str(x)) for x in range(3)]
mcc = MessageChannelCollection(dmcs)
#this sets orderbookwatch callbacks
ob.set_msgchan(mcc)
#we want to set all the callbacks, maker and taker
mcc.register_taker_callbacks(don_error, don_pubkey, don_ioauth, don_sig)
mcc.register_maker_callbacks(on_orderbook_requested=don_orderbook_requested,
on_order_fill=don_order_fill,
on_seen_auth=don_seen_auth, on_seen_tx=don_seen_tx,
on_push_tx=don_push_tx,
on_commitment_seen=don_commitment_seen,
on_commitment_transferred=don_commitment_transferred)
mcc.set_nick("testnick")
dummydaemon = DaemonForSigns(mcc)
mcc.set_daemon(dummydaemon)
for mc in dmcs:
mc.on_welcome(mc)
#instead of calling mcc.run, we'll start threads for mcs manually so we
#can probe them
for mc in dmcs:
MChannelThread(mc).start()
for m in dmcs:
m.on_pubmsg("testmaker", "!orderbook")
#receive invalid pubmsgs
for msg in ["!orderbook!orderbook", "!notacommand a b c", "no command prefix",
"!reloffer 0 4000 5000 100"]:
dmcs[2].on_pubmsg("testmaker", msg)
mcc.request_orderbook()
mcc.pubmsg("outward pubmsg")
#now create a verifiable counterparty nick;
#to get it into active state, need to receive an orderbook from it
cp1 = make_valid_nick()
#Simulate order receipt on 2 of 3 msgchans from this nick;
#note that it will have its active chan set to mc "1" because that
#is the last it was seen on:
dmcs[0].on_privmsg(cp1, "!reloffer 0 4000 5000 100 0.2 abc def")
dmcs[1].on_privmsg(cp1, "!reloffer 0 4000 5000 100 0.2 abc def")
time.sleep(0.5)
#send back a response
mcc.privmsg(cp1, "fill", "0")
#trigger failure to find nick in privmsg
mcc.privmsg(cp1+"XXX", "fill", "0")
#trigger check_privmsg decorator
mcc.send_error(cp1, "errormsg")
mcc.push_tx(cp1, "deadbeef")
#kill the chan on which the cp is marked active;
#note dummychannel has no actual shutdown (call it anyway),
#so change its status manually.
dmcs[2].shutdown()
mcc.mc_status[dmcs[1]] = 2
time.sleep(0.5)
#Flush removes references to inactive channels (in this case dmcs[1]).
#Dynamic switching of cp1 should occur to the other seen channel (dmcs[0]).
mcc.flush_nicks()
#force cp1 to be unseen on mc 0:
mcc.unsee_nick(cp1, dmcs[0])
del mcc.active_channels[cp1]
#try sending a privmsg again; this time it should just print a warning,
#as cp1 is not seen anywhere
mcc.send_error(cp1, "error")
#simulate order cancels (even though we have none)
mcc.cancel_orders([0,1,2])
#let cp1 be seen on mc2 without having got into active channels;
#note that this is an illegal pubmsg and is ignored for everything *except*
#nick_seen (what we need here)
dmcs[2].on_pubmsg(cp1, "random")
mcc.send_error(cp1, "error")
#Try using the proper way of setting up privsmgs
#first try without box
mcc.prepare_privmsg(cp1, "auth", "a b c")
dummydaemon.crypto_boxes[cp1] = ["a", DummyBox()]
#now conditions are correct, should succeed:
mcc.prepare_privmsg(cp1, "auth", "a b c")
#try again but this time there is no active channel
del mcc.active_channels[cp1]
mcc.prepare_privmsg(cp1, "auth", "a b c")
#try announcing orders; first public
mcc.announce_orders(t_orderbook)
#try on fake mc
mcc.announce_orders(t_orderbook, new_mc="fakemc")
#direct to one cp
mcc.announce_orders(t_orderbook, nick=cp1)
#direct to one cp on one mc
mcc.announce_orders(t_orderbook, nick=cp1, new_mc=dmcs[0])
#Next, set up 6 counterparties and fill their offers,
#send txs to them
cps = [make_valid_nick(i) for i in range(1, 7)]
#reuse t_chosen_orders data, but swap out the counterparty names
offervals = t_chosen_orders.values()
new_offers = dict(zip(cps, offervals))
#first, pretend they all showed up on all 3 mcs:
for m in dmcs:
for cp in cps:
m.on_privmsg(cp, "!reloffer 0 400000 500000 100 0.002 abc def")
#next, call main fill function
mcc.fill_orders(new_offers, 1000, "dummypubkey", "dummycommit")
#now send a dummy transaction to this same set.
#first fails with no crypto box.
mcc.send_tx(cps, "deadbeef")
#Now initialize the boxes
for c in cps:
dummydaemon.crypto_boxes[c] = ["a", DummyBox()]
mcc.send_tx(cps, "deadbeef")
#try to send the transaction to a wrong cp:
mcc.send_tx(["notrealcp"], "deadbeef")
#At this stage, dmcs0,2 should be "up" and 1 should have been reset to 1
assert mcc.mc_status[dmcs[0]] == 1
assert mcc.mc_status[dmcs[1]] == 1
assert mcc.mc_status[dmcs[2]] == 1
#Not currently used:
#simulate re-connection of dmcs[1] ; note that this code isn't used atm
#mcc.on_connect_trigger(dmcs[1])
#assert mcc.mc_status[dmcs[1]] == 1
#Now trigger disconnection code; each mc one by one; the last should trigger
#on_disconnect callback
for m in dmcs:
mcc.on_disconnect_trigger(m)
#reconnect; effect is all nick references are flushed
for m in dmcs:
mcc.on_connect_trigger(m)
assert mcc.active_channels == {}
#have the cps rearrive
for m in dmcs:
for cp in cps:
m.on_privmsg(cp, "!reloffer 0 4000 5000 100 0.2 abc def")
#####################################################################
#next series of messages are to test various normal and abnormal
#message receipts under normal connection conditions
#####################################################################
#simulate receipt of commitments
#valid
dmcs[0].on_pubmsg(cps[2], "!hp2 deadbeef")
#invalid missing field
dmcs[0].on_pubmsg(cps[2], "!hp2")
#receive commitment via privmsg to trigger commitment_transferred
dmcs[0].on_privmsg(cps[2], "!hp2 deadbeef abc def")
#simulate receipt of order cancellation
#valid
dmcs[0].on_pubmsg(cps[2], "!cancel 2")
#invalid oid
dmcs[0].on_pubmsg(cps[2], "!cancel x")
#too short privmsg (can't even have a signature)
dmcs[0].on_privmsg(cps[2], COMMAND_PREFIX)
#not using correct protocol start character
dmcs[0].on_privmsg(cps[2], "A B C")
#unrecognized command
dmcs[0].on_privmsg(cps[2], "!fakecommand A B C D")
#Perhaps dubious, but currently msg after command must be non-zero
dmcs[0].on_privmsg(cps[2], "!reloffer sig1 sig2")
#Simulating receipt of encrypted messages:
#ioauth
dummy_on_ioauth_msg = "deadbeef:0,deadbeef:1 XauthpubX XcjaddrX XchangeaddrX XbtcsigX"
b64dummyioauth = base64.b64encode(dummy_on_ioauth_msg)
dmcs[0].on_privmsg(cps[3], "!ioauth " + b64dummyioauth + " sig1 sig2")
#Try with a garbage b64 (but decodable); should throw index error at least
dmcs[0].on_privmsg(cps[3], "!ioauth _*_ sig1 sig2")
#Try also for receipt from an unknown counterparty; should fail with no enc box
dmcs[0].on_privmsg("notrealcp", "!ioauth " + b64dummyioauth + " sig1 sig2")
#Try same message from valid cp but with corrupted b64
b64dummyioauth = "999"
dmcs[0].on_privmsg(cps[3], "!ioauth " + b64dummyioauth + " sig1 sig2")
#sig
dummy_on_sig_msg = "dummysig"
b64dummysig = base64.b64encode(dummy_on_sig_msg)
dmcs[0].on_privmsg(cps[3], "!sig " + b64dummysig + " sig1 sig2")
#auth
dummy_auth_msg = "dummyauth"
b64dummyauth = base64.b64encode(dummy_auth_msg)
dmcs[0].on_privmsg(cps[2], "!auth " + b64dummyauth + " sig1 sig2")
#invalid auth (only no message is invalid)
dmcs[0].on_privmsg(cps[3], "!auth " +base64.b64encode("") + " sig1 sig2")
#tx
#valid
dummy_tx = "deadbeefdeadbeef"
b64dummytx = base64.b64encode(dummy_tx)
b642dummytx = base64.b64encode(b64dummytx)
dmcs[0].on_privmsg(cps[2], "!tx " + b642dummytx + " sig1 sig2")
badbase64tx = "999"
badbase64tx2 = base64.b64encode(badbase64tx)
#invalid txhex; here the first round will work (msg decryption), second shouldn't
dmcs[0].on_privmsg(cps[2], "!tx " + badbase64tx2 + " sig1 sig2")
#push
#valid
dmcs[0].on_privmsg(cps[2], "!push " + b642dummytx + " sig1 sig2")
#invalid
dmcs[0].on_privmsg(cps[2], "!push 999 sig1 sig2")
#fill
#valid, no commit
dmcs[0].on_privmsg(cps[4], "!fill 0 4000 dummypub sig1 sig2")
#valid with commit
dmcs[0].on_privmsg(cps[4], "!fill 0 4000 dummypub dummycommit sig1 sig2")
#invalid length
dmcs[0].on_privmsg(cps[4], "!fill 0 sig1 sig2")
#pubkey
dmcs[0].on_privmsg(cps[4], "!pubkey dummypub sig1 sig2")
##############################################################
#End message receipts
##############################################################
#simulate loss of conncetion to cp[0]
for m in dmcs[::-1]:
mcc.on_nick_leave_trigger(cps[0], m)
#call onnickleave for something not in the ac list
mcc.on_nick_leave_trigger("notrealcp", dmcs[0])
#make mcs 0,1 go down so that when cp[1] tries to dynamic switch, it fails
mcc.on_disconnect_trigger(dmcs[0])
mcc.on_disconnect_trigger(dmcs[1])
mcc.on_nick_leave_trigger(cps[1], dmcs[2])
mcc.shutdown()
@pytest.mark.parametrize(
    "failuretype, mcindex, wait",
    [("shutdown", 0, 1),
     ("break", 1, 1),
     ("bad", 1, 1),
    ])
def test_mc_run(failuretype, mcindex, wait):
    """
    Run a MessageChannelCollection in a background thread while a failure of
    the parametrized ``failuretype`` is simulated on channel ``mcindex``.

    NOTE(review): the visible portion only builds the collection, starts the
    run-thread and sleeps; the failure injection presumably follows — confirm
    against the complete file.
    """
    ob = OrderbookWatch()
    # Replace the real welcome handler with a no-op stub for the test.
    ob.on_welcome = dummy_on_welcome
    dmcs = [DummyMessageChannel(None, hostid="hostid"+str(x)) for x in range(3)]
    mcc = MessageChannelCollection(dmcs)
    #this sets orderbookwatch callbacks
    ob.set_msgchan(mcc)
    dummydaemon = DaemonForSigns(mcc)
    mcc.set_daemon(dummydaemon)
    #need to override thread run() so the *collection* (not a single channel)
    #is run in the background thread.
    class FIThread(MChannelThread):
        def run(self):
            self.mc.run()
    fi = FIThread(mcc)
    fi.start()
    # Give the collection time to come up before the test proceeds.
    time.sleep(wait+0.5)
| gpl-3.0 |
pmisik/buildbot | master/buildbot/test/unit/changes/test_filter.py | 6 | 6044 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from twisted.trial import unittest
from buildbot.changes import filter
from buildbot.test.fake.change import Change
class ChangeFilter(unittest.TestCase):
    """
    Tests for L{buildbot.changes.filter.ChangeFilter}.

    Each test builds a filter with C{setfilter}, records expected outcomes
    for a series of changes with C{yes}/C{no}, and verifies them all at once
    with C{check}.
    """

    def setUp(self):
        self.results = []  # (got, expected, msg) triples, consumed by check()
        self.filt = None

    def tearDown(self):
        # A non-empty results list means yes()/no() was called but the test
        # never verified the recorded outcomes.
        if self.results:
            raise RuntimeError("test forgot to call check()")

    def setfilter(self, **kwargs):
        """
        Construct the L{filter.ChangeFilter} under test.
        """
        self.filt = filter.ChangeFilter(**kwargs)

    def yes(self, change, msg):
        """
        Record the expectation that C{change} passes the filter.
        """
        self.results.append((self.filt.filter_change(change), True, msg))

    def no(self, change, msg):
        """
        Record the expectation that C{change} does not pass the filter.
        """
        self.results.append((self.filt.filter_change(change), False, msg))

    def check(self):
        """
        Fail with the messages of every recorded expectation that was not met.
        """
        errs = []
        for r in self.results:
            # XOR: exactly one of (actual, expected) is truthy -> mismatch.
            if (r[0] or r[1]) and not (r[0] and r[1]):
                errs.append(r[2])
        self.results = []
        if errs:
            self.fail("; ".join(errs))

    def test_filter_change_filter_fn(self):
        self.setfilter(filter_fn=lambda ch: ch.x > 3)
        self.no(Change(x=2), "filter_fn returns False")
        self.yes(Change(x=4), "filter_fn returns True")
        self.check()

    def test_filter_change_filt_str(self):
        self.setfilter(project="myproj")
        self.no(Change(project="yourproj"),
                "non-matching PROJECT returns False")
        self.yes(Change(project="myproj"), "matching PROJECT returns True")
        self.check()

    def test_filter_change_filt_list(self):
        self.setfilter(repository=["vc://a", "vc://b"])
        self.yes(Change(repository="vc://a"),
                 "matching REPOSITORY vc://a returns True")
        self.yes(Change(repository="vc://b"),
                 "matching REPOSITORY vc://b returns True")
        self.no(Change(repository="vc://c"),
                "non-matching REPOSITORY returns False")
        self.no(Change(repository=None), "None for REPOSITORY returns False")
        self.check()

    def test_filter_change_filt_list_None(self):
        self.setfilter(branch=["mybr", None])
        self.yes(Change(branch="mybr"), "matching BRANCH mybr returns True")
        self.yes(Change(branch=None), "matching BRANCH None returns True")
        self.no(Change(branch="misc"), "non-matching BRANCH returns False")
        self.check()

    def test_filter_change_filt_re(self):
        self.setfilter(category_re="^a.*")
        self.yes(Change(category="albert"), "matching CATEGORY returns True")
        self.no(
            Change(category="boris"), "non-matching CATEGORY returns False")
        self.check()

    def test_filter_change_branch_re(self):  # regression - see #927
        self.setfilter(branch_re="^t.*")
        self.yes(Change(branch="trunk"), "matching BRANCH returns True")
        self.no(Change(branch="development"),
                "non-matching BRANCH returns False")
        self.no(Change(branch=None), "branch=None returns False")
        self.check()

    def test_filter_change_filt_re_compiled(self):
        self.setfilter(category_re=re.compile("^b.*", re.I))
        self.no(Change(category="albert"),
                "non-matching CATEGORY returns False")
        self.yes(Change(category="boris"), "matching CATEGORY returns True")
        self.yes(
            Change(category="Bruce"), "matching CATEGORY returns True, using re.I")
        self.check()

    def test_filter_change_combination(self):
        self.setfilter(project='p', repository='r', branch='b', category='c',
                       codebase='cb')
        self.no(Change(project='x', repository='x', branch='x', category='x'),
                "none match -> False")
        self.no(Change(project='p', repository='r', branch='b', category='x'),
                "three match -> False")
        self.no(Change(project='p', repository='r', branch='b', category='c',
                       codebase='x'), "four match -> False")
        self.yes(Change(project='p', repository='r', branch='b', category='c',
                        codebase='cb'), "all match -> True")
        self.check()

    def test_filter_change_combination_filter_fn(self):
        self.setfilter(project='p', repository='r', branch='b', category='c',
                       filter_fn=lambda c: c.ff)
        self.no(Change(project='x', repository='x', branch='x', category='x', ff=False),
                "none match and fn returns False -> False")
        self.no(Change(project='p', repository='r', branch='b', category='c', ff=False),
                "all match and fn returns False -> False")
        self.no(Change(project='x', repository='x', branch='x', category='x', ff=True),
                "none match and fn returns True -> False")
        # BUGFIX: this is a `yes` expectation; the message previously read
        # "-> False" (copied from the `no` cases above).
        self.yes(Change(project='p', repository='r', branch='b', category='c', ff=True),
                 "all match and fn returns True -> True")
        self.check()

    def test_filter_props(self):
        self.setfilter()
        self.filt.checks.update(
            self.filt.createChecks(
                ("ref-updated", None, None, "prop:event.type"),
            ))
        self.yes(
            Change(properties={'event.type': 'ref-updated'}), "matching property")
        self.no(
            Change(properties={'event.type': 'patch-uploaded'}), "non matching property")
        self.no(Change(properties={}), "no property")
        self.check()
| gpl-2.0 |
demarle/VTK | ThirdParty/Twisted/twisted/trial/test/test_tests.py | 23 | 48837 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the behaviour of unit tests.
Many tests in this module follow a simple pattern. A mixin is defined which
includes test methods for a certain feature. The mixin is inherited from twice,
once by a class also inheriting from SynchronousTestCase and once from a class
inheriting from TestCase. These two subclasses are named like
I{SynchronousFooTests} and I{AsynchronousFooTests}, where I{Foo} is related to
the name of the mixin. Sometimes the mixin is defined in another module, along
with the synchronous subclass. The mixin is imported into this module to define
the asynchronous subclass.
This pattern allows the same tests to be applied to the two base test case
classes trial provides, ensuring their behavior is the same.
Most new tests should be added in this pattern. Tests for functionality which
is intentionally only provided by TestCase, not SynchronousTestCase, is excepted
of course.
"""
from __future__ import division, absolute_import
import gc, sys, weakref
import unittest as pyunit
from twisted.python.compat import _PY3, NativeStringIO
from twisted.internet import defer, reactor
from twisted.trial import unittest, reporter, util
if not _PY3:
from twisted.trial import runner
from twisted.trial.test import erroneous
from twisted.trial.test.test_suppression import SuppressionMixin
# Skip messages that are used in multiple places:
# applied as a class-level `skip` attribute on test classes below that cannot
# run on Python 3 until the runner/reporter ports land.
_PY3PORTNEEDED = "Requires runner and/or reporter to be ported (#5964, #5965)"
class ResultsTestMixin(object):
    """
    Provide useful APIs for test cases that are about test cases.
    """
    def loadSuite(self, suite):
        """
        Load tests from the given test case class and create a new reporter to
        use for running it.
        """
        self.loader = pyunit.TestLoader()
        self.suite = self.loader.loadTestsFromTestCase(suite)
        self.reporter = reporter.TestResult()

    def test_setUp(self):
        """
        The freshly created reporter is successful and has recorded no
        errors, failures or skips.
        """
        # assertTrue instead of the deprecated failUnless alias, for
        # consistency with the rest of this module.
        self.assertTrue(self.reporter.wasSuccessful())
        self.assertEqual(self.reporter.errors, [])
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(self.reporter.skips, [])

    def assertCount(self, numTests):
        """
        Assert that the suite counts C{numTests} test cases and that running
        it runs exactly that many tests.
        """
        self.assertEqual(self.suite.countTestCases(), numTests)
        self.suite(self.reporter)
        self.assertEqual(self.reporter.testsRun, numTests)
class SuccessMixin(object):
    """
    Tests for the reporting of successful tests in
    L{twisted.trial.unittest.TestCase}.
    """
    def setUp(self):
        """
        Create the L{reporter.TestResult} the tests report into.
        """
        self.result = reporter.TestResult()

    def test_successful(self):
        """
        A successful test, used by other tests.
        """

    def assertSuccessful(self, test, result):
        """
        Utility function -- assert there is exactly one success and that
        nothing else (failures, errors, todos, skips) was recorded.
        """
        self.assertEqual(result.successes, 1)
        self.assertEqual(result.failures, [])
        self.assertEqual(result.errors, [])
        self.assertEqual(result.expectedFailures, [])
        self.assertEqual(result.unexpectedSuccesses, [])
        self.assertEqual(result.skips, [])

    def test_successfulIsReported(self):
        """
        Test that when a successful test is run, it is reported as a success,
        and not as any other kind of result.
        """
        test = self.__class__('test_successful')
        test.run(self.result)
        self.assertSuccessful(test, self.result)

    def test_defaultIsSuccessful(self):
        """
        The test case type can be instantiated with no arguments, run, and
        reported as being successful.
        """
        test = self.__class__()
        test.run(self.result)
        self.assertSuccessful(test, self.result)

    def test_noReference(self):
        """
        Test that no reference is kept on a successful test.
        """
        test = self.__class__('test_successful')
        ref = weakref.ref(test)
        test.run(self.result)
        self.assertSuccessful(test, self.result)
        del test
        gc.collect()
        # After the only strong reference is dropped, the weakref must be
        # dead: trial kept no reference to the successful test.
        self.assertIdentical(ref(), None)
class SynchronousSuccessTests(SuccessMixin, unittest.SynchronousTestCase):
    """
    Tests for the reporting of successful tests in the synchronous case.
    """
class AsynchronousSuccessTests(SuccessMixin, unittest.TestCase):
    """
    Tests for the reporting of successful tests in the asynchronous case.
    """
class SkipMethodsMixin(ResultsTestMixin):
    """
    Tests for the reporting of skipping tests in
    L{twisted.trial.unittest.TestCase}.

    The C{Skipping}, C{SkippingSetUp} and C{DeprecatedReasonlessSkip} test
    case classes are provided by the concrete subclasses.
    """
    def setUp(self):
        """
        Load the skipping test cases into a suite with a fresh reporter.
        """
        self.loadSuite(self.Skipping)

    def test_counting(self):
        """
        Assert that there are three tests.
        """
        self.assertCount(3)

    def test_results(self):
        """
        Running a suite in which all methods are individually set to skip
        produces a successful result with no recorded errors or failures, all
        the skipped methods recorded as skips, and no methods recorded as
        successes.
        """
        self.suite(self.reporter)
        self.assertTrue(self.reporter.wasSuccessful())
        self.assertEqual(self.reporter.errors, [])
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(len(self.reporter.skips), 3)
        self.assertEqual(self.reporter.successes, 0)

    def test_setUp(self):
        """
        Running a suite in which all methods are skipped by C{setUp} raising
        L{SkipTest} produces a successful result with no recorded errors or
        failures, all skipped methods recorded as skips, and no methods recorded
        as successes.
        """
        self.loadSuite(self.SkippingSetUp)
        self.suite(self.reporter)
        self.assertTrue(self.reporter.wasSuccessful())
        self.assertEqual(self.reporter.errors, [])
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(len(self.reporter.skips), 2)
        self.assertEqual(self.reporter.successes, 0)

    def test_reasons(self):
        """
        Each skip reason matches the skipped method's short description
        (with the 'test_' prefix removed).
        """
        self.suite(self.reporter)
        prefix = 'test_'
        # whiteboxing reporter: skips is a list of (test, reason) pairs.
        for test, reason in self.reporter.skips:
            self.assertEqual(test.shortDescription()[len(prefix):],
                             str(reason))

    def test_deprecatedSkipWithoutReason(self):
        """
        If a test method raises L{SkipTest} with no reason, a deprecation
        warning is emitted.
        """
        self.loadSuite(self.DeprecatedReasonlessSkip)
        self.suite(self.reporter)
        warnings = self.flushWarnings([
            self.DeprecatedReasonlessSkip.test_1])
        self.assertEqual(1, len(warnings))
        self.assertEqual(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(
            "Do not raise unittest.SkipTest with no arguments! Give a reason "
            "for skipping tests!",
            warnings[0]['message'])
class SynchronousSkipMethodTests(SkipMethodsMixin, unittest.SynchronousTestCase):
    """
    Tests for the reporting of skipping tests in the synchronous case.
    See: L{twisted.trial.test.test_tests.SkipMethodsMixin}
    """
    # Class-body import binds the fixture classes as class attributes
    # used by the mixin (self.Skipping, etc.).
    from twisted.trial.test.skipping import (
        SynchronousSkipping as Skipping,
        SynchronousSkippingSetUp as SkippingSetUp,
        SynchronousDeprecatedReasonlessSkip as DeprecatedReasonlessSkip)
class AsynchronousSkipMethodTests(SkipMethodsMixin, unittest.TestCase):
    """
    Tests for the reporting of skipping tests in the asynchronous case.
    See: L{twisted.trial.test.test_tests.SkipMethodsMixin}
    """
    # Class-body import binds the fixture classes as class attributes
    # used by the mixin (self.Skipping, etc.).
    from twisted.trial.test.skipping import (
        AsynchronousSkipping as Skipping,
        AsynchronousSkippingSetUp as SkippingSetUp,
        AsynchronousDeprecatedReasonlessSkip as DeprecatedReasonlessSkip)
class SkipClassesMixin(ResultsTestMixin):
    """
    Test the class skipping features of L{twisted.trial.unittest.TestCase}.

    The C{SkippedClass} fixture is provided by the concrete subclasses.
    """
    def setUp(self):
        """
        Load the skipped class's tests and reset its setUp marker.
        """
        self.loadSuite(self.SkippedClass)
        self.SkippedClass._setUpRan = False

    def test_counting(self):
        """
        Skipped test methods still contribute to the total test count.
        """
        self.assertCount(4)

    def test_setUpRan(self):
        """
        The C{setUp} method is not called if the class is set to skip.
        """
        self.suite(self.reporter)
        self.assertFalse(self.SkippedClass._setUpRan)

    def test_results(self):
        """
        Skipped test methods don't cause C{wasSuccessful} to return C{False},
        nor do they contribute to the C{errors} or C{failures} of the reporter,
        or to the count of successes. They do, however, add elements to the
        reporter's C{skips} list.
        """
        self.suite(self.reporter)
        self.assertTrue(self.reporter.wasSuccessful())
        self.assertEqual(self.reporter.errors, [])
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(len(self.reporter.skips), 4)
        self.assertEqual(self.reporter.successes, 0)

    def test_reasons(self):
        """
        Test methods which raise L{unittest.SkipTest} or have their C{skip}
        attribute set to something are skipped.
        """
        self.suite(self.reporter)
        expectedReasons = ['class', 'skip2', 'class', 'class']
        # whitebox reporter: skips is a list of (test, reason) pairs.
        reasonsGiven = [reason for test, reason in self.reporter.skips]
        self.assertEqual(expectedReasons, reasonsGiven)
class SynchronousSkipClassTests(SkipClassesMixin, unittest.SynchronousTestCase):
    """
    Test the class skipping features in the synchronous case.
    See: L{twisted.trial.test.test_tests.SkipClassesMixin}
    """
    # Class-body import binds the fixture class used by the mixin.
    from twisted.trial.test.skipping import (
        SynchronousSkippedClass as SkippedClass)
class AsynchronousSkipClassTests(SkipClassesMixin, unittest.TestCase):
    """
    Test the class skipping features in the asynchronous case.
    See: L{twisted.trial.test.test_tests.SkipClassesMixin}
    """
    # Class-body import binds the fixture class used by the mixin.
    from twisted.trial.test.skipping import (
        AsynchronousSkippedClass as SkippedClass)
class TodoMixin(ResultsTestMixin):
    """
    Tests for the individual test method I{expected failure} features of
    L{twisted.trial.unittest.TestCase}.

    The C{Todo}, C{SetUpTodo} and C{TearDownTodo} fixtures are provided by
    the concrete subclasses.
    """
    def setUp(self):
        """
        Load the todo-marked test cases into a suite with a fresh reporter.
        """
        self.loadSuite(self.Todo)

    def test_counting(self):
        """
        Ensure that we've got three test cases.
        """
        self.assertCount(3)

    def test_results(self):
        """
        Running a suite in which all methods are individually marked as expected
        to fail produces a successful result with no recorded errors, failures,
        or skips, all methods which fail and were expected to fail recorded as
        C{expectedFailures}, and all methods which pass but which were expected
        to fail recorded as C{unexpectedSuccesses}. Additionally, no tests are
        recorded as successes.
        """
        self.suite(self.reporter)
        self.assertTrue(self.reporter.wasSuccessful())
        self.assertEqual(self.reporter.errors, [])
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(self.reporter.skips, [])
        self.assertEqual(len(self.reporter.expectedFailures), 2)
        self.assertEqual(len(self.reporter.unexpectedSuccesses), 1)
        self.assertEqual(self.reporter.successes, 0)

    def test_expectedFailures(self):
        """
        Ensure that expected failures are handled properly.
        """
        self.suite(self.reporter)
        expectedReasons = ['todo1', 'todo2']
        # expectedFailures entries are (test, error, todo) triples.
        reasonsGiven = [ r.reason
                         for t, e, r in self.reporter.expectedFailures ]
        self.assertEqual(expectedReasons, reasonsGiven)

    def test_unexpectedSuccesses(self):
        """
        Ensure that unexpected successes are caught.
        """
        self.suite(self.reporter)
        expectedReasons = ['todo3']
        # unexpectedSuccesses entries are (test, todo) pairs.
        reasonsGiven = [ r.reason
                         for t, r in self.reporter.unexpectedSuccesses ]
        self.assertEqual(expectedReasons, reasonsGiven)

    def test_expectedSetUpFailure(self):
        """
        C{setUp} is excluded from the failure expectation defined by a C{todo}
        attribute on a test method.
        """
        self.loadSuite(self.SetUpTodo)
        self.suite(self.reporter)
        self.assertFalse(self.reporter.wasSuccessful())
        self.assertEqual(len(self.reporter.errors), 1)
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(self.reporter.skips, [])
        self.assertEqual(len(self.reporter.expectedFailures), 0)
        self.assertEqual(len(self.reporter.unexpectedSuccesses), 0)
        self.assertEqual(self.reporter.successes, 0)

    def test_expectedTearDownFailure(self):
        """
        C{tearDown} is excluded from the failure expectation defined by a C{todo}
        attribute on a test method.
        """
        self.loadSuite(self.TearDownTodo)
        self.suite(self.reporter)
        self.assertFalse(self.reporter.wasSuccessful())
        self.assertEqual(len(self.reporter.errors), 1)
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(self.reporter.skips, [])
        self.assertEqual(len(self.reporter.expectedFailures), 0)
        # This seems strange, since tearDown raised an exception. However, the
        # test method did complete without error. The tearDown error is
        # reflected in the errors list, checked above.
        self.assertEqual(len(self.reporter.unexpectedSuccesses), 1)
        self.assertEqual(self.reporter.successes, 0)
class SynchronousTodoTests(TodoMixin, unittest.SynchronousTestCase):
    """
    Test the I{expected failure} features in the synchronous case.
    See: L{twisted.trial.test.test_tests.TodoMixin}
    """
    # Class-body import binds the fixture classes used by the mixin.
    from twisted.trial.test.skipping import (
        SynchronousTodo as Todo,
        SynchronousSetUpTodo as SetUpTodo,
        SynchronousTearDownTodo as TearDownTodo)
class AsynchronousTodoTests(TodoMixin, unittest.TestCase):
    """
    Test the I{expected failure} features in the asynchronous case.
    See: L{twisted.trial.test.test_tests.TodoMixin}
    """
    # Class-body import binds the fixture classes used by the mixin.
    from twisted.trial.test.skipping import (
        AsynchronousTodo as Todo,
        AsynchronousSetUpTodo as SetUpTodo,
        AsynchronousTearDownTodo as TearDownTodo)
class ClassTodoMixin(ResultsTestMixin):
    """
    Tests for the class-wide I{expected failure} features of
    L{twisted.trial.unittest.TestCase}.

    The C{TodoClass} fixture is provided by the concrete subclasses.
    """
    def setUp(self):
        """
        Load the todo-marked class's tests into a suite with a fresh reporter.
        """
        self.loadSuite(self.TodoClass)

    def test_counting(self):
        """
        Ensure that we've got four test cases.
        """
        self.assertCount(4)

    def test_results(self):
        """
        Running a suite in which an entire class is marked as expected to fail
        produces a successful result with no recorded errors, failures, or
        skips, all methods which fail and were expected to fail recorded as
        C{expectedFailures}, and all methods which pass but which were expected
        to fail recorded as C{unexpectedSuccesses}. Additionally, no tests are
        recorded as successes.
        """
        self.suite(self.reporter)
        self.assertTrue(self.reporter.wasSuccessful())
        self.assertEqual(self.reporter.errors, [])
        self.assertEqual(self.reporter.failures, [])
        self.assertEqual(self.reporter.skips, [])
        self.assertEqual(len(self.reporter.expectedFailures), 2)
        self.assertEqual(len(self.reporter.unexpectedSuccesses), 2)
        self.assertEqual(self.reporter.successes, 0)

    def test_expectedFailures(self):
        """
        Ensure that expected failures are handled properly.
        """
        self.suite(self.reporter)
        # A method-level todo overrides the class-level one, hence 'method'.
        expectedReasons = ['method', 'class']
        reasonsGiven = [ r.reason
                         for t, e, r in self.reporter.expectedFailures ]
        self.assertEqual(expectedReasons, reasonsGiven)

    def test_unexpectedSuccesses(self):
        """
        Ensure that unexpected successes are caught.
        """
        self.suite(self.reporter)
        expectedReasons = ['method', 'class']
        reasonsGiven = [ r.reason
                         for t, r in self.reporter.unexpectedSuccesses ]
        self.assertEqual(expectedReasons, reasonsGiven)
class SynchronousClassTodoTests(ClassTodoMixin, unittest.SynchronousTestCase):
    """
    Tests for the class-wide I{expected failure} features in the synchronous case.
    See: L{twisted.trial.test.test_tests.ClassTodoMixin}
    """
    # Class-body import binds the fixture class used by the mixin.
    from twisted.trial.test.skipping import (
        SynchronousTodoClass as TodoClass)
class AsynchronousClassTodoTests(ClassTodoMixin, unittest.TestCase):
    """
    Tests for the class-wide I{expected failure} features in the asynchronous case.
    See: L{twisted.trial.test.test_tests.ClassTodoMixin}
    """
    # Class-body import binds the fixture class used by the mixin.
    from twisted.trial.test.skipping import (
        AsynchronousTodoClass as TodoClass)
class StrictTodoMixin(ResultsTestMixin):
    """
    Tests for the I{expected failure} features of
    L{twisted.trial.unittest.TestCase} in which the exact failure which is
    expected is indicated.

    The C{StrictTodo} fixture is provided by the concrete subclasses.
    """
    def setUp(self):
        """
        Load the strict-todo test cases into a suite with a fresh reporter.
        """
        self.loadSuite(self.StrictTodo)

    def test_counting(self):
        """
        Assert there are seven test cases
        """
        self.assertCount(7)

    def test_results(self):
        """
        A test method which is marked as expected to fail with a particular
        exception is only counted as an expected failure if it does fail with
        that exception, not if it fails with some other exception.
        """
        self.suite(self.reporter)
        self.assertFalse(self.reporter.wasSuccessful())
        self.assertEqual(len(self.reporter.errors), 2)
        self.assertEqual(len(self.reporter.failures), 1)
        self.assertEqual(len(self.reporter.expectedFailures), 3)
        self.assertEqual(len(self.reporter.unexpectedSuccesses), 1)
        self.assertEqual(self.reporter.successes, 0)
        self.assertEqual(self.reporter.skips, [])

    def test_expectedFailures(self):
        """
        Ensure that expected failures are handled properly.
        """
        self.suite(self.reporter)
        expectedReasons = ['todo1', 'todo2', 'todo5']
        reasonsGotten = [ r.reason
                          for t, e, r in self.reporter.expectedFailures ]
        self.assertEqual(expectedReasons, reasonsGotten)

    def test_unexpectedSuccesses(self):
        """
        Ensure that unexpected successes are caught.
        """
        self.suite(self.reporter)
        # Each entry pairs the expected exception types with the todo reason.
        expectedReasons = [([RuntimeError], 'todo7')]
        reasonsGotten = [ (r.errors, r.reason)
                          for t, r in self.reporter.unexpectedSuccesses ]
        self.assertEqual(expectedReasons, reasonsGotten)
class SynchronousStrictTodoTests(StrictTodoMixin, unittest.SynchronousTestCase):
    """
    Tests for the expected failure case when the exact failure that is expected
    is indicated in the synchronous case
    See: L{twisted.trial.test.test_tests.StrictTodoMixin}
    """
    # Class-body import binds the fixture class used by the mixin.
    from twisted.trial.test.skipping import (
        SynchronousStrictTodo as StrictTodo)
class AsynchronousStrictTodoTests(StrictTodoMixin, unittest.TestCase):
    """
    Tests for the expected failure case when the exact failure that is expected
    is indicated in the asynchronous case
    See: L{twisted.trial.test.test_tests.StrictTodoMixin}
    """
    # Class-body import binds the fixture class used by the mixin.
    from twisted.trial.test.skipping import (
        AsynchronousStrictTodo as StrictTodo)
class TestReactorCleanup(unittest.SynchronousTestCase):
    """
    Tests for cleanup and reporting of reactor event sources left behind by
    test methods.
    """
    if _PY3:
        skip = _PY3PORTNEEDED

    def setUp(self):
        """
        Create a reporter writing to an in-memory stream, and a trial test
        loader.
        """
        self.result = reporter.Reporter(NativeStringIO())
        self.loader = runner.TestLoader()

    def test_leftoverSockets(self):
        """
        Trial reports a L{util.DirtyReactorAggregateError} if a test leaves
        sockets behind.
        """
        suite = self.loader.loadMethod(
            erroneous.SocketOpenTest.test_socketsLeftOpen)
        suite.run(self.result)
        # assertFalse/assertTrue instead of the deprecated failIf/failUnless
        # aliases, consistently with the rest of this module.
        self.assertFalse(self.result.wasSuccessful())
        # socket cleanup happens at end of class's tests.
        # all the tests in the class are successful, even if the suite
        # fails
        self.assertEqual(self.result.successes, 1)
        failure = self.result.errors[0][1]
        self.assertTrue(failure.check(util.DirtyReactorAggregateError))

    def test_leftoverPendingCalls(self):
        """
        Trial reports a L{util.DirtyReactorAggregateError} and fails the test
        if a test leaves a L{DelayedCall} hanging.
        """
        suite = erroneous.ReactorCleanupTests('test_leftoverPendingCalls')
        suite.run(self.result)
        self.assertFalse(self.result.wasSuccessful())
        failure = self.result.errors[0][1]
        self.assertEqual(self.result.successes, 0)
        self.assertTrue(failure.check(util.DirtyReactorAggregateError))
class FixtureMixin(object):
    """
    Tests for broken fixture helper methods (e.g. setUp, tearDown).

    The broken fixture classes (C{TestFailureInSetUp},
    C{TestFailureInTearDown}) are supplied by the concrete subclasses.
    """
    def setUp(self):
        """
        Create a fresh reporter and loader for each test.
        """
        self.reporter = reporter.Reporter()
        self.loader = pyunit.TestLoader()

    def _assertFixtureError(self, brokenCaseClass):
        # Run every test from brokenCaseClass and verify the broken fixture
        # is recorded as a FoolishError, with no successes counted.
        brokenSuite = self.loader.loadTestsFromTestCase(brokenCaseClass)
        brokenSuite.run(self.reporter)
        recordedErrors = self.reporter.errors
        self.assertTrue(len(recordedErrors) > 0)
        self.assertIsInstance(recordedErrors[0][1].value, erroneous.FoolishError)
        self.assertEqual(0, self.reporter.successes)

    def test_brokenSetUp(self):
        """
        When setUp fails, the error is recorded in the result object.
        """
        self._assertFixtureError(self.TestFailureInSetUp)

    def test_brokenTearDown(self):
        """
        When tearDown fails, the error is recorded in the result object.
        """
        self._assertFixtureError(self.TestFailureInTearDown)
class SynchronousFixtureTest(FixtureMixin, unittest.SynchronousTestCase):
    """
    Tests for broken fixture helper methods in the synchronous case
    See: L{twisted.trial.test.test_tests.FixtureMixin}
    """
    # Class-body import binds the broken fixture classes used by the mixin.
    from twisted.trial.test.erroneous import (
        SynchronousTestFailureInSetUp as TestFailureInSetUp,
        SynchronousTestFailureInTearDown as TestFailureInTearDown)
class AsynchronousFixtureTest(FixtureMixin, unittest.TestCase):
    """
    Tests for broken fixture helper methods in the asynchronous case
    See: L{twisted.trial.test.test_tests.FixtureMixin}
    """
    # Class-body import binds the broken fixture classes used by the mixin.
    from twisted.trial.test.erroneous import (
        AsynchronousTestFailureInSetUp as TestFailureInSetUp,
        AsynchronousTestFailureInTearDown as TestFailureInTearDown)
class AsynchronousSuppressionTest(SuppressionMixin, unittest.TestCase):
    """
    Tests for the warning suppression features of
    L{twisted.trial.unittest.TestCase}
    See L{twisted.trial.test.test_suppression.SuppressionMixin}
    """
    # Class-body import binds the suppression fixture classes used by the
    # mixin (the synchronous counterpart lives in test_suppression).
    from twisted.trial.test.suppression import (
        AsynchronousTestSetUpSuppression as TestSetUpSuppression,
        AsynchronousTestTearDownSuppression as TestTearDownSuppression,
        AsynchronousTestSuppression as TestSuppression,
        AsynchronousTestSuppression2 as TestSuppression2)
class GCMixin:
    """
    I provide a few mock tests that log setUp, tearDown, test execution and
    garbage collection. I'm used to test whether gc.collect gets called.
    """
    if _PY3:
        skip = _PY3PORTNEEDED

    class BasicTest(unittest.SynchronousTestCase):
        """
        Mock test to run.
        """
        def setUp(self):
            """
            Mock setUp
            """
            self._log('setUp')

        def test_foo(self):
            """
            Mock test case
            """
            self._log('test')

        def tearDown(self):
            """
            Mock tearDown
            """
            self._log('tearDown')

    def _log(self, msg):
        """
        Log function
        """
        self._collectCalled.append(msg)

    def collect(self):
        """Fake gc.collect"""
        self._log('collect')

    def setUp(self):
        """
        Setup our test case
        """
        self._collectCalled = []
        # Assigns to the BasicTest *class*, so the mock test's _log feeds
        # this instance's event list for the duration of the test.
        self.BasicTest._log = self._log
        # Replace gc.collect with the logging fake; restored in tearDown.
        self._oldCollect = gc.collect
        gc.collect = self.collect

    def tearDown(self):
        """
        Tear down the test
        """
        gc.collect = self._oldCollect
class TestGarbageCollectionDefault(GCMixin, unittest.SynchronousTestCase):
    """
    By default, tests should not force garbage collection.
    """

    def test_collectNotDefault(self):
        """
        By default, tests should not force garbage collection.
        """
        case = self.BasicTest('test_foo')
        outcome = reporter.TestResult()
        case.run(outcome)
        # No 'collect' entries: the gc.collect fake was never invoked.
        self.assertEqual(['setUp', 'test', 'tearDown'], self._collectCalled)
class TestGarbageCollection(GCMixin, unittest.SynchronousTestCase):
    """
    Test that, when force GC, it works.
    """

    def test_collectCalled(self):
        """
        test gc.collect is called before and after each test.
        """
        # Use self.BasicTest (inherited from GCMixin) for consistency with
        # TestGarbageCollectionDefault; it resolves to the same class object
        # as the previously hard-coded TestGarbageCollection.BasicTest.
        test = self.BasicTest('test_foo')
        test = unittest._ForceGarbageCollectionDecorator(test)
        result = reporter.TestResult()
        test.run(result)
        # The decorator collects once before setUp and once after tearDown.
        self.assertEqual(
            self._collectCalled,
            ['collect', 'setUp', 'test', 'tearDown', 'collect'])
class TestUnhandledDeferred(unittest.SynchronousTestCase):
    """
    Test what happens when we have an unhandled deferred left around after
    a test.
    """
    if _PY3:
        skip = _PY3PORTNEEDED

    def setUp(self):
        """
        Setup our test case
        """
        from twisted.trial.test import weird
        # test_unhandledDeferred creates a cycle. we need explicit control of gc
        gc.disable()
        self.test1 = unittest._ForceGarbageCollectionDecorator(
            weird.TestBleeding('test_unhandledDeferred'))

    def test_isReported(self):
        """
        Forcing garbage collection should cause unhandled Deferreds to be
        reported as errors.
        """
        result = reporter.TestResult()
        self.test1(result)
        self.assertEqual(len(result.errors), 1,
                         'Unhandled deferred passed without notice')

    def test_doesntBleed(self):
        """
        Forcing garbage collection in the test should mean that there are
        no unreachable cycles immediately after the test completes.
        """
        result = reporter.TestResult()
        self.test1(result)
        self.flushLoggedErrors()  # test1 logs errors that get caught by us.
        # test1 created unreachable cycle.
        # it & all others should have been collected by now.
        n = gc.collect()
        self.assertEqual(n, 0, 'unreachable cycle still existed')
        # check that last gc.collect didn't log more errors
        x = self.flushLoggedErrors()
        self.assertEqual(len(x), 0, 'Errors logged after gc.collect')

    def tearDown(self):
        """
        Tear down the test
        """
        # Re-enable automatic collection disabled in setUp and drain any
        # remaining logged errors so they don't fail a later test.
        gc.collect()
        gc.enable()
        self.flushLoggedErrors()
class AddCleanupMixin(object):
    """
    Test the addCleanup method of TestCase.

    Subclasses provide an C{AddCleanup} attribute: a TestCase whose fixture
    methods append their names to C{log}.
    """
    def setUp(self):
        """
        Setup our test case
        """
        super(AddCleanupMixin, self).setUp()
        self.result = reporter.TestResult()
        self.test = self.AddCleanup()

    def test_addCleanupCalledIfSetUpFails(self):
        """
        Callables added with C{addCleanup} are run even if setUp fails.
        """
        self.test.setUp = self.test.brokenSetUp
        self.test.addCleanup(self.test.append, 'foo')
        self.test.run(self.result)
        # The test body never ran, but the cleanup did.
        self.assertEqual(['setUp', 'foo'], self.test.log)

    def test_addCleanupCalledIfSetUpSkips(self):
        """
        Callables added with C{addCleanup} are run even if setUp raises
        L{SkipTest}. This allows test authors to reliably provide clean up
        code using C{addCleanup}.
        """
        self.test.setUp = self.test.skippingSetUp
        self.test.addCleanup(self.test.append, 'foo')
        self.test.run(self.result)
        self.assertEqual(['setUp', 'foo'], self.test.log)

    def test_addCleanupCalledInReverseOrder(self):
        """
        Callables added with C{addCleanup} should be called before C{tearDown}
        in reverse order of addition.
        """
        self.test.addCleanup(self.test.append, "foo")
        self.test.addCleanup(self.test.append, 'bar')
        self.test.run(self.result)
        self.assertEqual(['setUp', 'runTest', 'bar', 'foo', 'tearDown'],
                         self.test.log)

    def test_errorInCleanupIsCaptured(self):
        """
        Errors raised in cleanup functions should be treated like errors in
        C{tearDown}. They should be added as errors and fail the test. Skips,
        todos and failures are all treated as errors.
        """
        self.test.addCleanup(self.test.fail, 'foo')
        self.test.run(self.result)
        # assertFalse replaces the deprecated failIf alias.
        self.assertFalse(self.result.wasSuccessful())
        self.assertEqual(1, len(self.result.errors))
        [(test, error)] = self.result.errors
        self.assertEqual(test, self.test)
        self.assertEqual(error.getErrorMessage(), 'foo')

    def test_cleanupsContinueRunningAfterError(self):
        """
        If a cleanup raises an error then that does not stop the other
        cleanups from being run.
        """
        self.test.addCleanup(self.test.append, 'foo')
        self.test.addCleanup(self.test.fail, 'bar')
        self.test.run(self.result)
        self.assertEqual(['setUp', 'runTest', 'foo', 'tearDown'],
                         self.test.log)
        self.assertEqual(1, len(self.result.errors))
        [(test, error)] = self.result.errors
        self.assertEqual(test, self.test)
        self.assertEqual(error.getErrorMessage(), 'bar')

    def test_multipleErrorsReported(self):
        """
        If more than one cleanup fails, then the test should fail with more
        than one error.
        """
        self.test.addCleanup(self.test.fail, 'foo')
        self.test.addCleanup(self.test.fail, 'bar')
        self.test.run(self.result)
        self.assertEqual(['setUp', 'runTest', 'tearDown'],
                         self.test.log)
        self.assertEqual(2, len(self.result.errors))
        [(test1, error1), (test2, error2)] = self.result.errors
        self.assertEqual(test1, self.test)
        self.assertEqual(test2, self.test)
        # Cleanups run in reverse order of addition, so 'bar' fails first.
        self.assertEqual(error1.getErrorMessage(), 'bar')
        self.assertEqual(error2.getErrorMessage(), 'foo')
class SynchronousAddCleanupTests(AddCleanupMixin, unittest.SynchronousTestCase):
    """
    Test the addCleanup method of TestCase in the synchronous case

    See: L{twisted.trial.test.test_tests.AddCleanupMixin}
    """
    # Class-body import: binds the fixture TestCase as the AddCleanup
    # attribute the mixin's setUp instantiates.
    from twisted.trial.test.skipping import SynchronousAddCleanup as AddCleanup
class AsynchronousAddCleanupTests(AddCleanupMixin, unittest.TestCase):
    """
    Test the addCleanup method of TestCase in the asynchronous case

    See: L{twisted.trial.test.test_tests.AddCleanupMixin}
    """
    from twisted.trial.test.skipping import AsynchronousAddCleanup as AddCleanup

    def test_addCleanupWaitsForDeferreds(self):
        """
        If an added callable returns a L{Deferred}, then the test should wait
        until that L{Deferred} has fired before running the next cleanup
        method.
        """
        def cleanup(message):
            # Fire asynchronously via the reactor so the runner must wait
            # for the Deferred before invoking the next cleanup.
            d = defer.Deferred()
            reactor.callLater(0, d.callback, message)
            return d.addCallback(self.test.append)
        self.test.addCleanup(self.test.append, 'foo')
        self.test.addCleanup(cleanup, 'bar')
        self.test.run(self.result)
        self.assertEqual(['setUp', 'runTest', 'bar', 'foo', 'tearDown'],
                         self.test.log)
class SuiteClearingMixin(object):
    """
    Tests for our extension that allows us to clear out a L{TestSuite}.
    """
    if _PY3:
        skip = _PY3PORTNEEDED

    def test_clearSuite(self):
        """
        Calling L{unittest._clearSuite} on a populated L{TestSuite} removes
        all tests.
        """
        populated = unittest.TestSuite()
        populated.addTest(self.TestCase())
        # Sanity check: the suite really holds a test before we clear it.
        self.assertEqual(1, populated.countTestCases())
        unittest._clearSuite(populated)
        self.assertEqual(0, populated.countTestCases())

    def test_clearPyunitSuite(self):
        """
        Calling L{unittest._clearSuite} on a populated standard library
        L{TestSuite} removes all tests.

        This test is important since C{_clearSuite} operates by mutating
        internal variables.
        """
        # Local import reaches the stdlib module; the module-level name
        # 'unittest' is twisted.trial.unittest here.
        import unittest as pyunit
        populated = pyunit.TestSuite()
        populated.addTest(self.TestCase())
        # Sanity check: the suite really holds a test before we clear it.
        self.assertEqual(1, populated.countTestCases())
        unittest._clearSuite(populated)
        self.assertEqual(0, populated.countTestCases())
class SynchronousSuiteClearingTests(SuiteClearingMixin, unittest.SynchronousTestCase):
    """
    Tests for our extension that allows us to clear out a L{TestSuite} in the
    synchronous case.

    See L{twisted.trial.test.test_tests.SuiteClearingMixin}
    """
    # The TestCase class the mixin instantiates to populate suites.
    TestCase = unittest.SynchronousTestCase
class AsynchronousSuiteClearingTests(SuiteClearingMixin, unittest.TestCase):
    """
    Tests for our extension that allows us to clear out a L{TestSuite} in the
    asynchronous case.

    See L{twisted.trial.test.test_tests.SuiteClearingMixin}
    """
    # The TestCase class the mixin instantiates to populate suites.
    TestCase = unittest.TestCase
class TestDecoratorMixin(object):
    """
    Tests for our test decoration features.
    """
    if _PY3:
        skip = _PY3PORTNEEDED

    def assertTestsEqual(self, observed, expected):
        """
        Assert that the given decorated tests are equal.
        """
        self.assertEqual(observed.__class__, expected.__class__,
                         "Different class")
        observedOriginal = getattr(observed, '_originalTest', None)
        expectedOriginal = getattr(expected, '_originalTest', None)
        self.assertIdentical(observedOriginal, expectedOriginal)
        if observedOriginal is expectedOriginal is None:
            # Neither is a decorator: fall back to identity comparison.
            self.assertIdentical(observed, expected)

    def assertSuitesEqual(self, observed, expected):
        """
        Assert that the given test suites with decorated tests are equal.
        """
        self.assertEqual(observed.__class__, expected.__class__,
                         "Different class")
        self.assertEqual(len(observed._tests), len(expected._tests),
                         "Different number of tests.")
        for observedTest, expectedTest in zip(observed._tests,
                                              expected._tests):
            if getattr(observedTest, '_tests', None) is not None:
                # Nested suite: recurse.
                self.assertSuitesEqual(observedTest, expectedTest)
            else:
                self.assertTestsEqual(observedTest, expectedTest)

    def test_usesAdaptedReporterWithRun(self):
        """
        For decorated tests, C{run} uses a result adapter that preserves the
        test decoration for calls to C{addError}, C{startTest} and the like.

        See L{reporter._AdaptedReporter}.
        """
        test = self.TestCase()
        decoratedTest = unittest.TestDecorator(test)
        # Move to top in ticket #5964:
        from twisted.trial.test.test_reporter import LoggingReporter
        result = LoggingReporter()
        decoratedTest.run(result)
        self.assertTestsEqual(result.test, decoratedTest)

    def test_usesAdaptedReporterWithCall(self):
        """
        For decorated tests, C{__call__} uses a result adapter that preserves
        the test decoration for calls to C{addError}, C{startTest} and the
        like.

        See L{reporter._AdaptedReporter}.
        """
        test = self.TestCase()
        decoratedTest = unittest.TestDecorator(test)
        # Move to top in ticket #5964:
        from twisted.trial.test.test_reporter import LoggingReporter
        result = LoggingReporter()
        decoratedTest(result)
        self.assertTestsEqual(result.test, decoratedTest)

    def test_decorateSingleTest(self):
        """
        Calling L{decorate} on a single test case returns the test case
        decorated with the provided decorator.
        """
        test = self.TestCase()
        decoratedTest = unittest.decorate(test, unittest.TestDecorator)
        self.assertTestsEqual(unittest.TestDecorator(test), decoratedTest)

    def test_decorateTestSuite(self):
        """
        Calling L{decorate} on a test suite will return a test suite with
        each test decorated with the provided decorator.
        """
        test = self.TestCase()
        suite = unittest.TestSuite([test])
        decoratedTest = unittest.decorate(suite, unittest.TestDecorator)
        self.assertSuitesEqual(
            decoratedTest, unittest.TestSuite([unittest.TestDecorator(test)]))

    def test_decorateInPlaceMutatesOriginal(self):
        """
        Calling L{decorate} on a test suite will mutate the original suite.
        """
        test = self.TestCase()
        suite = unittest.TestSuite([test])
        decoratedTest = unittest.decorate(
            suite, unittest.TestDecorator)
        self.assertSuitesEqual(
            decoratedTest, unittest.TestSuite([unittest.TestDecorator(test)]))
        # The original suite was decorated in place as well.
        self.assertSuitesEqual(
            suite, unittest.TestSuite([unittest.TestDecorator(test)]))

    def test_decorateTestSuiteReferences(self):
        """
        When decorating a test suite in-place, the number of references to the
        test objects in that test suite should stay the same.

        Previously, L{unittest.decorate} recreated a test suite, so the
        original suite kept references to the test objects. This test is here
        to ensure the problem doesn't reappear again.
        """
        getrefcount = getattr(sys, 'getrefcount', None)
        if getrefcount is None:
            raise unittest.SkipTest(
                "getrefcount not supported on this platform")
        test = self.TestCase()
        suite = unittest.TestSuite([test])
        count1 = getrefcount(test)
        # The binding is kept (though unused) so the refcount snapshot below
        # is taken under the same conditions as count1.
        decoratedTest = unittest.decorate(suite, unittest.TestDecorator)
        count2 = getrefcount(test)
        self.assertEqual(count1, count2)

    def test_decorateNestedTestSuite(self):
        """
        Calling L{decorate} on a test suite with nested suites will return a
        test suite that maintains the same structure, but with all tests
        decorated.
        """
        test = self.TestCase()
        suite = unittest.TestSuite([unittest.TestSuite([test])])
        decoratedTest = unittest.decorate(suite, unittest.TestDecorator)
        expected = unittest.TestSuite(
            [unittest.TestSuite([unittest.TestDecorator(test)])])
        self.assertSuitesEqual(decoratedTest, expected)

    def test_decorateDecoratedSuite(self):
        """
        Calling L{decorate} on a test suite with already-decorated tests
        decorates all of the tests in the suite again.
        """
        test = self.TestCase()
        decoratedTest = unittest.decorate(test, unittest.TestDecorator)
        redecoratedTest = unittest.decorate(decoratedTest,
                                            unittest.TestDecorator)
        self.assertTestsEqual(redecoratedTest,
                              unittest.TestDecorator(decoratedTest))

    def test_decoratePreservesSuite(self):
        """
        Tests can be in non-standard suites. L{decorate} preserves the
        non-standard suites when it decorates the tests.
        """
        test = self.TestCase()
        suite = runner.DestructiveTestSuite([test])
        decorated = unittest.decorate(suite, unittest.TestDecorator)
        self.assertSuitesEqual(
            decorated,
            runner.DestructiveTestSuite([unittest.TestDecorator(test)]))
class SynchronousTestDecoratorTests(TestDecoratorMixin, unittest.SynchronousTestCase):
    """
    Tests for our test decoration features in the synchronous case.

    See L{twisted.trial.test.test_tests.TestDecoratorMixin}
    """
    # The TestCase class the mixin decorates.
    TestCase = unittest.SynchronousTestCase
class AsynchronousTestDecoratorTests(TestDecoratorMixin, unittest.TestCase):
    """
    Tests for our test decoration features in the asynchronous case.

    See L{twisted.trial.test.test_tests.TestDecoratorMixin}
    """
    # The TestCase class the mixin decorates.
    TestCase = unittest.TestCase
class MonkeyPatchMixin(object):
    """
    Tests for the patch() helper method in L{unittest.TestCase}.
    """
    def setUp(self):
        """
        Prepare the attribute to patch and the test case doing the patching.
        """
        self.originalValue = 'original'
        self.patchedValue = 'patched'
        self.objectToPatch = self.originalValue
        self.test = self.TestCase()

    def test_patch(self):
        """
        Calling C{patch()} on a test monkey patches the specified object and
        attribute.
        """
        self.test.patch(self, 'objectToPatch', self.patchedValue)
        self.assertEqual(self.patchedValue, self.objectToPatch)

    def test_patchRestoredAfterRun(self):
        """
        Any monkey patches introduced by a test using C{patch()} are reverted
        after the test has run.
        """
        self.test.patch(self, 'objectToPatch', self.patchedValue)
        self.test.run(reporter.Reporter())
        self.assertEqual(self.originalValue, self.objectToPatch)

    def test_revertDuringTest(self):
        """
        C{patch()} return a L{monkey.MonkeyPatcher} object that can be used to
        restore the original values before the end of the test.
        """
        monkeyPatch = self.test.patch(self, 'objectToPatch', self.patchedValue)
        monkeyPatch.restore()
        self.assertEqual(self.originalValue, self.objectToPatch)

    def test_revertAndRepatch(self):
        """
        The returned L{monkey.MonkeyPatcher} object can re-apply the patch
        during the test run.
        """
        monkeyPatch = self.test.patch(self, 'objectToPatch', self.patchedValue)
        monkeyPatch.restore()
        monkeyPatch.patch()
        self.assertEqual(self.patchedValue, self.objectToPatch)

    def test_successivePatches(self):
        """
        Successive patches are applied and reverted just like a single patch.
        """
        self.test.patch(self, 'objectToPatch', self.patchedValue)
        self.assertEqual(self.patchedValue, self.objectToPatch)
        self.test.patch(self, 'objectToPatch', 'second value')
        self.assertEqual('second value', self.objectToPatch)
        # Running the test reverts every patch, newest first.
        self.test.run(reporter.Reporter())
        self.assertEqual(self.originalValue, self.objectToPatch)
class SynchronousMonkeyPatchTests(MonkeyPatchMixin, unittest.SynchronousTestCase):
    """
    Tests for the patch() helper method in the synchronous case.

    See L{twisted.trial.test.test_tests.MonkeyPatchMixin}
    """
    # The TestCase class whose patch() helper is exercised.
    TestCase = unittest.SynchronousTestCase
class AsynchronousMonkeyPatchTests(MonkeyPatchMixin, unittest.TestCase):
    """
    Tests for the patch() helper method in the asynchronous case.

    See L{twisted.trial.test.test_tests.MonkeyPatchMixin}
    """
    # The TestCase class whose patch() helper is exercised.
    TestCase = unittest.TestCase
class IterateTestsMixin(object):
    """
    L{_iterateTests} returns a list of all test cases in a test suite or test
    case.
    """
    if _PY3:
        skip = _PY3PORTNEEDED

    def test_iterateTestCase(self):
        """
        L{_iterateTests} on a single test case returns a list containing that
        test case.
        """
        case = self.TestCase()
        self.assertEqual(list(unittest._iterateTests(case)), [case])

    def test_iterateSingletonTestSuite(self):
        """
        L{_iterateTests} on a test suite that contains a single test case
        returns a list containing that test case.
        """
        case = self.TestCase()
        wrapped = runner.TestSuite([case])
        self.assertEqual(list(unittest._iterateTests(wrapped)), [case])

    def test_iterateNestedTestSuite(self):
        """
        L{_iterateTests} returns tests that are in nested test suites.
        """
        case = self.TestCase()
        nested = runner.TestSuite([runner.TestSuite([case])])
        self.assertEqual(list(unittest._iterateTests(nested)), [case])

    def test_iterateIsLeftToRightDepthFirst(self):
        """
        L{_iterateTests} returns tests in left-to-right, depth-first order.
        """
        case = self.TestCase()
        # The nested suite comes first, so its leaf precedes self.
        mixed = runner.TestSuite([runner.TestSuite([case]), self])
        self.assertEqual(list(unittest._iterateTests(mixed)), [case, self])
class SynchronousIterateTestsTests(IterateTestsMixin, unittest.SynchronousTestCase):
    """
    Check that L{_iterateTests} returns a list of all test cases in a test suite
    or test case for synchronous tests.

    See L{twisted.trial.test.test_tests.IterateTestsMixin}
    """
    # The TestCase class the mixin places in its suites.
    TestCase = unittest.SynchronousTestCase
class AsynchronousIterateTestsTests(IterateTestsMixin, unittest.TestCase):
    """
    Check that L{_iterateTests} returns a list of all test cases in a test suite
    or test case for asynchronous tests.

    See L{twisted.trial.test.test_tests.IterateTestsMixin}
    """
    # The TestCase class the mixin places in its suites.
    TestCase = unittest.TestCase
class TrialGeneratorFunctionTests(unittest.SynchronousTestCase):
    """
    Tests for generator function methods in test cases.
    """

    def test_errorOnGeneratorFunction(self):
        """
        In a TestCase, a test method which is a generator function is reported
        as an error, as such a method will never run assertions.
        """
        class GeneratorTestCase(unittest.TestCase):
            """
            A fake TestCase for testing purposes.
            """

            def test_generator(self):
                """
                A method which is also a generator function, for testing
                purposes.
                """
                self.fail('this should never be reached')
                yield

        testCase = GeneratorTestCase('test_generator')
        result = reporter.TestResult()
        testCase.run(result)
        # The body never runs, so self.fail() above is never reported as a
        # failure; instead one error describing the generator is recorded.
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(
            result.errors[0][1].value.args[0],
            '<bound method GeneratorTestCase.test_generator of <twisted.trial.'
            'test.test_tests.GeneratorTestCase testMethod=test_generator>> is '
            'a generator function and therefore will never run')

    def test_synchronousTestCaseErrorOnGeneratorFunction(self):
        """
        In a SynchronousTestCase, a test method which is a generator function
        is reported as an error, as such a method will never run assertions.
        """
        class GeneratorSynchronousTestCase(unittest.SynchronousTestCase):
            """
            A fake SynchronousTestCase for testing purposes.
            """

            def test_generator(self):
                """
                A method which is also a generator function, for testing
                purposes.
                """
                self.fail('this should never be reached')
                yield

        testCase = GeneratorSynchronousTestCase('test_generator')
        result = reporter.TestResult()
        testCase.run(result)
        # Same expectation as above, for the synchronous base class.
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(
            result.errors[0][1].value.args[0],
            '<bound method GeneratorSynchronousTestCase.test_generator of '
            '<twisted.trial.test.test_tests.GeneratorSynchronousTestCase '
            'testMethod=test_generator>> is a generator function and '
            'therefore will never run')
| bsd-3-clause |
lbishal/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
# Two well-separated classes in 2D; T holds query points and true_result
# their expected labels.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X)  # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    # Check classification on a toy dataset, including sparse versions.
    # Every dense/sparse combination of fit and predict data must agree.
    for fit_data, predict_data in [(X, T), (X_csr, T_csr),
                                   (X_csr, T), (X, T_csr)]:
        clf = NearestCentroid()
        clf.fit(fit_data, y)
        assert_array_equal(clf.predict(predict_data), true_result)

    # Fit and predict with non-CSR sparse matrices
    clf = NearestCentroid()
    clf.fit(X_csr.tocoo(), y)
    assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
    # Predicting from a precomputed distance matrix matches the toy labels.
    clf = NearestCentroid(metric="precomputed")
    clf.fit(X, y)
    distances = pairwise_distances(T, clf.centroids_)
    assert_array_equal(clf.predict(distances), true_result)
def test_iris():
    # Check consistency on dataset iris.
    for metric in ('euclidean', 'cosine'):
        model = NearestCentroid(metric=metric).fit(iris.data, iris.target)
        accuracy = np.mean(model.predict(iris.data) == iris.target)
        assert accuracy > 0.9, "Failed with score = " + str(accuracy)
def test_iris_shrinkage():
    # Check consistency on dataset iris, when using shrinkage.
    for metric in ('euclidean', 'cosine'):
        for shrink_threshold in [None, 0.1, 0.5]:
            model = NearestCentroid(metric=metric,
                                    shrink_threshold=shrink_threshold)
            model = model.fit(iris.data, iris.target)
            accuracy = np.mean(model.predict(iris.data) == iris.target)
            assert accuracy > 0.8, "Failed with score = " + str(accuracy)
def test_pickle():
    import pickle

    # classification: a round-tripped estimator must score identically.
    obj = NearestCentroid()
    obj.fit(iris.data, iris.target)
    score = obj.score(iris.data, iris.target)
    obj2 = pickle.loads(pickle.dumps(obj))
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(iris.data, iris.target)
    assert_array_equal(score, score2,
                       "Failed to generate same score"
                       " after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
    # Shrinkage must yield the same centroids whatever the label encoding.
    clf = NearestCentroid(shrink_threshold=0.01)
    relabeled = np.asarray(y)
    relabeled[relabeled == -1] = 0
    clf.fit(X, relabeled)
    centroid_encoded = clf.centroids_
    clf.fit(X, y)
    assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
    # Test that NearestCentroid gives same results on translated data
    rng = np.random.RandomState(0)
    data = rng.rand(50, 50)
    labels = rng.randint(0, 3, 50)
    offset = rng.rand(50)
    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(data, labels)
    y_init = clf.predict(data)
    # Refit on the translated data; predictions must be unchanged.
    clf = NearestCentroid(shrink_threshold=0.1)
    shifted = data + offset
    clf.fit(shifted, labels)
    y_translate = clf.predict(shifted)
    assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
    # Test the manhattan metric.
    model = NearestCentroid(metric='manhattan')
    model.fit(X, y)
    dense_centroid = model.centroids_
    model.fit(X_csr, y)
    # Sparse and dense fits agree, and the centroids are the class medians.
    assert_array_equal(model.centroids_, dense_centroid)
    assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
nicobustillos/odoo | addons/web_graph/controllers/main.py | 251 | 3561 | from openerp import http
import simplejson
from openerp.http import request, serialize_exception as _serialize_exception
from cStringIO import StringIO
from collections import deque
try:
import xlwt
except ImportError:
xlwt = None
class TableExporter(http.Controller):
    """HTTP controller that exports the web_graph pivot table to Excel."""

    @http.route('/web_graph/check_xlwt', type='json', auth='none')
    def check_xlwt(self):
        # Tell the client whether the optional xlwt dependency is available.
        return xlwt is not None

    @http.route('/web_graph/export_xls', type='http', auth="user")
    def export_xls(self, data, token):
        # data: JSON-serialized pivot table (headers, measure_row, rows);
        # token: cookie value the client polls to detect download completion.
        jdata = simplejson.loads(data)
        nbr_measures = jdata['nbr_measures']
        workbook = xlwt.Workbook()
        # xlwt limits sheet names; truncate the title to stay within bounds.
        worksheet = workbook.add_sheet(jdata['title'][:30])
        header_bold = xlwt.easyxf("font: bold on; pattern: pattern solid, fore_colour gray25;")
        header_plain = xlwt.easyxf("pattern: pattern solid, fore_colour gray25;")
        bold = xlwt.easyxf("font: bold on;")

        # Step 1: writing headers
        headers = jdata['headers']

        # x,y: current coordinates
        # carry: queue containing cell information when a cell has a >= 2 height
        # and the drawing code needs to add empty cells below
        x, y, carry = 1, 0, deque()
        for i, header_row in enumerate(headers):
            worksheet.write(i, 0, '', header_plain)
            for header in header_row:
                # Flush filler cells queued under taller cells from earlier
                # rows before writing this header.
                # NOTE(review): the inner loops below reuse `i`, shadowing the
                # row index from enumerate(); harmless today because the
                # left-column cell is written first, but worth renaming.
                while (carry and carry[0]['x'] == x):
                    cell = carry.popleft()
                    for i in range(nbr_measures):
                        worksheet.write(y, x + i, '', header_plain)
                    if cell['height'] > 1:
                        carry.append({'x': x, 'height': cell['height'] - 1})
                    x = x + nbr_measures
                style = header_plain if 'expanded' in header else header_bold
                for i in range(header['width']):
                    worksheet.write(y, x + i, header['title'] if i == 0 else '', style)
                if header['height'] > 1:
                    carry.append({'x': x, 'height': header['height'] - 1})
                x = x + header['width'];
            # Flush any filler cells remaining at the end of the row.
            while (carry and carry[0]['x'] == x):
                cell = carry.popleft()
                for i in range(nbr_measures):
                    worksheet.write(y, x + i, '', header_plain)
                if cell['height'] > 1:
                    carry.append({'x': x, 'height': cell['height'] - 1})
                x = x + nbr_measures
            x, y = 1, y + 1

        # Step 2: measure row (only present when several measures are shown)
        if nbr_measures > 1:
            worksheet.write(y, 0, '', header_plain)
            for measure in jdata['measure_row']:
                style = header_bold if measure['is_bold'] else header_plain
                worksheet.write(y, x, measure['text'], style);
                x = x + 1
            y = y + 1

        # Step 3: writing data
        x = 0
        for row in jdata['rows']:
            # Row label, indented proportionally to its depth in the pivot.
            worksheet.write(y, x, row['indent'] * ' ' + row['title'], header_plain)
            for cell in row['cells']:
                x = x + 1
                if cell.get('is_bold', False):
                    worksheet.write(y, x, cell['value'], bold)
                else:
                    worksheet.write(y, x, cell['value'])
            x, y = 0, y + 1

        # Stream the workbook back as an attachment; fileToken lets the
        # client detect that the download has started.
        response = request.make_response(None,
            headers=[('Content-Type', 'application/vnd.ms-excel'),
                     ('Content-Disposition', 'attachment; filename=table.xls;')],
            cookies={'fileToken': token})
        workbook.save(response.stream)
        return response
| agpl-3.0 |
cfe-lab/Kive | kive/portal/migrations/0001_initial.py | 1 | 1120 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial migration: creates the StagedFile model used to hold uploads
    # awaiting server-side processing.

    dependencies = [
        # Depends on whichever user model the project configures.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='StagedFile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('uploaded_file', models.FileField(help_text='Uploaded file held for further server-side processing', upload_to=b'StagedFiles', verbose_name=b'Uploaded file')),
                ('date_uploaded', models.DateTimeField(help_text='Date and time of upload', verbose_name=b'Upload date', auto_now_add=True)),
                ('user', models.ForeignKey(
                    help_text='User that uploaded this file',
                    to=settings.AUTH_USER_MODEL,
                    on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause |
chr1sb/Sick-Beard | lib/hachoir_parser/program/exe_res.py | 90 | 15312 | """
Parser for resource of Microsoft Windows Portable Executable (PE).
Documentation:
- Wine project
VS_FIXEDFILEINFO structure, file include/winver.h
Author: Victor Stinner
Creation date: 2007-01-19
"""
from lib.hachoir_core.field import (FieldSet, ParserError, Enum,
Bit, Bits, SeekableFieldSet,
UInt16, UInt32, TimestampUnix32,
RawBytes, PaddingBytes, NullBytes, NullBits,
CString, String)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.tools import createDict, paddingSize, alignValue, makePrintable
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_parser.common.win32 import BitmapInfoHeader
MAX_DEPTH = 5
MAX_INDEX_PER_HEADER = 300
MAX_NAME_PER_HEADER = MAX_INDEX_PER_HEADER
class Version(FieldSet):
    """
    32-bit packed version number: two 16-bit halves, minor stored first.
    """
    static_size = 32

    def createFields(self):
        # Minor precedes major in the file layout.
        yield textHandler(UInt16(self, "minor", "Minor version number"), hexadecimal)
        yield textHandler(UInt16(self, "major", "Major version number"), hexadecimal)

    def createValue(self):
        minor = self["minor"].value
        major = self["major"].value
        return major + float(minor) / 10000
# Lookup tables for the VS_FIXEDFILEINFO enum fields (see winver.h).
MAJOR_OS_NAME = {
    1: "DOS",
    2: "OS/2 16-bit",
    3: "OS/2 32-bit",
    4: "Windows NT",
}

MINOR_OS_BASE = 0
MINOR_OS_NAME = {
    0: "Base",
    1: "Windows 16-bit",
    2: "Presentation Manager 16-bit",
    3: "Presentation Manager 32-bit",
    4: "Windows 32-bit",
}

# File types whose subtype field carries extra meaning.
FILETYPE_DRIVER = 3
FILETYPE_FONT = 4
FILETYPE_NAME = {
    1: "Application",
    2: "DLL",
    3: "Driver",
    4: "Font",
    5: "VXD",
    7: "Static library",
}

DRIVER_SUBTYPE_NAME = {
    1: "Printer",
    2: "Keyboard",
    3: "Language",
    4: "Display",
    5: "Mouse",
    6: "Network",
    7: "System",
    8: "Installable",
    9: "Sound",
    10: "Communications",
}

FONT_SUBTYPE_NAME = {
    1: "Raster",
    2: "Vector",
    3: "TrueType",
}
class VersionInfoBinary(FieldSet):
    """
    VS_FIXEDFILEINFO structure of a VS_VERSION_INFO resource.
    """
    def createFields(self):
        yield textHandler(UInt32(self, "magic", "File information magic (0xFEEF04BD)"), hexadecimal)
        if self["magic"].value != 0xFEEF04BD:
            raise ParserError("EXE resource: invalid file info magic")
        yield Version(self, "struct_ver", "Structure version (1.0)")
        yield Version(self, "file_ver_ms", "File version MS")
        yield Version(self, "file_ver_ls", "File version LS")
        yield Version(self, "product_ver_ms", "Product version MS")
        yield Version(self, "product_ver_ls", "Product version LS")
        yield textHandler(UInt32(self, "file_flags_mask"), hexadecimal)
        # File flag bits (valid only where file_flags_mask has the bit set).
        yield Bit(self, "debug")
        yield Bit(self, "prerelease")
        yield Bit(self, "patched")
        yield Bit(self, "private_build")
        yield Bit(self, "info_inferred")
        yield Bit(self, "special_build")
        yield NullBits(self, "reserved", 26)
        yield Enum(textHandler(UInt16(self, "file_os_major"), hexadecimal), MAJOR_OS_NAME)
        yield Enum(textHandler(UInt16(self, "file_os_minor"), hexadecimal), MINOR_OS_NAME)
        yield Enum(textHandler(UInt32(self, "file_type"), hexadecimal), FILETYPE_NAME)
        # The subtype enum depends on the value just read, not on file_type.
        field = textHandler(UInt32(self, "file_subfile"), hexadecimal)
        if field.value == FILETYPE_DRIVER:
            field = Enum(field, DRIVER_SUBTYPE_NAME)
        elif field.value == FILETYPE_FONT:
            field = Enum(field, FONT_SUBTYPE_NAME)
        yield field
        yield TimestampUnix32(self, "date_ms")
        yield TimestampUnix32(self, "date_ls")
class VersionInfoNode(FieldSet):
    """
    One node of the VS_VERSION_INFO tree: header, optional value,
    then child nodes, each padded to a 32-bit boundary.
    """
    TYPE_STRING = 1
    TYPE_NAME = {
        0: "binary",
        1: "string",
    }

    def __init__(self, parent, name, is_32bit=True):
        FieldSet.__init__(self, parent, name)
        # Node size is rounded up to a 4-byte boundary.
        self._size = alignValue(self["size"].value, 4) * 8
        self.is_32bit = is_32bit

    def createFields(self):
        yield UInt16(self, "size", "Node size (in bytes)")
        yield UInt16(self, "data_size")
        yield Enum(UInt16(self, "type"), self.TYPE_NAME)
        yield CString(self, "name", charset="UTF-16-LE")
        size = paddingSize(self.current_size//8, 4)
        if size:
            yield NullBytes(self, "padding[]", size)
        size = self["data_size"].value
        if size:
            if self["type"].value == self.TYPE_STRING:
                # For string nodes of 32-bit files, data_size counts
                # UTF-16 characters, not bytes.
                if self.is_32bit:
                    size *= 2
                yield String(self, "value", size, charset="UTF-16-LE", truncate="\0")
            elif self["name"].value == "VS_VERSION_INFO":
                yield VersionInfoBinary(self, "value", size=size*8)
                # A zero flags mask marks a 16-bit file: propagate to children.
                if self["value/file_flags_mask"].value == 0:
                    self.is_32bit = False
            else:
                yield RawBytes(self, "value", size)
        # Children follow while at least a node header (12 bytes) remains.
        while 12 <= (self.size - self.current_size) // 8:
            yield VersionInfoNode(self, "node[]", self.is_32bit)
        size = (self.size - self.current_size) // 8
        if size:
            yield NullBytes(self, "padding[]", size)

    def createDescription(self):
        text = "Version info node: %s" % self["name"].value
        if self["type"].value == self.TYPE_STRING and "value" in self:
            text += "=%s" % self["value"].value
        return text
def parseVersionInfo(parent):
    # RESOURCE_TYPE entry point: a version resource body is one node tree.
    yield VersionInfoNode(parent, "node[]")
def parseIcon(parent):
    # RESOURCE_TYPE entry point: a bitmap header followed by raw image data.
    yield BitmapInfoHeader(parent, "bmp_header")
    size = (parent.size - parent.current_size) // 8
    if size:
        yield RawBytes(parent, "raw", size)
class WindowsString(FieldSet):
    """Length-prefixed UTF-16 string, as stored in a string table resource."""
    def createFields(self):
        yield UInt16(self, "length", "Number of 16-bit characters")
        size = self["length"].value * 2
        if size:
            yield String(self, "text", size, charset="UTF-16-LE")
    def createValue(self):
        if "text" in self:
            return self["text"].value
        else:
            # Zero-length entries carry no "text" field at all.
            return u""
    def createDisplay(self):
        return makePrintable(self.value, "UTF-8", to_unicode=True, quote='"')
def parseStringTable(parent):
    # A string table resource is a plain sequence of WindowsString until EOF.
    while not parent.eof:
        yield WindowsString(parent, "string[]")
# Known resource types: id -> (field name, description, parser or None).
# A None parser means the content is kept as raw bytes.
RESOURCE_TYPE = {
    1: ("cursor[]", "Cursor", None),
    2: ("bitmap[]", "Bitmap", None),
    3: ("icon[]", "Icon", parseIcon),
    4: ("menu[]", "Menu", None),
    5: ("dialog[]", "Dialog", None),
    6: ("string_table[]", "String table", parseStringTable),
    7: ("font_dir[]", "Font directory", None),
    8: ("font[]", "Font", None),
    9: ("accelerators[]", "Accelerators", None),
    10: ("raw_res[]", "Unformatted resource data", None),
    11: ("message_table[]", "Message table", None),
    12: ("group_cursor[]", "Group cursor", None),
    14: ("group_icon[]", "Group icon", None),
    16: ("version_info", "Version information", parseVersionInfo),
}
class Entry(FieldSet):
    """Resource data entry: location (RVA) and size of one resource body."""
    static_size = 16*8
    def __init__(self, parent, name, inode=None):
        FieldSet.__init__(self, parent, name)
        # inode: the IndexOffset leaf that pointed at this entry.
        self.inode = inode
    def createFields(self):
        yield textHandler(UInt32(self, "rva"), hexadecimal)
        yield filesizeHandler(UInt32(self, "size"))
        yield UInt32(self, "codepage")
        yield NullBytes(self, "reserved", 4)
    def createDescription(self):
        return "Entry #%u: offset=%s size=%s" % (
            self.inode["offset"].value, self["rva"].display, self["size"].display)
class NameOffset(FieldSet):
    """Directory entry addressed by name: name pointer plus 31-bit offset."""
    def createFields(self):
        yield UInt32(self, "name")
        yield Bits(self, "offset", 31)
        yield Bit(self, "is_name")
class IndexOffset(FieldSet):
    """Directory entry addressed by integer id (resource type at level 1)."""
    TYPE_DESC = createDict(RESOURCE_TYPE, 1)
    def __init__(self, parent, name, res_type=None):
        FieldSet.__init__(self, parent, name)
        # res_type: type id inherited from the level-1 directory, if known.
        self.res_type = res_type
    def createFields(self):
        yield Enum(UInt32(self, "type"), self.TYPE_DESC)
        yield Bits(self, "offset", 31)
        yield Bit(self, "is_subdir")
    def createDescription(self):
        if self["is_subdir"].value:
            return "Sub-directory: %s at %s" % (self["type"].display, self["offset"].value)
        else:
            return "Index: ID %s at %s" % (self["type"].display, self["offset"].value)
class ResourceContent(FieldSet):
    """Body of one resource, parsed with a type-specific parser when known."""
    def __init__(self, parent, name, entry, size=None):
        # NOTE(review): the size= parameter is ignored; the entry's "size"
        # field always wins.
        FieldSet.__init__(self, parent, name, size=entry["size"].value*8)
        self.entry = entry
        res_type = self.getResType()
        if res_type in RESOURCE_TYPE:
            # Rename this field after the resource type; keep its parser.
            self._name, description, self._parser = RESOURCE_TYPE[res_type]
        else:
            self._parser = None
    def getResID(self):
        return self.entry.inode["offset"].value
    def getResType(self):
        return self.entry.inode.res_type
    def createFields(self):
        if self._parser:
            for field in self._parser(self):
                yield field
        else:
            # Unknown type: keep the raw bytes.
            yield RawBytes(self, "content", self.size//8)
    def createDescription(self):
        return "Resource #%u content: type=%s" % (
            self.getResID(), self.getResType())
class Header(FieldSet):
    """Resource directory header: version, timestamp and entry counts."""
    static_size = 16*8
    def createFields(self):
        yield NullBytes(self, "options", 4)
        yield TimestampUnix32(self, "creation_date")
        yield UInt16(self, "maj_ver", "Major version")
        yield UInt16(self, "min_ver", "Minor version")
        yield UInt16(self, "nb_name", "Number of named entries")
        yield UInt16(self, "nb_index", "Number of indexed entries")
    def createDescription(self):
        text = "Resource header"
        # Only mention the pieces of information that are actually non-zero.
        info = []
        if self["nb_name"].value:
            info.append("%u name" % self["nb_name"].value)
        if self["nb_index"].value:
            info.append("%u index" % self["nb_index"].value)
        if self["creation_date"].value:
            info.append(self["creation_date"].display)
        if info:
            return "%s: %s" % (text, ", ".join(info))
        else:
            return text
class Name(FieldSet):
    """Length-prefixed UTF-16 resource name."""
    def createFields(self):
        yield UInt16(self, "length")
        # Cap at 255 as a sanity limit. NOTE(review): "length" looks like a
        # character count but is passed to String as a byte size — verify.
        size = min(self["length"].value, 255)
        if size:
            yield String(self, "name", size, charset="UTF-16LE")
class Directory(FieldSet):
    """One resource directory: header plus its name/index entry tables."""
    def __init__(self, parent, name, res_type=None):
        FieldSet.__init__(self, parent, name)
        nb_entries = self["header/nb_name"].value + self["header/nb_index"].value
        # Each directory entry is 64 bits (8 bytes).
        self._size = Header.static_size + nb_entries * 64
        self.res_type = res_type
    def createFields(self):
        yield Header(self, "header")
        # Reject absurd entry counts to avoid runaway parsing on corrupt files.
        if MAX_NAME_PER_HEADER < self["header/nb_name"].value:
            raise ParserError("EXE resource: invalid number of name (%s)"
                % self["header/nb_name"].value)
        if MAX_INDEX_PER_HEADER < self["header/nb_index"].value:
            raise ParserError("EXE resource: invalid number of index (%s)"
                % self["header/nb_index"].value)
        hdr = self["header"]
        # Named entries always precede indexed entries.
        for index in xrange(hdr["nb_name"].value):
            yield NameOffset(self, "name[]")
        for index in xrange(hdr["nb_index"].value):
            yield IndexOffset(self, "index[]", self.res_type)
    def createDescription(self):
        return self["header"].description
class PE_Resource(SeekableFieldSet):
    """Whole PE resource section: directory tree, entry table, then bodies."""
    def __init__(self, parent, name, section, size):
        SeekableFieldSet.__init__(self, parent, name, size=size)
        # section: used to translate RVAs into file offsets (rva2file).
        self.section = section
    def parseSub(self, directory, name, depth):
        """Yield the sub-directories referenced by one directory."""
        indexes = []
        for index in directory.array("index"):
            if index["is_subdir"].value:
                indexes.append(index)
        #indexes.sort(key=lambda index: index["offset"].value)
        for index in indexes:
            self.seekByte(index["offset"].value)
            if depth == 1:
                # Level 1 of the tree encodes the resource type;
                # deeper levels inherit it from their parent.
                res_type = index["type"].value
            else:
                res_type = directory.res_type
            yield Directory(self, name, res_type)
    def createFields(self):
        # Parse directories: breadth-first walk bounded by MAX_DEPTH.
        depth = 0
        subdir = Directory(self, "root")
        yield subdir
        subdirs = [subdir]
        alldirs = [subdir]
        while subdirs:
            depth += 1
            if MAX_DEPTH < depth:
                self.error("EXE resource: depth too high (%s), stop parsing directories" % depth)
                break
            newsubdirs = []
            for index, subdir in enumerate(subdirs):
                name = "directory[%u][%u][]" % (depth, index)
                try:
                    for field in self.parseSub(subdir, name, depth):
                        if field.__class__ == Directory:
                            newsubdirs.append(field)
                        yield field
                except HACHOIR_ERRORS, err:
                    # Keep going: one corrupt directory must not abort the walk.
                    self.error("Unable to create directory %s: %s" % (name, err))
            subdirs = newsubdirs
            alldirs.extend(subdirs)
        # Create resource list: every non-subdir index is a leaf resource.
        resources = []
        for directory in alldirs:
            for index in directory.array("index"):
                if not index["is_subdir"].value:
                    resources.append(index)
        # Parse entries
        entries = []
        for resource in resources:
            offset = resource["offset"].value
            if offset is None:
                continue
            self.seekByte(offset)
            entry = Entry(self, "entry[]", inode=resource)
            yield entry
            entries.append(entry)
        # Sort by RVA so the content pass can walk the section forward.
        entries.sort(key=lambda entry: entry["rva"].value)
        # Parse resource content
        for entry in entries:
            try:
                offset = self.section.rva2file(entry["rva"].value)
                padding = self.seekByte(offset, relative=False)
                if padding:
                    yield padding
                yield ResourceContent(self, "content[]", entry)
            except HACHOIR_ERRORS, err:
                self.warning("Error when parsing entry %s: %s" % (entry.path, err))
        size = (self.size - self.current_size) // 8
        if size:
            yield PaddingBytes(self, "padding_end", size)
class NE_VersionInfoNode(FieldSet):
    """Version info node of a 16-bit NE executable (8-bit strings, no "type" field)."""
    TYPE_STRING = 1
    TYPE_NAME = {
        0: "binary",
        1: "string",
    }
    def __init__(self, parent, name):
        FieldSet.__init__(self, parent, name)
        # First UInt16 gives the node byte size; nodes are 32-bit aligned.
        self._size = alignValue(self["size"].value, 4) * 8
    def createFields(self):
        yield UInt16(self, "size", "Node size (in bytes)")
        yield UInt16(self, "data_size")
        yield CString(self, "name", charset="ISO-8859-1")
        size = paddingSize(self.current_size//8, 4)
        if size:
            yield NullBytes(self, "padding[]", size)
        size = self["data_size"].value
        if size:
            if self["name"].value == "VS_VERSION_INFO":
                yield VersionInfoBinary(self, "value", size=size*8)
            else:
                yield String(self, "value", size, charset="ISO-8859-1")
        # Child nodes follow while a minimal 12-byte node still fits.
        while 12 <= (self.size - self.current_size) // 8:
            yield NE_VersionInfoNode(self, "node[]")
        size = (self.size - self.current_size) // 8
        if size:
            yield NullBytes(self, "padding[]", size)
    def createDescription(self):
        text = "Version info node: %s" % self["name"].value
        # NE nodes have no "type" field, hence the disabled code below:
        # if self["type"].value == self.TYPE_STRING and "value" in self:
        #     text += "=%s" % self["value"].value
        return text
| gpl-3.0 |
armisael/django-guardian | example_project/settings.py | 11 | 2406 | import os
import sys
from django.conf import global_settings
# Shortcut: absolute path built from joined components.
abspath = lambda *p: os.path.abspath(os.path.join(*p))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = abspath(os.path.dirname(__file__))
# Make the guardian checkout (one level up) and this project importable
# without installing them.
GUARDIAN_MODULE_PATH = abspath(PROJECT_ROOT, '..')
sys.path.insert(0, GUARDIAN_MODULE_PATH)
sys.path.insert(0, PROJECT_ROOT)
# SQLite file for normal runs; in-memory database for the test suite.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': abspath(PROJECT_ROOT, '.hidden.db'),
        'TEST_NAME': ':memory:',
    },
}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'guardian',
    'guardian.tests',
    'posts',
)
# Optional admin skin: enabled by exporting the GRAPPELLI env variable.
if 'GRAPPELLI' in os.environ:
    try:
        __import__('grappelli')
        INSTALLED_APPS = ('grappelli',) + INSTALLED_APPS
    except ImportError:
        print "django-grappelli not installed"
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
)
STATIC_ROOT = abspath(PROJECT_ROOT, '..', 'public', 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = abspath(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'grappelli/'
ROOT_URLCONF = 'example_project.urls'
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
    'django.core.context_processors.request',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
)
SITE_ID = 1
USE_I18N = True
USE_L10N = True
LOGIN_REDIRECT_URL = '/'
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
# Object-level permissions: guardian's backend runs after the model backend.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
# Needed as some models (located at guardian/tests/models.py)
# are not migrated for tests
SOUTH_TESTS_MIGRATE = False
# Optional machine-local overrides.
try:
    from conf.localsettings import *
except ImportError:
    pass
| bsd-2-clause |
denilsonsa/django-guardian | docs/exts.py | 109 | 1178 |
def setup(app):
app.add_crossref_type(
directivename = "admin",
rolename = "admin",
indextemplate = "pair: %s; admin",
)
app.add_crossref_type(
directivename = "command",
rolename = "command",
indextemplate = "pair: %s; command",
)
app.add_crossref_type(
directivename = "form",
rolename = "form",
indextemplate = "pair: %s; form",
)
app.add_crossref_type(
directivename = "manager",
rolename = "manager",
indextemplate = "pair: %s; manager",
)
app.add_crossref_type(
directivename = "mixin",
rolename = "mixin",
indextemplate = "pair: %s; mixin",
)
app.add_crossref_type(
directivename = "model",
rolename = "model",
indextemplate = "pair: %s; model",
)
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "shortcut",
rolename = "shortcut",
indextemplate = "pair: %s; shortcut",
)
| bsd-2-clause |
makinacorpus/Geotrek | geotrek/infrastructure/filters.py | 1 | 1679 | from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext_lazy as _
from django_filters import CharFilter, MultipleChoiceFilter
from geotrek.authent.filters import StructureRelatedFilterSet
from .models import Infrastructure, INFRASTRUCTURE_TYPES
from geotrek.maintenance.models import Intervention
from geotrek.zoning.filters import ZoningFilterSet
class InfrastructureFilterSet(ZoningFilterSet, StructureRelatedFilterSet):
    """Filters for Infrastructure list views (name, type, years, zoning...)."""
    name = CharFilter(label=_('Name'), lookup_expr='icontains')
    description = CharFilter(label=_('Description'), lookup_expr='icontains')
    implantation_year = MultipleChoiceFilter(choices=Infrastructure.objects.implantation_year_choices())
    intervention_year = MultipleChoiceFilter(label=_("Intervention year"), method='filter_intervention_year',
                                             choices=Intervention.objects.year_choices())
    # "category" filters on the parent type of the infrastructure's type.
    category = MultipleChoiceFilter(label=_("Category"), field_name='type__type',
                                    choices=INFRASTRUCTURE_TYPES)
    class Meta(StructureRelatedFilterSet.Meta):
        model = Infrastructure
        fields = StructureRelatedFilterSet.Meta.fields + [
            'category', 'type', 'condition', 'implantation_year',
            'intervention_year', 'published'
        ]
    def filter_intervention_year(self, qs, name, value):
        """Keep infrastructures that had an intervention in any selected year."""
        infrastructure_ct = ContentType.objects.get_for_model(Infrastructure)
        interventions = Intervention.objects.filter(target_type=infrastructure_ct, date__year__in=value) \
                                            .values_list('target_id', flat=True)
        return qs.filter(id__in=interventions).distinct()
| bsd-2-clause |
akashlevy/Lyff | lyff_lambda/boto/awslambda/exceptions.py | 135 | 1411 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidRequestContentException(BotoServerError):
    """Error returned by the AWS Lambda API for malformed request content."""
    pass
class ResourceNotFoundException(BotoServerError):
    """Error returned by the AWS Lambda API when the resource does not exist."""
    pass
class InvalidParameterValueException(BotoServerError):
    """Error returned by the AWS Lambda API for an invalid parameter value."""
    pass
class ServiceException(BotoServerError):
    """Generic server-side error returned by the AWS Lambda API."""
    pass
| mit |
coderbone/SickRage | lib/httplib2/iri2uri.py | 885 | 3850 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Inclusive (low, high) Unicode code point ranges (the 'ucschar' and
# 'iprivate' productions above) whose characters must be %-escaped.
# The list is sorted, which encode() relies on to stop searching early.
escape_range = [
    (0xA0, 0xD7FF ),
    (0xE000, 0xF8FF ),
    (0xF900, 0xFDCF ),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD ),
    (0x20000, 0x2FFFD ),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD ),
    (0x50000, 0x5FFFD ),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD ),
    (0x80000, 0x8FFFD ),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD ),
    (0xB0000, 0xBFFFD ),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD ),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD ),
    (0x100000, 0x10FFFD)
]
def encode(c):
    """Percent-escape the single character *c* if it lies in escape_range.

    Characters outside every range are returned unchanged; characters inside
    a range are UTF-8 encoded and each octet is %-escaped.
    """
    code_point = ord(c)
    for low, high in escape_range:
        if code_point < low:
            # Ranges are sorted: no later range can contain this code point.
            break
        if code_point <= high:
            return "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    if not isinstance(uri, unicode):
        # Byte string: assume it is already a plain URI and pass it through.
        return uri
    scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
    # Only the authority is IDNA-encoded; in every other component each
    # 'ucschar'/'iprivate' character is UTF-8 encoded then %-escaped.
    authority = authority.encode('idna')
    recombined = urlparse.urlunsplit((scheme, authority, path, query, fragment))
    return "".join([encode(c) for c in recombined])
# Self-test: run "python iri2uri.py" to execute the unit tests below.
if __name__ == "__main__":
    import unittest
    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))
        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # iri2uri must be idempotent on already-converted URIs.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            # A pre-encoded (non-unicode) string must NOT be escaped again.
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
    unittest.main()
| gpl-3.0 |
dbbhattacharya/kitsune | vendor/packages/logilab-common/html.py | 6 | 5361 | # copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""render a tree in HTML.
"""
__docformat__ = "restructuredtext en"
def render_HTML_tree(tree, selected_node=None, render_node=None, caption=None):
    """
    Generate a pure HTML representation of a tree given as an instance
    of a logilab.common.tree.Node
    selected_node is the currently selected node (if any) which will
    have its surrounding <div> have id="selected" (which default
    to a bold border line with the default CSS).
    render_node is a function that should take a Node content (Node.id)
    as parameter and should return a string (what will be displayed
    in the cell).
    Warning: proper rendering of the generated html code depends on html_tree.css
    """
    tree_depth = tree.depth_down()
    if render_node is None:
        render_node = str
    # helper function that build a matrix from the tree, like:
    # +------+-----------+-----------+
    # | root | child_1_1 | child_2_1 |
    # | root | child_1_1 | child_2_2 |
    # | root | child_1_2 |           |
    # | root | child_1_3 | child_2_3 |
    # | root | child_1_3 | child_2_4 |
    # +------+-----------+-----------+
    # from:
    # root -+- child_1_1 -+- child_2_1
    #       |             |
    #       |             +- child_2_2
    #       +- child_1_2
    #       |
    #       +- child1_3 -+- child_2_3
    #                    |
    #                    +- child_2_2
    def build_matrix(path, matrix):
        # One matrix row per root-to-leaf path.
        if path[-1].is_leaf():
            matrix.append(path[:])
        else:
            for child in path[-1].children:
                build_matrix(path[:] + [child], matrix)
    matrix = []
    build_matrix([tree], matrix)
    # make all lines in the matrix have the same number of columns
    for line in matrix:
        line.extend([None]*(tree_depth-len(line)))
    # Blank out cells repeated from the previous row (bottom-up) so each
    # node appears only once in its column.
    for i in range(len(matrix)-1, 0, -1):
        prev_line, line = matrix[i-1:i+1]
        for j in range(len(line)):
            if line[j] == prev_line[j]:
                line[j] = None
    # We build the matrix of link types (between 2 cells on a line of the matrix)
    # link types are :
    link_types = {(True, True, True ): 1, # T
                  (False, False, True ): 2, # |
                  (False, True, True ): 3, # + (actually, vert. bar with horiz. bar on the right)
                  (False, True, False): 4, # L
                  (True, True, False): 5, # -
                  }
    links = []
    for i, line in enumerate(matrix):
        links.append([])
        for j in range(tree_depth-1):
            cell_11 = line[j] is not None
            cell_12 = line[j+1] is not None
            cell_21 = line[j+1] is not None and line[j+1].next_sibling() is not None
            link_type = link_types.get((cell_11, cell_12, cell_21), 0)
            # Propagate a vertical bar downwards through empty cells.
            if link_type == 0 and i > 0 and links[i-1][j] in (1, 2, 3):
                link_type = 2
            links[-1].append(link_type)
    # We can now generate the HTML code for the <table>
    s = u'<table class="tree">\n'
    if caption:
        s += '<caption>%s</caption>\n' % caption
    for i, link_line in enumerate(links):
        line = matrix[i]
        s += '<tr>'
        for j, link_cell in enumerate(link_line):
            cell = line[j]
            if cell:
                if cell.id == selected_node:
                    s += '<td class="tree_cell" rowspan="2"><div class="selected tree_cell">%s</div></td>' % (render_node(cell.id))
                else:
                    s += '<td class="tree_cell" rowspan="2"><div class="tree_cell">%s</div></td>' % (render_node(cell.id))
            else:
                s += '<td rowspan="2">&nbsp;</td>'
            s += '<td class="tree_cell_%d_1">&nbsp;</td>' % link_cell
            s += '<td class="tree_cell_%d_2">&nbsp;</td>' % link_cell
        # Last column has no trailing link cells.
        cell = line[-1]
        if cell:
            if cell.id == selected_node:
                s += '<td class="tree_cell" rowspan="2"><div class="selected tree_cell">%s</div></td>' % (render_node(cell.id))
            else:
                s += '<td class="tree_cell" rowspan="2"><div class="tree_cell">%s</div></td>' % (render_node(cell.id))
        else:
            s += '<td rowspan="2">&nbsp;</td>'
        s += '</tr>\n'
        # Second table row of the pair carries the lower half of the links.
        if link_line:
            s += '<tr>'
            for j, link_cell in enumerate(link_line):
                s += '<td class="tree_cell_%d_3">&nbsp;</td>' % link_cell
                s += '<td class="tree_cell_%d_4">&nbsp;</td>' % link_cell
            s += '</tr>\n'
    s += '</table>'
    return s
| bsd-3-clause |
javier3407/Plugin.Video.javierhay.tv | framescrape.py | 10 | 5130 | # -*- coding: utf-8 -*-
#--------------------------------------------------------
# creado por quequeQ para PalcoTV
# (http://forum.rojadirecta.es/member.php?1370946-quequeQ)
# (http://xbmcspain.com/foro/miembro/quequino/)
# Version 0.0.3 (01.11.2014)
#--------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#--------------------------------------------------------
import os,sys,urlparse,urllib,urllib2,re,shutil,zipfile,cookielib
import xbmc,xbmcgui,xbmcaddon,xbmcplugin
import plugintools,ioncube,nstream
from plugintools import *
from nstream import *
from ioncube import *
home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/', ''))
art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/art', ''))
playlists = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/playlists', ''))
tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/tmp', ''))
tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/resources/tools', ''))
icon = art + 'icon.png'
fanart = 'fanart.jpg'
def frame_parserl(params):
    """Scrape a channel-list page (popUp() links) and add one PalcoTV item per channel."""
    url = params.get("url")
    print "START="+params.get("url")
    # Pick scraping patterns depending on which provider menu was clicked.
    if params.get("title")=="[COLOR=red]Pon[COLOR=yellow]Tu[COLOR=red]Canal[/COLOR][/COLOR][/COLOR]" :
        pattern1 = 'popUp\(\'([^\']+).*src="([^"]+)'
        pattern2 = "http://canalesgratis.me/canales/"#http://canalesgratis.me/canales/ant3op2.php
        pattern3 = ".php"
    else :#PonLaTele
        pattern1 = 'popUp\(\'([^\']+).*src="([^"]+)'
        pattern2 = "http://verdirectotv.com/canales/"
        pattern3 = ".html"
    request_headers=[]
    request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
    body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
    data=body
    ref = url
    # Each match is a (channel page url, thumbnail url) pair.
    matches = find_multiple_matches_multi(data,pattern1)
    i=0
    for scrapedurl, scrapedthumbnail in matches:
        thumb = scrapedthumbnail
        url = urlparse.urljoin( params.get("url") , scrapedurl.strip() )
        import string
        # Build a display title from the URL slug, capitalized.
        title = url.replace(pattern2,"").replace(pattern3,"").replace("-"," ");title = string.capwords(title)
        # Alternate red/yellow coloring of the first letter per row.
        if i%2==0:
            p1 = title[0]
            p2 = "[COLOR=red]"+title[0]+"[/COLOR]"
            title = title.replace(p1,p2);
        else:
            p1 = title[0]
            p2 = "[COLOR=yellow]"+title[0]+"[/COLOR]"
            title = title.replace(p1,p2);
        i+=1
        msg = "Resolviendo enlace ... "
        # Pack url/title/referer into one string; frame_parser2 splits on '@'.
        uri = url+'@'+title+'@'+ref
        plugintools.add_item( action="frame_parser2" , title=title , url=uri ,thumbnail=thumb ,fanart=thumb , isPlayable=True, folder=False )
def frame_parser2(params):
    """Follow nested iframes from a channel page, then hand off to jscalpe()."""
    #regex='<iframe.*?src="([^\'"]*).*?<\/iframe>|"window\.open\(\'([^\']+)'# for future versions
    regex='<iframe.*?src="([^\'"]*).*?<\/iframe>'
    url,title,thumbnail = params.get("url"),params.get("title"),params.get("thumbnail")
    # url was packed by frame_parserl as "url@title@referer".
    url=url.split('@');title=url[1];ref=url[2];url=url[0];
    body='';bodyi=[];urli=[];bodyy='';enctrdiy=[];enctrdi=[];urlo=[url];
    i=0;j=len(urlo);urli=[url];
    # Breadth-first crawl: every iframe src found is appended to urlo and
    # fetched in turn, with its parent page as referer (urli).
    while i < j:
        ref=urli[i];
        url=urlo[i];
        try:
            curl_frame(url,ref,body,bodyi,bodyy,urli);
        except:
            pass
        bodyy=' '.join([str(y) for y in bodyi]);
        enctrd=find_multiple_matches_multi(bodyy,regex);enctrd=list(set(enctrd))
        for q in enctrd:
            if q not in urlo:urlo[len(urlo):]=[q];urli[len(urli):]=[url];
        j=len(urlo);i+=1;
    try:
        jscalpe(bodyy,url,ref)
    except:
        pass
    print "LIST OF URL's=",urli;
    #jscalpe(bodyy,url,ref)# swap the comment with the jscalpe line above
def jscalpe(bodyy,url,ref):
    """Scan the accumulated page bodies for known streaming hosts and resolve them."""
    # Known host/player keywords to look for inside <script> blocks.
    p=('m3u8','freelivetv','freetvcast','goo\.gl','vercosasgratis','byetv','9stream','castalba','direct2watch','kbps','flashstreaming','cast247','ilive','freebroadcast','flexstream','mips','veemi','yocast','yukons','ilive','iguide','ucaster','ezcast','plusligaonline','tvonlinegratis');z=len(p);
    for i in range(0, z):
        regex='<script.*?('+str(p[i])+').*?<\/script>'
        caster=[];
        enctrd=plugintools.find_single_match(bodyy,regex);
        # !!! Remove the "if" below to see every "enctrd" found for each "p" caster !!!
        if len(enctrd)>0:
            caster=''.join(map(str,enctrd));
            print caster;
            import re
            # Capture the script body and the src URL mentioning this caster.
            r = re.compile('(<script.*?(?=>)>(.*?)(?=<))?.*?src=\'?"?(.*?'+caster+'[^\'",;]+)', re.VERBOSE)
            res = re.findall(r,bodyy);
            if caster.find('m3u8') >=0:
                # HLS stream: grab the playlist URL directly.
                r = 'file=(.*?m3u8)'
                res = plugintools.find_single_match(bodyy,r);
                res=filter(None,res);res=str(res);script='';
                nstream(url,ref,caster,res,script)
            else:
                res=filter(None,res);#print res
                res=list(set(res));
                script=''.join(map(str,res));
                nstream(url,ref,caster,res,script)
def curl_frame(url,ref,body,bodyi,bodyy,urli):
    """Fetch *url* with browser-like headers and append its body to *bodyi* (in place)."""
    request_headers=[];
    request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
    request_headers.append(["Referer",ref])
    try:
        body,response_headers=plugintools.read_body_and_headers(url, headers=request_headers);
        bodyi+=([body]);
    except:
        #ACTIVATE LINE (print in try) to view headers and errors!!!
        pass
def find_multiple_matches_multi(text, pattern):
    """Return every match of *pattern* in *text* using MULTILINE mode.

    Thin wrapper over ``re.findall``: with several capture groups each match
    is a tuple, with a single group it is a plain string. The scrapers above
    use it to pull e.g. (url, thumbnail) pairs out of channel pages.
    """
    # re.MULTILINE makes ^ and $ match at every line boundary.
    matches = re.findall(pattern, text, re.MULTILINE)
    return matches
arokem/PyEMMA | pyemma/util/units.py | 2 | 5242 |
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'noe'
import numpy as np
class TimeUnit(object):
    """A physical time unit with a multiplier, e.g. '2 ps', '10 ns' or '1 step'.

    'step' means there is no physical time unit attached.
    """

    # Unit codes. All but _UNIT_STEP index into _unit_names.
    _UNIT_STEP = -1
    _UNIT_FS = 0
    _UNIT_PS = 1
    _UNIT_NS = 2
    _UNIT_US = 3
    _UNIT_MS = 4
    _UNIT_S = 5

    _unit_names = ['fs', 'ps', 'ns', 'us', 'ms', 's']

    # (abbreviation, long-name prefix, unit code) used when parsing strings.
    _unit_spellings = [('fs', 'femtosecond', _UNIT_FS),
                       ('ps', 'picosecond', _UNIT_PS),
                       ('ns', 'nanosecond', _UNIT_NS),
                       ('us', 'microsecond', _UNIT_US),
                       ('ms', 'millisecond', _UNIT_MS),
                       ('s', 'second', _UNIT_S)]

    def __init__(self, unit='1 step'):
        """
        Initializes the time unit object.

        Parameters
        ----------
        unit : str or TimeUnit
            Description of a physical time unit, or another TimeUnit to copy.
            By default '1 step', i.e. there is no physical time unit.
            Specify by a number, whitespace and unit. Permitted units are
            (* is an arbitrary string):
            'fs'/'femtosecond*', 'ps'/'picosecond*', 'ns'/'nanosecond*',
            'us'/'microsecond*', 'ms'/'millisecond*', 's'/'second*'.

        Raises
        ------
        ValueError
            If the string has more than two tokens or names an unknown unit.
        """
        if isinstance(unit, TimeUnit):  # copy constructor
            self._factor = unit._factor
            self._unit = unit._unit
            return
        words = unit.lower().split(' ')
        if len(words) == 1:
            # Unit only: implicit multiplier of 1.
            self._factor = 1.0
            unitstring = words[0]
        elif len(words) == 2:
            self._factor = float(words[0])
            unitstring = words[1]
        else:
            raise ValueError('Illegal input string: ' + str(unit))
        self._unit = self._parse_unitstring(unitstring, unit)

    @classmethod
    def _parse_unitstring(cls, unitstring, original):
        """Map a lower-cased unit spelling to its internal unit code."""
        if unitstring == 'step':
            return cls._UNIT_STEP
        for abbrev, longname, code in cls._unit_spellings:
            if unitstring == abbrev or unitstring.startswith(longname):
                return code
        raise ValueError('Time unit is not understood: ' + str(original))

    def __str__(self):
        if self._unit == self._UNIT_STEP:
            return str(self._factor) + ' step'
        return str(self._factor) + ' ' + self._unit_names[self._unit]

    @property
    def dt(self):
        """Multiplier of the elementary unit (e.g. 2.0 for '2 ns')."""
        return self._factor

    @property
    def unit(self):
        """Internal unit code (one of the _UNIT_* constants)."""
        return self._unit

    def get_scaled(self, factor):
        """Return a new TimeUnit whose multiplier is scaled by *factor*."""
        import copy
        res = copy.deepcopy(self)
        res._factor *= factor
        return res

    def rescale_around1(self, times):
        """
        Suggests a rescaling factor and new physical time unit to balance the
        given time multiples around 1.

        Parameters
        ----------
        times : float array
            array of times in multiple of the present elementary unit

        Returns
        -------
        (times, unit_name) : (float array, str)
            The rescaled times and the name of the matching physical unit.

        Notes
        -----
        Fixes over the original implementation: the loops no longer step past
        either end of ``_unit_names`` ('fs' and 's' are hard limits — before,
        the small branch could wrap to index -1 ('s') and the large branch
        could raise IndexError), and the no-rescaling branch now returns the
        unit *name* like the other branches instead of the integer unit code.
        """
        if self._unit == self._UNIT_STEP:
            return times, 'step'  # no physical unit to rescale
        m = np.mean(times)
        mult = 1.0
        cur_unit = self._unit
        if m < 0.001:
            # Numbers too small: enlarge the values, move to a finer unit.
            # Stop at 'fs' (index 0): there is no smaller unit.
            while mult * m < 0.001 and cur_unit > 0:
                mult *= 1000
                cur_unit -= 1
            return mult * times, self._unit_names[cur_unit]
        if m > 1000:
            # Numbers too large: shrink the values, move to a coarser unit.
            # Stop at 's' (last index): there is no larger unit.
            while mult * m > 1000 and cur_unit < len(self._unit_names) - 1:
                mult /= 1000
                cur_unit += 1
            return mult * times, self._unit_names[cur_unit]
        # Already balanced: keep the values, report the current unit's name.
        return times, self._unit_names[self._unit]
jseabold/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_1.py | 5 | 2569 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.
I have not figured out storage, so the download happens at each run
of the script.
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import os
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
def getquotes(symbol, start, end):
    # Taken from the no-longer-existent pandas.examples.finance
    """Download daily historical quotes for *symbol* between *start* and
    *end* from Yahoo and return them as a DataFrame indexed by date,
    with columns open/close/high/low/volume."""
    rows = fin.quotes_historical_yahoo(symbol, start, end)
    # Transpose the per-day tuples into per-field columns.
    columns = lzip(*rows)
    ordinals = columns[0]
    data = dict(zip(('open', 'close', 'high', 'low', 'volume'), columns[1:]))
    index = pd.Index([dt.datetime.fromordinal(int(d)) for d in ordinals])
    return pd.DataFrame(data, index=index)
# --- Script body: download DJ30 quotes, plot daily returns and volatility ---
start_date = dt.datetime(2007, 1, 1)
end_date = dt.datetime(2009, 12, 31)

# Dow Jones 30 ticker symbols (as of around 2010).
dj30 = ['MMM', 'AA', 'AXP', 'T', 'BAC', 'BA', 'CAT', 'CVX', 'CSCO',
        'KO', 'DD', 'XOM', 'GE', 'HPQ', 'HD', 'INTC', 'IBM', 'JNJ',
        'JPM', 'KFT', 'MCD', 'MRK', 'MSFT', 'PFE', 'PG', 'TRV',
        'UTX', 'VZ', 'WMT', 'DIS']
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']

# download data
dmall = {}
for sy in dj30:
    dmall[sy] = getquotes(sy, start_date, end_date)

# combine into WidePanel
pawp = pd.WidePanel.fromDict(dmall)
print(pawp.values.shape)

# select closing prices
paclose = pawp.getMinorXS('close')

# take log and first difference over time to get the daily rate of return
paclose_ratereturn = paclose.apply(np.log).diff()

if not os.path.exists('dj30rr'):
    # if pandas is updated, then sometimes unpickling fails, and need to save again
    paclose_ratereturn.save('dj30rr')

plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')

# square the returns to get a (crude) volatility measure
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x: np.power(x, 2))
plt.figure()
# BUGFIX: the title string was missing its closing parenthesis.
plt.title('volatility (with 5 day moving average)')
paclose_ratereturn_vol.plot()

# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
    lambda x: np.convolve(x, np.ones(5) / 5., 'same'))
paclose_ratereturn_vol_mov.plot()
#plt.show()
| bsd-3-clause |
fhfuih/MCEdit-Unified | albow/file_opener.py | 2 | 5561 | #-# This is not an albow component.
#-# It should be moved back to MCEdit root folder, since it does not defines GUI base widgets.
import os
import logging
import panels
import pymclevel
import albow
import mcplatform
from config import config
from albow.translate import _
class FileOpener(albow.Widget):
    """Full-window start screen for MCEdit: lists recent worlds with F-key
    shortcuts, global hotkeys (new/open/quick load) and movement help text."""
    is_gl_container = True

    def __init__(self, mcedit, *args, **kwargs):
        # Fill the whole MCEdit window and build all child widgets.
        kwargs['rect'] = mcedit.rect
        albow.Widget.__init__(self, *args, **kwargs)
        self.anchor = 'tlbr'
        self.mcedit = mcedit
        self.root = self.get_root()
        #-# Translation live update
        self.buildWidgets()

    def buildWidgets(self):
        """(Re)create every child widget.

        Called from __init__ and again from set_update_ui so labels pick up
        translation/keybinding changes."""
        # Drop any widgets from a previous build before recreating them.
        for w in self.subwidgets:
            w.set_parent(None)

        # Right-aligned column of help labels describing the key bindings.
        helpColumn = []
        self.root.movementLabel = label = albow.Label(_("{0}/{1}/{2}/{3}/{4}/{5} to move").format(
            _(config.keys.forward.get()),
            _(config.keys.left.get()),
            _(config.keys.back.get()),
            _(config.keys.right.get()),
            _(config.keys.up.get()),
            _(config.keys.down.get()),
        ), doNotTranslate=True)
        label.anchor = 'whrt'
        label.align = 'r'
        helpColumn.append(label)

        def addHelp(text, dnt=False):
            # Helper: append one right-aligned help label and return it.
            label = albow.Label(text, doNotTranslate=dnt)
            label.anchor = 'whrt'
            label.align = "r"
            helpColumn.append(label)
            return label

        self.root.slowDownLabel = addHelp(_("{0} to slow down").format(_(config.keys.brake.get())), dnt=True)
        self.camCont = addHelp("Right-click to toggle camera control")
        self.toolDist = addHelp("Mousewheel to control tool distance")
        self.root.detailsLabel = addHelp(_("Hold {0} for details").format(_(config.keys.showBlockInfo.get())), dnt=True)

        self.helpColumn = helpColumn = albow.Column(helpColumn, align="r")
        helpColumn.topright = self.topright
        helpColumn.anchor = "whrt"
        # helpColumn.is_gl_container = True
        self.add(helpColumn)

        keysColumn = [albow.Label("")]
        buttonsColumn = [panels.ControlPanel.getHeader()]

        # Build display names for the recent worlds: prefer the level's own
        # name, fall back to the directory name, and truncate long names.
        shortnames = []
        for world in self.mcedit.recentWorlds():
            shortname = os.path.basename(world)
            try:
                if pymclevel.MCInfdevOldLevel.isLevel(world):
                    lev = pymclevel.MCInfdevOldLevel(world, readonly=True)
                    shortname = lev.LevelName
                    if lev.LevelName != lev.displayName:
                        shortname = u"{0} ({1})".format(lev.LevelName, lev.displayName)
            except Exception as e:
                logging.warning(
                    'Couldn\'t get name from recent world: {0!r}'.format(e))
            if shortname == "level.dat":
                shortname = os.path.basename(os.path.dirname(world))
            if len(shortname) > 40:
                shortname = shortname[:37] + "..."
            shortnames.append(shortname)

        # Global actions plus one F-key entry per recent world.
        hotkeys = ([(config.keys.newWorld.get(), 'Create New World', self.createNewWorld),
                    (config.keys.quickLoad.get(), 'Quick Load', self.mcedit.editor.askLoadWorld),
                    (config.keys.open.get(), 'Open...', self.promptOpenAndLoad)] + [
                       ('F{0}'.format(i + 1), shortnames[i], self.createLoadButtonHandler(world))
                       for i, world in enumerate(self.mcedit.recentWorlds())])

        self.root.commandRow = commandRow = albow.HotkeyColumn(hotkeys, keysColumn, buttonsColumn, translateButtons=range(3))
        commandRow.anchor = 'lrh'

        sideColumn1 = self.mcedit.makeSideColumn1()
        sideColumn1.anchor = 'wh'
        spaceLabel = albow.Label("")
        spaceLabel.anchor = 'wh'
        sideColumn2 = self.mcedit.makeSideColumn2()
        sideColumn2.anchor = 'wh'

        contentRow = albow.Row((commandRow, albow.Column((sideColumn1, spaceLabel, sideColumn2))))
        contentRow.center = self.center
        contentRow.anchor = "rh"
        self.contentRow = contentRow
        self.add(contentRow)
        self.invalidate()
        # self.shrink_wrap()

    def set_update_ui(self, v):
        # Rebuild the widgets when the UI is flagged for update (e.g. after a
        # language change) -- see the 'Translation live update' note above.
        albow.Widget.set_update_ui(self, v)
        if v:
            self.buildWidgets()
    #-#

    def gl_draw_self(self, root, offset):
        # Background: delegate to the editor's starfield drawing.
        self.mcedit.editor.drawStars()

    def idleevent(self):
        # Let the editor run one unit of background work while on the menu.
        self.mcedit.editor.doWorkUnit(onMenu=True)

    def key_down(self, evt):
        """Handle the global shortcuts: quit, F1-F5 recent worlds, quick
        load, new world, open, and application quit."""
        keyname = self.root.getKey(evt)
        if keyname == 'Alt-F4':
            raise SystemExit
        if keyname in ('F1', 'F2', 'F3', 'F4', 'F5'):
            self.mcedit.loadRecentWorldNumber(int(keyname[1]))
        if keyname == config.keys.quickLoad.get():
            self.mcedit.editor.askLoadWorld()
        if keyname == config.keys.newWorld.get():
            self.createNewWorld()
        if keyname == config.keys.open.get():
            self.promptOpenAndLoad()
        if keyname == config.keys.quit.get():
            self.mcedit.confirm_quit()
        self.root.fix_sticky_ctrl()

    def promptOpenAndLoad(self):
        """Ask for a world/schematic file via the platform chooser, then load it."""
        #!# Bad! But used to test the file chooser.
        # try:
        filename = mcplatform.askOpenFile(schematics=True)
        if filename:
            self.mcedit.loadFile(filename)
        # except Exception, e:
        #     logging.error('Error during proptOpenAndLoad: {0!r}'.format(e))

    def createNewWorld(self):
        # Delegate world creation to the parent widget.
        self.parent.createNewWorld()

    def createLoadButtonHandler(self, filename):
        # Returns a zero-argument callback bound to *filename*; binding via a
        # dedicated method avoids the classic late-binding closure bug when
        # building handlers in a loop.
        return lambda: self.mcedit.loadFile(filename)
| isc |
ceibal-tatu/sugar | extensions/deviceicon/frame.py | 2 | 2377 | # Copyright (C) 2012, OLPC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gettext import gettext as _
from gi.repository import GConf
from sugar3.graphics.tray import TrayIcon
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.palette import Palette
from sugar3.graphics.xocolor import XoColor
from jarabe.frame.frameinvoker import FrameWidgetInvoker
import jarabe.frame
# Icon shown in the frame tray for the on-screen keyboard (OSK).
_ICON_NAME = 'module-keyboard'
# Whether the Maliit input-method framework could be imported; the OSK
# tray icon only works when it is available.
_HAS_MALIIT = False
try:
    from gi.repository import Maliit
except ImportError:
    logging.debug('Frame: can not create OSK icon: Maliit is not installed.')
    _HAS_MALIIT = False
else:
    _HAS_MALIIT = True
class DeviceView(TrayIcon):
    """Frame tray icon that shows the Maliit on-screen keyboard when clicked."""

    # Ordering hint for where this icon appears in the frame.
    FRAME_POSITION_RELATIVE = 103

    def __init__(self):
        # Color the icon with the user's XO colors from GConf.
        client = GConf.Client.get_default()
        self._color = XoColor(client.get_string('/desktop/sugar/user/color'))
        TrayIcon.__init__(self, icon_name=_ICON_NAME, xo_color=self._color)
        self._input_method = Maliit.InputMethod()
        self.connect('button-release-event', self.__button_release_event_cb)
        self.set_palette_invoker(FrameWidgetInvoker(self))

    def create_palette(self):
        """Build the hover palette shown for this tray icon."""
        palette = Palette(_('Show my keyboard'))
        palette.set_group_id('frame')
        return palette

    def __button_release_event_cb(self, widget, event):
        # On click: close the palette, show the OSK and hide the frame so
        # the keyboard is usable immediately.
        self.props.palette.popdown(immediate=True)
        self._input_method.show()
        frame = jarabe.frame.get_view()
        frame.hide()
def setup(tray):
    """Entry point called to add this device icon to the frame tray.

    Deliberately a no-op for now: everything after the bare ``return`` is
    dead code on purpose (see the inline comment and OLPC ticket 12281).
    """
    return
    # Disable the option for now, as manual invocation
    # of the OSK has many unresolved corner cases, see
    # http://dev.laptop.org/ticket/12281
    if _HAS_MALIIT:
        tray.add_device(DeviceView())
| gpl-2.0 |
cpennington/edx-platform | openedx/core/djangoapps/content/course_overviews/migrations/0015_historicalcourseoverview.py | 4 | 4587 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-30 21:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import opaque_keys.edx.django.models
import simple_history.models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (Django 1.11.20): creates the
    # django-simple-history table mirroring CourseOverview fields plus the
    # history_* bookkeeping columns.  Do not hand-edit field definitions.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course_overviews', '0014_courseoverview_certificate_available_date'),
    ]

    operations = [
        migrations.CreateModel(
            name='HistoricalCourseOverview',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('version', models.IntegerField()),
                ('id', opaque_keys.edx.django.models.CourseKeyField(db_index=True, max_length=255)),
                ('_location', opaque_keys.edx.django.models.UsageKeyField(max_length=255)),
                ('org', models.TextField(default=u'outdated_entry', max_length=255)),
                ('display_name', models.TextField(null=True)),
                ('display_number_with_default', models.TextField()),
                ('display_org_with_default', models.TextField()),
                ('start', models.DateTimeField(null=True)),
                ('end', models.DateTimeField(null=True)),
                ('advertised_start', models.TextField(null=True)),
                ('announcement', models.DateTimeField(null=True)),
                ('course_image_url', models.TextField()),
                ('social_sharing_url', models.TextField(null=True)),
                ('end_of_course_survey_url', models.TextField(null=True)),
                ('certificates_display_behavior', models.TextField(null=True)),
                ('certificates_show_before_end', models.BooleanField(default=False)),
                ('cert_html_view_enabled', models.BooleanField(default=False)),
                ('has_any_active_web_certificate', models.BooleanField(default=False)),
                ('cert_name_short', models.TextField()),
                ('cert_name_long', models.TextField()),
                ('certificate_available_date', models.DateTimeField(default=None, null=True)),
                ('lowest_passing_grade', models.DecimalField(decimal_places=2, max_digits=5, null=True)),
                ('days_early_for_beta', models.FloatField(null=True)),
                ('mobile_available', models.BooleanField(default=False)),
                ('visible_to_staff_only', models.BooleanField(default=False)),
                ('_pre_requisite_courses_json', models.TextField()),
                ('enrollment_start', models.DateTimeField(null=True)),
                ('enrollment_end', models.DateTimeField(null=True)),
                ('enrollment_domain', models.TextField(null=True)),
                ('invitation_only', models.BooleanField(default=False)),
                ('max_student_enrollments_allowed', models.IntegerField(null=True)),
                ('catalog_visibility', models.TextField(null=True)),
                ('short_description', models.TextField(null=True)),
                ('course_video_url', models.TextField(null=True)),
                ('effort', models.TextField(null=True)),
                ('self_paced', models.BooleanField(default=False)),
                ('marketing_url', models.TextField(null=True)),
                ('eligible_for_financial_aid', models.BooleanField(default=True)),
                ('language', models.TextField(null=True)),
                # simple_history bookkeeping columns below.
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
                'verbose_name': 'historical course overview',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
    ]
| agpl-3.0 |
palladius/gcloud | packages/gsutil/boto/tests/integration/sts/test_session_token.py | 4 | 2425 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Session Tokens
"""
import unittest
import time
import os
from boto.sts.connection import STSConnection
from boto.sts.credentials import Credentials
from boto.s3.connection import S3Connection
class SessionTokenTest (unittest.TestCase):
    """Integration test (Python 2): round-trip an STS session token through
    disk and use it to authenticate an S3 request."""
    # NOTE(review): presumably a marker the test harness uses to select
    # tests that need live STS credentials -- verify against the runner.
    sts = True

    def test_session_token(self):
        print '--- running Session Token tests ---'
        c = STSConnection()
        # Create a session token
        token = c.get_session_token()
        # Save session token to a file
        token.save('token.json')
        # Now load up a copy of that token
        token_copy = Credentials.load('token.json')
        assert token_copy.access_key == token.access_key
        assert token_copy.secret_key == token.secret_key
        assert token_copy.session_token == token.session_token
        assert token_copy.expiration == token.expiration
        assert token_copy.request_id == token.request_id
        os.unlink('token.json')
        assert not token.is_expired()
        # Try using the session token with S3
        s3 = S3Connection(aws_access_key_id=token.access_key,
                          aws_secret_access_key=token.secret_key,
                          security_token=token.session_token)
        buckets = s3.get_all_buckets()
        print '--- tests completed ---'
| gpl-3.0 |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/distutils/command/install_egg_info.py | 103 | 2594 | """distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
    """Install an .egg-info file for the package"""

    description = "Install package's PKG-INFO metadata as an .egg-info file"
    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        # Filled in from the 'install_lib' command in finalize_options().
        self.install_dir = None

    def finalize_options(self):
        self.set_undefined_options('install_lib',('install_dir','install_dir'))
        # Target file name: <name>-<version>-py<X.Y>.egg-info, with name and
        # version escaped to filesystem-safe forms.
        basename = "%s-%s-py%s.egg-info" % (
            to_filename(safe_name(self.distribution.get_name())),
            to_filename(safe_version(self.distribution.get_version())),
            sys.version[:3]
        )
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = [self.target]

    def run(self):
        target = self.target
        # Clear any pre-existing entry at the target path (directory, file,
        # or link), or create the install directory if missing.
        if os.path.isdir(target) and not os.path.islink(target):
            dir_util.remove_tree(target, dry_run=self.dry_run)
        elif os.path.exists(target):
            self.execute(os.unlink,(self.target,),"Removing "+target)
        elif not os.path.isdir(self.install_dir):
            self.execute(os.makedirs, (self.install_dir,),
                         "Creating "+self.install_dir)
        log.info("Writing %s", target)
        # Only touch the filesystem when not in dry-run mode.
        if not self.dry_run:
            with open(target, 'w', encoding='UTF-8') as f:
                self.distribution.metadata.write_pkg_file(f)

    def get_outputs(self):
        # Files produced by this command (used by 'install --record').
        return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    invalid_run = re.compile(r'[^A-Za-z0-9.]+')
    return invalid_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string

    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.compile(r'[^A-Za-z0-9.]+').sub('-', dotted)
def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
| lgpl-3.0 |
Hani-K/H-Vitamin2_trelte | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Command-line handling: the single optional argument is either a pid
# (numeric) or a comm (process name) to filter the summary on.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n"
for_comm = None   # filter by process name, if the argument is not numeric
for_pid = None    # filter by pid, if the argument parses as an int
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    # BUGFIX: catch only ValueError instead of a bare except, which also
    # swallowed KeyboardInterrupt/SystemExit.
    except ValueError:
        for_comm = sys.argv[1]
# Auto-vivifying nested dict: syscalls[comm][pid][syscall_id] -> count
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts (Python 2 print).
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event: emit the per-pid summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid, common_comm,
                            id, args):
    # Handler invoked by perf for each raw_syscalls:sys_enter event.
    # Skip events that do not match the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First hit for this (comm, pid, id): the autodict leaf is not an
        # int yet, so initialise the counter.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Emit a table of syscall counts grouped by comm/pid, sorted by count
    # descending.  Trailing commas on the Python 2 print statements
    # suppress the automatic newline.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Sinsoftomorrow/android_kernel_lge_g3 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Command-line handling: the single optional argument is either a pid
# (numeric) or a comm (process name) to filter the summary on.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n"
for_comm = None   # filter by process name, if the argument is not numeric
for_pid = None    # filter by pid, if the argument parses as an int
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    # BUGFIX: catch only ValueError instead of a bare except, which also
    # swallowed KeyboardInterrupt/SystemExit.
    except ValueError:
        for_comm = sys.argv[1]
# Auto-vivifying nested dict: syscalls[comm][pid][syscall_id] -> count
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts (Python 2 print).
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event: emit the per-pid summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid, common_comm,
                            id, args):
    # Handler invoked by perf for each raw_syscalls:sys_enter event.
    # Skip events that do not match the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First hit for this (comm, pid, id): the autodict leaf is not an
        # int yet, so initialise the counter.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Emit a table of syscall counts grouped by comm/pid, sorted by count
    # descending.  Trailing commas on the Python 2 print statements
    # suppress the automatic newline.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
halvertoluke/edx-platform | openedx/core/djangoapps/profile_images/tests/test_images.py | 111 | 7026 | """
Test cases for image processing functions in the profile image package.
"""
from contextlib import closing
from itertools import product
import os
from tempfile import NamedTemporaryFile
import unittest
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.test import TestCase
from django.test.utils import override_settings
import ddt
import mock
from PIL import Image
from ..images import (
FILE_UPLOAD_TOO_LARGE,
FILE_UPLOAD_TOO_SMALL,
FILE_UPLOAD_BAD_TYPE,
FILE_UPLOAD_BAD_EXT,
FILE_UPLOAD_BAD_MIMETYPE,
create_profile_images,
ImageValidationError,
remove_profile_images,
validate_uploaded_image,
)
from .helpers import make_image_file, make_uploaded_file
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
class TestValidateUploadedImage(TestCase):
    """
    Test validate_uploaded_image
    """
    def check_validation_result(self, uploaded_file, expected_failure_message):
        """
        Internal DRY helper: expect an ImageValidationError carrying
        *expected_failure_message*, or (when it is None) expect success.
        """
        if expected_failure_message is not None:
            with self.assertRaises(ImageValidationError) as ctx:
                validate_uploaded_image(uploaded_file)
            self.assertEqual(ctx.exception.message, expected_failure_message)
        else:
            validate_uploaded_image(uploaded_file)
            # Successful validation must leave the file rewound to position 0.
            self.assertEqual(uploaded_file.tell(), 0)

    @ddt.data(
        (99, FILE_UPLOAD_TOO_SMALL),
        (100, ),
        (1024, ),
        (1025, FILE_UPLOAD_TOO_LARGE),
    )
    @ddt.unpack
    @override_settings(PROFILE_IMAGE_MIN_BYTES=100, PROFILE_IMAGE_MAX_BYTES=1024)
    def test_file_size(self, upload_size, expected_failure_message=None):
        """
        Ensure that files outside the accepted size range fail validation.
        """
        with make_uploaded_file(
            dimensions=(1, 1), extension=".png", content_type="image/png", force_size=upload_size
        ) as uploaded_file:
            self.check_validation_result(uploaded_file, expected_failure_message)

    @ddt.data(
        (".gif", "image/gif"),
        (".jpg", "image/jpeg"),
        (".jpeg", "image/jpeg"),
        (".png", "image/png"),
        (".bmp", "image/bmp", FILE_UPLOAD_BAD_TYPE),
        (".tif", "image/tiff", FILE_UPLOAD_BAD_TYPE),
    )
    @ddt.unpack
    def test_extension(self, extension, content_type, expected_failure_message=None):
        """
        Ensure that files whose extension is not supported fail validation.
        """
        with make_uploaded_file(extension=extension, content_type=content_type) as uploaded_file:
            self.check_validation_result(uploaded_file, expected_failure_message)

    def test_extension_mismatch(self):
        """
        Ensure that validation fails when the file extension does not match the
        file data.
        """
        # make a bmp, try to fool the function into thinking it's a jpeg
        with make_image_file(extension=".bmp") as bmp_file:
            with closing(NamedTemporaryFile(suffix=".jpeg")) as fake_jpeg_file:
                fake_jpeg_file.write(bmp_file.read())
                fake_jpeg_file.seek(0)
                uploaded_file = UploadedFile(
                    fake_jpeg_file,
                    content_type="image/jpeg",
                    size=os.path.getsize(fake_jpeg_file.name)
                )
                with self.assertRaises(ImageValidationError) as ctx:
                    validate_uploaded_image(uploaded_file)
                self.assertEqual(ctx.exception.message, FILE_UPLOAD_BAD_EXT)

    def test_content_type(self):
        """
        Ensure that validation fails when the content_type header and file
        extension do not match
        """
        with make_uploaded_file(extension=".jpeg", content_type="image/gif") as uploaded_file:
            with self.assertRaises(ImageValidationError) as ctx:
                validate_uploaded_image(uploaded_file)
            self.assertEqual(ctx.exception.message, FILE_UPLOAD_BAD_MIMETYPE)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
class TestGenerateProfileImages(TestCase):
    """
    Test create_profile_images
    """
    # Cartesian product: every supported input format x a range of
    # square and non-square input dimensions.
    @ddt.data(
        *product(
            ["gif", "jpg", "png"],
            [(1, 1), (10, 10), (100, 100), (1000, 1000), (1, 10), (10, 100), (100, 1000), (1000, 999)],
        )
    )
    @ddt.unpack
    def test_generation(self, image_type, dimensions):
        """
        Ensure that regardless of the input format or dimensions, the outcome
        of calling the function is square jpeg files with explicitly-requested
        dimensions being saved to the profile image storage backend.
        """
        extension = "." + image_type
        content_type = "image/" + image_type
        requested_sizes = {
            10: "ten.jpg",
            100: "hundred.jpg",
            1000: "thousand.jpg",
        }
        # Capture backend writes via a mocked storage object.
        mock_storage = mock.Mock()
        with make_uploaded_file(dimensions=dimensions, extension=extension, content_type=content_type) as uploaded_file:
            with mock.patch(
                "openedx.core.djangoapps.profile_images.images.get_profile_image_storage",
                return_value=mock_storage,
            ):
                create_profile_images(uploaded_file, requested_sizes)
                # Positional args of each storage.save(name, file) call.
                names_and_files = [v[0] for v in mock_storage.save.call_args_list]
                actual_sizes = {}
                for name, file_ in names_and_files:
                    # get the size of the image file and ensure it's square jpeg
                    with closing(Image.open(file_)) as image_obj:
                        width, height = image_obj.size
                        self.assertEqual(width, height)
                        self.assertEqual(image_obj.format, 'JPEG')
                    actual_sizes[width] = name
                self.assertEqual(requested_sizes, actual_sizes)
                mock_storage.save.reset_mock()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
class TestRemoveProfileImages(TestCase):
    """
    Test remove_profile_images
    """
    def test_remove(self):
        """
        Ensure that the outcome of calling the function is that the named images
        are deleted from the profile image storage backend.
        """
        requested_sizes = {
            10: "ten.jpg",
            100: "hundred.jpg",
            1000: "thousand.jpg",
        }
        # Capture backend deletes via a mocked storage object.
        mock_storage = mock.Mock()
        with mock.patch(
            "openedx.core.djangoapps.profile_images.images.get_profile_image_storage",
            return_value=mock_storage,
        ):
            remove_profile_images(requested_sizes)
            # First positional arg of each storage.delete(name) call.
            deleted_names = [v[0][0] for v in mock_storage.delete.call_args_list]
            self.assertEqual(requested_sizes.values(), deleted_names)
            mock_storage.save.reset_mock()
| agpl-3.0 |
paddyvishnubhatt/cryptocurrency | lib/flask/testing.py | 121 | 5630 | # -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import werkzeug
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs):
    """Create a werkzeug :class:`EnvironBuilder`, deriving a default
    ``base_url`` from the application's ``SERVER_NAME`` and
    ``APPLICATION_ROOT`` configuration when none is given."""
    server_name = app.config.get('SERVER_NAME')
    app_root = app.config.get('APPLICATION_ROOT')
    if base_url is not None:
        # Caller supplied a base URL explicitly; use it as-is.
        return EnvironBuilder(path, base_url, *args, **kwargs)
    parts = url_parse(path)
    host = parts.netloc or server_name or 'localhost'
    base_url = 'http://%s/' % host
    if app_root:
        base_url += app_root.lstrip('/')
    if parts.netloc:
        # An absolute URL was passed as the path: keep only path + query.
        path = parts.path
        if parts.query:
            path = '%s?%s' % (path, parts.query)
    return EnvironBuilder(path, base_url, *args, **kwargs)
class FlaskClient(Client):
    """Works like a regular Werkzeug test client but has some knowledge about
    how Flask works to defer the cleanup of the request context stack to the
    end of a ``with`` body when used in a ``with`` statement. For general
    information about how to use this class refer to
    :class:`werkzeug.test.Client`.

    .. versionchanged:: 0.12
       `app.test_client()` includes preset default environment, which can be
       set after instantiation of the `app.test_client()` object in
       `client.environ_base`.

    Basic usage is outlined in the :ref:`testing` chapter.
    """

    # Set while inside a ``with client:`` block; see __enter__/__exit__.
    preserve_context = False

    def __init__(self, *args, **kwargs):
        super(FlaskClient, self).__init__(*args, **kwargs)
        # Default WSGI environ values merged into every request.
        self.environ_base = {
            "REMOTE_ADDR": "127.0.0.1",
            "HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
        }

    @contextmanager
    def session_transaction(self, *args, **kwargs):
        """When used in combination with a ``with`` statement this opens a
        session transaction. This can be used to modify the session that
        the test client uses. Once the ``with`` block is left the session is
        stored back.

        ::

            with client.session_transaction() as session:
                session['value'] = 42

        Internally this is implemented by going through a temporary test
        request context and since session handling could depend on
        request variables this function accepts the same arguments as
        :meth:`~flask.Flask.test_request_context` which are directly
        passed through.
        """
        if self.cookie_jar is None:
            raise RuntimeError('Session transactions only make sense '
                               'with cookies enabled.')
        app = self.application
        environ_overrides = kwargs.setdefault('environ_overrides', {})
        self.cookie_jar.inject_wsgi(environ_overrides)
        outer_reqctx = _request_ctx_stack.top
        with app.test_request_context(*args, **kwargs) as c:
            sess = app.open_session(c.request)
            if sess is None:
                raise RuntimeError('Session backend did not open a session. '
                                   'Check the configuration')
            # Since we have to open a new request context for the session
            # handling we want to make sure that we hide our own context
            # from the caller. By pushing the original request context
            # (or None) on top of this and popping it we get exactly that
            # behavior. It's important to not use the push and pop
            # methods of the actual request context object since that would
            # mean that cleanup handlers are called
            _request_ctx_stack.push(outer_reqctx)
            try:
                yield sess
            finally:
                _request_ctx_stack.pop()
            # Serialize the (possibly modified) session back into cookies so
            # subsequent client requests carry the changes.
            resp = app.response_class()
            if not app.session_interface.is_null_session(sess):
                app.save_session(sess, resp)
            headers = resp.get_wsgi_headers(c.request.environ)
            self.cookie_jar.extract_wsgi(c.request.environ, headers)

    def open(self, *args, **kwargs):
        # Flag context preservation for the request and merge the default
        # environ, then build the WSGI environ and delegate to Werkzeug.
        kwargs.setdefault('environ_overrides', {}) \
            ['flask._preserve_context'] = self.preserve_context
        kwargs.setdefault('environ_base', self.environ_base)
        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        builder = make_test_environ_builder(self.application, *args, **kwargs)
        return Client.open(self, builder,
                           as_tuple=as_tuple,
                           buffered=buffered,
                           follow_redirects=follow_redirects)

    def __enter__(self):
        # Nested ``with client:`` blocks are not supported.
        if self.preserve_context:
            raise RuntimeError('Cannot nest client invocations')
        self.preserve_context = True
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.preserve_context = False
        # on exit we want to clean up earlier. Normally the request context
        # stays preserved until the next request in the same thread comes
        # in. See RequestGlobals.push() for the general behavior.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop()
| apache-2.0 |
rooi/CouchPotatoServer | couchpotato/core/helpers/encoding.py | 4 | 3217 | from couchpotato.core.logger import CPLog
from string import ascii_letters, digits
from urllib import quote_plus
import os
import re
import traceback
import unicodedata
# Module-wide CouchPotato logger for these encoding helpers.
log = CPLog(__name__)
def toSafeString(original):
    """Reduce *original* to ASCII characters that are safe in file names,
    collapsing runs of whitespace to single spaces."""
    allowed = "-_.() %s%s" % (ascii_letters, digits)
    # NFKD-decompose so accented characters fall apart into base letter +
    # combining mark; the ASCII encode then drops the marks.
    ascii_form = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore')
    kept = ''.join(ch for ch in ascii_form if ch in allowed)
    return ' '.join(kept.split())
def simplifyString(original):
    """Normalise a string to simple lower-case space-separated words:
    accents stripped, punctuation and underscores removed."""
    lowered = stripAccents(original.lower())
    safe = toSafeString(' '.join(re.split('\W+', lowered)))
    words = re.split('\W+|_', safe.lower())
    return toUnicode(' '.join(words))
def toUnicode(original, *args):
    """Best-effort conversion of *original* to a unicode string.

    Tries the native ``unicode()`` conversion first, then decoding with the
    environment encoding via ``ek()``.  As a last resort the value's escaped
    byte representation is converted, so this function never raises.

    Args:
        original: the value to convert.
        *args: optional encoding arguments passed to ``unicode()``/``ek()``.

    Returns:
        A unicode version of *original*.
    """
    try:
        if isinstance(original, unicode):
            return original
        try:
            return unicode(original, *args)
        except Exception:
            # Fall back to decoding with the configured environment encoding.
            return ek(original, *args)
    except Exception:
        # Narrowed from bare "except:" so KeyboardInterrupt/SystemExit are
        # not swallowed.  Never fail: log and convert an escaped repr instead.
        log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
        ascii_text = str(original).encode('string_escape')
        return toUnicode(ascii_text)
def ss(original, *args):
    """Encode *original* to a byte string using the configured encoding.

    Falls back to UTF-8 when the environment encoding is unavailable or
    cannot represent the value.
    """
    u_original = toUnicode(original, *args)
    try:
        from couchpotato.environment import Env
        return u_original.encode(Env.get('encoding'))
    # "except Exception as e" replaces the Py2-only comma form; identical
    # behavior, parsable by Python 2.6+ and Python 3.
    except Exception as e:
        log.debug('Failed ss encoding char, force UTF8: %s', e)
        return u_original.encode('UTF-8')
def sp(path, *args):
    """Standardise a path: encoding, case, separators and trailing slashes."""
    if not path or len(path) == 0:
        return path

    # Convert a Windows path (coming from a remote box) to a *nix path.
    if os.path.sep == '/' and '\\' in path:
        path = '/' + path.replace(':', '').replace('\\', '/')

    path = os.path.normpath(ss(path, *args))

    # Strip trailing separators, but keep a lone root separator intact.
    if path != os.path.sep:
        path = path.rstrip(os.path.sep)

    # A bare drive letter on Windows needs a trailing separator,
    # otherwise guessit crashes.
    if len(path) == 2 and path[1] == ':':
        path = path + os.path.sep

    # Replace a *NIX ambiguous leading '//' with '/' (crashes guessit).
    return re.sub('^//', '/', path)
def ek(original, *args):
    """Decode a string value using the configured environment encoding.

    Non-string values are returned untouched.
    """
    if not isinstance(original, (str, unicode)):
        return original
    try:
        from couchpotato.environment import Env
        return original.decode(Env.get('encoding'))
    except UnicodeDecodeError:
        raise
def isInt(value):
    """Return True if *value* can be converted to an integer.

    Also catches TypeError so inconvertible types such as ``None`` return
    False instead of crashing (previously only ValueError was handled).
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        return False
def stripAccents(s):
    """Remove combining accent marks (Unicode category Mn) from *s*."""
    decomposed = unicodedata.normalize('NFD', toUnicode(s))
    return ''.join(c for c in decomposed if unicodedata.category(c) != 'Mn')
def tryUrlencode(s):
    """URL-encode a string character by character, or a dict as a query string."""
    if isinstance(s, dict):
        # Encode each key/value pair and join them with '&'.
        pairs = [u'%s=%s' % (key, tryUrlencode(value))
                 for key, value in s.iteritems()]
        return u'&'.join(pairs)

    encoded = u''
    for letter in ss(s):
        try:
            encoded += quote_plus(letter)
        except:
            # Keep characters quote_plus cannot handle as-is.
            encoded += letter
    return encoded
| gpl-3.0 |
Acidburn0zzz/libyal | libyal-build.py | 1 | 66489 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to automate creating builds of libyal libraries."""
from __future__ import print_function
import abc
import argparse
import fileinput
import glob
import io
import json
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tarfile
import urllib2
try:
import ConfigParser as configparser
except ImportError:
import configparser # pylint: disable=import-error
# pylint: disable=logging-format-interpolation
# Since os.path.abspath() uses the current working directory (cwd)
# os.path.abspath(__file__) will point to a different location if
# cwd has been changed. Hence we preserve the absolute location of __file__.
__file__ = os.path.abspath(__file__)
# Names of the libyal "core" support libraries (cross-platform abstractions
# for I/O, strings, paths, threading, etc.) that other libyal libraries
# depend on.
LIBYAL_LIBRARIES_CORE = frozenset([
    'libbfio',
    'libcdata',
    'libcdatetime',
    'libcdirectory',
    'libcerror',
    'libclocale',
    'libcnotify',
    'libcfile',
    'libcpath',
    'libcsplit',
    'libcstring',
    'libcsystem',
    'libcthreads',
])
# Names of the libyal data-type and utility libraries (dates, GUIDs,
# values, hashing, Unicode handling, etc.).
LIBYAL_LIBRARIES_DATA_TYPES = frozenset([
    'libcaes',
    'libfcache',
    'libfdata',
    'libfdatetime',
    'libfguid',
    'libfmapi',
    'libfole',
    'libfusn',
    'libfvalue',
    'libfwevt',
    'libfwnt',
    'libfwsi',
    'libfwps',
    'libhmac',
    'libuna',
])
class ConfigError(Exception):
  """Raised when the build configuration is invalid or cannot be read."""
class BuildConfiguration(object):
  """Holds the build configuration read from a configuration file."""

  def __init__(self):
    """Initializes the build configuration."""
    super(BuildConfiguration, self).__init__()
    # Names of the libraries to build; filled in by ReadFromFile().
    self.library_names = []

  def _GetConfigValue(self, config_parser, section_name, value_name):
    """Retrieves a value from the config parser.

    Args:
      config_parser: the configuration parser (instance of ConfigParser).
      section_name: the name of the section that contains the value.
      value_name: the name of the value.

    Returns:
      An object containing the value.
    """
    # Configuration values are stored as JSON-encoded strings.
    value_string = config_parser.get(section_name, value_name)
    return json.loads(value_string)

  def ReadFromFile(self, filename):
    """Reads the configuration from file.

    Args:
      filename: the configuration filename.
    """
    # TODO: replace by:
    # config_parser = configparser. ConfigParser(interpolation=None)
    config_parser = configparser.RawConfigParser()
    config_parser.read([filename])
    self.library_names = self._GetConfigValue(
        config_parser, 'libraries', 'names')
class DownloadHelper(object):
  """Class that helps in downloading files and page content.

  Page content is cached per URL so repeated lookups of the same page do
  not trigger additional downloads.
  """

  def __init__(self):
    """Initializes the download helper."""
    super(DownloadHelper, self).__init__()
    self._cached_url = ''
    self._cached_page_content = ''

  def Download(self, project_name, project_version):
    """Downloads the project for a given project name and version.

    Args:
      project_name: the name of the project.
      project_version: the version of the project.

    Returns:
      The filename if successful also if the file was already downloaded
      or None on error.
    """
    download_url = self.GetDownloadUrl(project_name, project_version)
    if not download_url:
      logging.warning(u'Unable to determine download URL for: {0:s}'.format(
          project_name))
      return

    return self.DownloadFile(download_url)

  def DownloadFile(self, download_url):
    """Downloads a file from the URL.

    The filename is extracted from the last part of the URL.

    Args:
      download_url: the URL where to download the file.

    Returns:
      The filename if successful also if the file was already downloaded
      or None on error.
    """
    _, _, filename = download_url.rpartition(u'/')

    if not os.path.exists(filename):
      logging.info(u'Downloading: {0:s}'.format(download_url))

      url_object = urllib2.urlopen(download_url)
      try:
        if url_object.code != 200:
          return

        # Use a context manager so the local file is closed even when
        # reading from the URL fails.
        with open(filename, 'wb') as file_object:
          file_object.write(url_object.read())
      finally:
        # Always release the URL connection (the original leaked it).
        url_object.close()

    return filename

  def DownloadPageContent(self, download_url):
    """Downloads the page content from the URL and caches it.

    Args:
      download_url: the URL where to download the page content.

    Returns:
      The page content if successful, None otherwise.
    """
    if not download_url:
      return

    if self._cached_url != download_url:
      url_object = urllib2.urlopen(download_url)
      try:
        if url_object.code != 200:
          return

        self._cached_page_content = url_object.read()
        self._cached_url = download_url
      finally:
        url_object.close()

    return self._cached_page_content

  @abc.abstractmethod
  def GetDownloadUrl(self, project_name, project_version):
    """Retrieves the download URL for a given project name and version.

    Args:
      project_name: the name of the project.
      project_version: the version of the project.

    Returns:
      The download URL of the project or None on error.
    """
class GithubReleasesDownloadHelper(DownloadHelper):
  """Class that helps in downloading a project with GitHub releases."""

  def __init__(self, organization):
    """Initializes the download helper.

    Args:
      organization: the github organization or user name.
    """
    super(GithubReleasesDownloadHelper, self).__init__()
    self.organization = organization

  def GetLatestVersion(self, project_name):
    """Retrieves the latest version number for a given project name.

    Args:
      project_name: the name of the project.

    Returns:
      The latest version number or 0 on error.
    """
    download_url = u'https://github.com/{0:s}/{1:s}/releases'.format(
        self.organization, project_name)

    page_content = self.DownloadPageContent(download_url)
    if not page_content:
      return 0

    # The format of the project download URL is:
    # /{organization}/{project name}/releases/download/{git tag}/
    # {project name}{status-}{version}.tar.gz
    # Note that the status is optional and will be: beta, alpha or experimental.
    expression_string = (
        u'/{0:s}/{1:s}/releases/download/[^/]*/{1:s}-[a-z-]*([0-9]+)'
        u'[.]tar[.]gz').format(self.organization, project_name)
    matches = re.findall(expression_string, page_content)
    if not matches:
      return 0

    # Compare the versions numerically; a lexicographic max of the matched
    # strings would e.g. pick "9" over "10".
    return max(int(match) for match in matches)

  def GetDownloadUrl(self, project_name, project_version):
    """Retrieves the download URL for a given project name and version.

    Args:
      project_name: the name of the project.
      project_version: the version of the project.

    Returns:
      The download URL of the project or None on error.
    """
    download_url = u'https://github.com/{0:s}/{1:s}/releases'.format(
        self.organization, project_name)

    page_content = self.DownloadPageContent(download_url)
    if not page_content:
      return

    # The format of the project download URL is:
    # /{organization}/{project name}/releases/download/{git tag}/
    # {project name}{status-}{version}.tar.gz
    # Note that the status is optional and will be: beta, alpha or experimental.
    expression_string = (
        u'/{0:s}/{1:s}/releases/download/[^/]*/{1:s}-[a-z-]*{2!s}'
        u'[.]tar[.]gz').format(self.organization, project_name, project_version)
    matches = re.findall(expression_string, page_content)

    if len(matches) != 1:
      # Try finding a match without the status in case the project provides
      # multiple versions with a different status.
      expression_string = (
          u'/{0:s}/{1:s}/releases/download/[^/]*/{1:s}-*{2!s}'
          u'[.]tar[.]gz').format(
              self.organization, project_name, project_version)
      matches = re.findall(expression_string, page_content)

    if not matches or len(matches) != 1:
      return

    return u'https://github.com{0:s}'.format(matches[0])
class GoogleDriveDownloadHelper(DownloadHelper):
  """Class that helps in downloading a Google Drive hosted project."""

  @abc.abstractmethod
  def GetGoogleDriveDownloadsUrl(self, project_name):
    """Retrieves the Google Drive Download URL.

    Args:
      project_name: the name of the project.

    Returns:
      The downloads URL or None on error.
    """

  def GetLatestVersion(self, project_name):
    """Retrieves the latest version number for a given project name.

    Args:
      project_name: the name of the project.

    Returns:
      The latest version number or 0 on error.
    """
    download_url = self.GetGoogleDriveDownloadsUrl(project_name)

    page_content = self.DownloadPageContent(download_url)
    if not page_content:
      return 0

    # The format of the project download URL is:
    # /host/{random string}/{project name}-{status-}{version}.tar.gz
    # Note that the status is optional and will be: beta, alpha or experimental.
    expression_string = u'/host/[^/]*/{0:s}-[a-z-]*([0-9]+)[.]tar[.]gz'.format(
        project_name)
    matches = re.findall(expression_string, page_content)
    if not matches:
      return 0

    # Compare the versions numerically; a lexicographic max of the matched
    # strings would e.g. pick "9" over "10".
    return max(int(match) for match in matches)

  def GetDownloadUrl(self, project_name, project_version):
    """Retrieves the download URL for a given project name and version.

    Args:
      project_name: the name of the project.
      project_version: the version of the project.

    Returns:
      The download URL of the project or None on error.
    """
    download_url = self.GetGoogleDriveDownloadsUrl(project_name)

    page_content = self.DownloadPageContent(download_url)
    if not page_content:
      return

    # The format of the project download URL is:
    # /host/{random string}/{project name}-{status-}{version}.tar.gz
    # Note that the status is optional and will be: beta, alpha or experimental.
    expression_string = u'/host/[^/]*/{0:s}-[a-z-]*{1!s}[.]tar[.]gz'.format(
        project_name, project_version)
    matches = re.findall(expression_string, page_content)

    if len(matches) != 1:
      # Try finding a match without the status in case the project provides
      # multiple versions with a different status.
      expression_string = u'/host/[^/]*/{0:s}-{1!s}[.]tar[.]gz'.format(
          project_name, project_version)
      matches = re.findall(expression_string, page_content)

    if not matches or len(matches) != 1:
      return

    return u'https://googledrive.com{0:s}'.format(matches[0])
# TODO: Merge with LibyalGithubReleasesDownloadHelper when Google Drive
# support is no longer needed.
class LibyalGitHubDownloadHelper(DownloadHelper):
  """Class that helps in downloading a libyal GitHub project."""

  def __init__(self):
    """Initializes the download helper."""
    super(LibyalGitHubDownloadHelper, self).__init__()
    self._download_helper = None

  def GetWikiConfigurationSourcePackageUrl(self, project_name):
    """Retrieves the source package URL from the libyal wiki configuration.

    Args:
      project_name: the name of the project.

    Returns:
      The source package URL or None on error.
    """
    download_url = (
        u'https://raw.githubusercontent.com/libyal/{0:s}/master/'
        u'{0:s}-wiki.ini').format(project_name)

    page_content = self.DownloadPageContent(download_url)
    if not page_content:
      return

    # TODO: replace by:
    # config_parser = configparser. ConfigParser(interpolation=None)
    config_parser = configparser.RawConfigParser()
    config_parser.readfp(io.BytesIO(page_content))

    return json.loads(config_parser.get('source_package', 'url'))

  def _GetSubDownloadHelper(self, project_name):
    """Determines the hosting-specific download helper for a project.

    Args:
      project_name: the name of the project.

    Returns:
      A download helper (instance of DownloadHelper) or None if it could
      not be determined.
    """
    download_url = self.GetWikiConfigurationSourcePackageUrl(project_name)

    # Guard against a missing wiki configuration; the original code called
    # startswith() on None here, raising AttributeError.
    if not download_url:
      return

    if download_url.startswith('https://github.com'):
      return LibyalGithubReleasesDownloadHelper()
    if download_url.startswith('https://googledrive.com'):
      return LibyalGoogleDriveDownloadHelper(download_url)

  def GetLatestVersion(self, project_name):
    """Retrieves the latest version number for a given project name.

    Args:
      project_name: the name of the project.

    Returns:
      The latest version number or 0 on error.
    """
    if not self._download_helper:
      self._download_helper = self._GetSubDownloadHelper(project_name)
      if not self._download_helper:
        return 0

    return self._download_helper.GetLatestVersion(project_name)

  def GetDownloadUrl(self, project_name, project_version):
    """Retrieves the download URL for a given project name and version.

    Args:
      project_name: the name of the project.
      project_version: the version of the project.

    Returns:
      The download URL of the project or None on error.
    """
    if not self._download_helper:
      self._download_helper = self._GetSubDownloadHelper(project_name)
      if not self._download_helper:
        return

    return self._download_helper.GetDownloadUrl(project_name, project_version)
class LibyalGoogleDriveDownloadHelper(GoogleDriveDownloadHelper):
  """Download helper for libyal projects hosted on Google Drive."""

  def __init__(self, google_drive_url):
    """Initializes the download helper.

    Args:
      google_drive_url: the project Google Drive URL.
    """
    super(LibyalGoogleDriveDownloadHelper, self).__init__()
    self._google_drive_url = google_drive_url

  def GetGoogleDriveDownloadsUrl(self, project_name):
    """Retrieves the Google Drive downloads URL.

    Args:
      project_name: the name of the project.

    Returns:
      The downloads URL.
    """
    # The URL was provided at construction time and is project independent.
    return self._google_drive_url
class LibyalGithubReleasesDownloadHelper(GithubReleasesDownloadHelper):
  """Download helper for libyal projects released on GitHub."""

  def __init__(self):
    """Initializes the download helper for the libyal organization."""
    super(LibyalGithubReleasesDownloadHelper, self).__init__('libyal')
class SourceHelper(object):
  """Base class for helpers that manage a project's source code."""

  def __init__(self, project_name):
    """Initializes the source helper.

    Args:
      project_name: the name of the project.
    """
    super(SourceHelper, self).__init__()
    self.project_name = project_name

  @abc.abstractmethod
  def Create(self):
    """Creates the source directory.

    Returns:
      The name of the source directory if successful or None on error.
    """
class LibyalGitRepositoryHelper(SourceHelper):
  """Source helper that obtains the source code from a git repository."""

  def Clean(self):
    """Removes a previous version of the source directory."""
    if os.path.exists(self.project_name):
      logging.info(u'Removing: {0:s}'.format(self.project_name))
      shutil.rmtree(self.project_name)

  def Create(self):
    """Creates the source directory from the git repository.

    Returns:
      The name of the source directory if successful or None on error.
    """
    if not self.project_name:
      return

    source_directory = self.project_name
    command = u'git clone https://github.com/libyal/{0:s}.git'.format(
        self.project_name)
    exit_code = subprocess.call(
        u'{0:s}'.format(command), shell=True)
    if exit_code != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return

    # Sync the dependent libraries and prepare the autotools build files
    # inside the freshly cloned source directory; stop at the first
    # command that fails.
    for command in (u'./synclibs.sh', u'./autogen.sh', u'./configure'):
      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return

    return source_directory
class SourcePackageHelper(SourceHelper):
  """Class that manages the source code from a source package."""

  # Expected encoding of the filenames inside the source tar archive.
  ENCODING = 'utf-8'

  def __init__(self, project_name):
    """Initializes the source package helper.

    Args:
      project_name: the name of the project.
    """
    super(SourcePackageHelper, self).__init__(project_name)
    self._download_helper = LibyalGitHubDownloadHelper()
    self._project_version = None
    self._source_filename = None

  @property
  def project_version(self):
    """The project version, determined on first access."""
    if not self._project_version:
      self._project_version = self._download_helper.GetLatestVersion(
          self.project_name)
    return self._project_version

  def Clean(self):
    """Removes previous versions of source packages and directories."""
    if not self.project_version:
      return

    filenames_to_ignore = re.compile(
        u'^{0:s}-.*{1!s}'.format(self.project_name, self.project_version))

    # Remove previous versions of source packages in the format:
    # project-*.tar.gz
    filenames = glob.glob(u'{0:s}-*.tar.gz'.format(self.project_name))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    # Remove previous versions of source directories in the format:
    # project-{version}
    filenames = glob.glob(u'{0:s}-*'.format(self.project_name))
    for filename in filenames:
      if os.path.isdir(filename) and not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        shutil.rmtree(filename)

  def Create(self):
    """Creates the source directory from the source package.

    Returns:
      The name of the source directory if successful or None on error.
    """
    if not self._source_filename:
      _ = self.Download()

    if not self._source_filename or not os.path.exists(self._source_filename):
      return

    archive = tarfile.open(self._source_filename, 'r:gz', encoding='utf-8')
    directory_name = ''

    for tar_info in archive.getmembers():
      filename = getattr(tar_info, 'name', None)

      # Check for a missing filename before decoding it; the original code
      # called decode() first, which raised AttributeError on None and made
      # the None-handling branch unreachable.
      if filename is None:
        logging.warning(u'Missing filename in tar file: {0:s}'.format(
            self._source_filename))
        continue

      try:
        filename = filename.decode(self.ENCODING)
      except UnicodeDecodeError:
        logging.warning(
            u'Unable to decode filename in tar file: {0:s}'.format(
                self._source_filename))
        continue

      if not directory_name:
        # Note that this will set directory name to an empty string
        # if filename start with a /.
        directory_name, _, _ = filename.partition(u'/')
        if not directory_name or directory_name.startswith(u'..'):
          logging.error(
              u'Unsuppored directory name in tar file: {0:s}'.format(
                  self._source_filename))
          return
        if os.path.exists(directory_name):
          break
        logging.info(u'Extracting: {0:s}'.format(self._source_filename))

      elif not filename.startswith(directory_name):
        logging.warning(
            u'Skipping: {0:s} in tar file: {1:s}'.format(
                filename, self._source_filename))
        continue

      archive.extract(tar_info)
    archive.close()

    return directory_name

  def Download(self):
    """Downloads the source package.

    Returns:
      The filename of the source package if successful also if the file was
      already downloaded or None on error.
    """
    if not self._source_filename:
      if not self.project_version:
        return

      self._source_filename = self._download_helper.Download(
          self.project_name, self.project_version)

    return self._source_filename
class BuildHelper(object):
  """Base class for helpers that build packages from source."""

  # Name of the file the build output is logged to.
  LOG_FILENAME = u'build.log'

  @abc.abstractmethod
  def Build(self, source_helper):
    """Builds the source.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
class DpkgBuildHelper(BuildHelper):
  """Class that helps in building dpkg packages (.deb)."""
  # pylint: disable=abstract-method
  # TODO: determine BUILD_DEPENDENCIES from the build files?
  # TODO: what about flex, byacc?

  # Debian/Ubuntu packages that must be installed before a dpkg build can run.
  _BUILD_DEPENDENCIES = frozenset([
      u'git',
      u'build-essential',
      u'autotools-dev',
      u'autoconf',
      u'automake',
      u'autopoint',
      u'libtool',
      u'gettext',
      u'flex',
      u'byacc',
      u'debhelper',
      u'devscripts',
      u'dpkg-dev',
      u'fakeroot',
      u'quilt',
      u'zlib1g-dev',
      u'libbz2-dev',
      u'libssl-dev',
      u'libfuse-dev',
      u'python-dev',
      u'python-setuptools',
  ])

  def _BuildPrepare(
      self, source_directory, project_name, project_version, version_suffix,
      distribution, architecture):
    """Makes the necessary preparations before building the dpkg packages.

    Args:
      source_directory: the name of the source directory.
      project_name: the name of the project.
      project_version: the version of the project.
      version_suffix: the version suffix.
      distribution: the distribution.
      architecture: the architecture.

    Returns:
      True if the preparations were successful, False otherwise.
    """
    # Script to run before building, e.g. to change the dpkg packaging files.
    # The script lives next to this build script (one level above the
    # source directory, hence the "../" prefix).
    if os.path.exists(u'prep-dpkg.sh'):
      command = u'sh ../prep-dpkg.sh {0:s} {1!s} {2:s} {3:s} {4:s}'.format(
          project_name, project_version, version_suffix, distribution,
          architecture)
      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False
    return True

  def _BuildFinalize(
      self, source_directory, project_name, project_version, version_suffix,
      distribution, architecture):
    """Makes the necessary finalizations after building the dpkg packages.

    Args:
      source_directory: the name of the source directory.
      project_name: the name of the project.
      project_version: the version of the project.
      version_suffix: the version suffix.
      distribution: the distribution.
      architecture: the architecture.

    Returns:
      True if the finalizations were successful, False otherwise.
    """
    # Script to run after building, e.g. to automatically upload the dpkg
    # package files to an apt repository.
    if os.path.exists(u'post-dpkg.sh'):
      command = u'sh ../post-dpkg.sh {0:s} {1!s} {2:s} {3:s} {4:s}'.format(
          project_name, project_version, version_suffix, distribution,
          architecture)
      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False
    return True

  @classmethod
  def CheckBuildDependencies(cls):
    """Checks if the build dependencies are met.

    Returns:
      A list of package names that need to be installed or an empty list.
    """
    missing_packages = []
    for package_name in cls._BUILD_DEPENDENCIES:
      if not cls.CheckIsInstalled(package_name):
        missing_packages.append(package_name)
    return missing_packages

  @classmethod
  def CheckIsInstalled(cls, package_name):
    """Checks if a package is installed.

    Args:
      package_name: the name of the package.

    Returns:
      A boolean value containing true if the package is installed
      false otherwise.
    """
    # dpkg-query exits non-zero when the package is unknown or not installed.
    command = u'dpkg-query -l {0:s} >/dev/null 2>&1'.format(package_name)
    exit_code = subprocess.call(command, shell=True)
    return exit_code == 0
class LibyalDpkgBuildHelper(DpkgBuildHelper):
  """Class that helps in building libyal dpkg packages (.deb)."""

  def __init__(self):
    """Initializes the build helper."""
    super(LibyalDpkgBuildHelper, self).__init__()
    self.architecture = platform.machine()
    self.distribution = ''
    self.version_suffix = ''

    # Map the platform machine names to dpkg architecture names.
    if self.architecture == 'i686':
      self.architecture = 'i386'
    elif self.architecture == 'x86_64':
      self.architecture = 'amd64'

  def Build(self, source_helper):
    """Builds the dpkg packages.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
    source_filename = source_helper.Download()
    # Guard against a failed download; the original code passed None on
    # to shutil.copy which raised a confusing exception.
    if not source_filename:
      logging.error(u'Download of: {0:s} failed'.format(
          source_helper.project_name))
      return False

    logging.info(u'Building deb of: {0:s}'.format(source_filename))

    # dpkg-buildpackage wants an source package filename without
    # the status indication and orig indication.
    deb_orig_source_filename = u'{0:s}_{1!s}.orig.tar.gz'.format(
        source_helper.project_name, source_helper.project_version)
    shutil.copy(source_filename, deb_orig_source_filename)

    source_directory = source_helper.Create()
    if not source_directory:
      logging.error(
          u'Extraction of source package: {0:s} failed'.format(source_filename))
      return False

    dpkg_directory = os.path.join(source_directory, u'dpkg')
    if not os.path.exists(dpkg_directory):
      dpkg_directory = os.path.join(source_directory, u'config', u'dpkg')
    if not os.path.exists(dpkg_directory):
      logging.error(u'Missing dpkg sub directory in: {0:s}'.format(
          source_directory))
      return False

    debian_directory = os.path.join(source_directory, u'debian')

    # If there is a debian directory remove it and recreate it from
    # the dpkg directory.
    if os.path.exists(debian_directory):
      logging.info(u'Removing: {0:s}'.format(debian_directory))
      shutil.rmtree(debian_directory)
    shutil.copytree(dpkg_directory, debian_directory)

    if not self._BuildPrepare(
        source_directory, source_helper.project_name,
        source_helper.project_version, self.version_suffix, self.distribution,
        self.architecture):
      return False

    command = u'dpkg-buildpackage -uc -us -rfakeroot > {0:s} 2>&1'.format(
        os.path.join(u'..', self.LOG_FILENAME))
    exit_code = subprocess.call(
        u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
    if exit_code != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return False

    if not self._BuildFinalize(
        source_directory, source_helper.project_name,
        source_helper.project_version, self.version_suffix, self.distribution,
        self.architecture):
      return False

    return True

  def Clean(self, source_helper):
    """Cleans the dpkg packages in the current directory.

    Args:
      source_helper: the source helper (instance of SourceHelper).
    """
    filenames_to_ignore = re.compile(u'^{0:s}_{1!s}.orig.tar.gz'.format(
        source_helper.project_name, source_helper.project_version))

    # Remove files of previous versions in the format:
    # project_version.orig.tar.gz
    filenames = glob.glob(
        u'{0:s}_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].orig.tar.gz'.format(
            source_helper.project_name))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    filenames_to_ignore = re.compile(u'^{0:s}[-_].*{1!s}'.format(
        source_helper.project_name, source_helper.project_version))

    # Remove files of previous versions in the format:
    # project[-_]version-1_architecture.*
    filenames = glob.glob(
        u'{0:s}[-_]*[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-1_'
        u'{1:s}.*'.format(source_helper.project_name, self.architecture))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    # Remove files of previous versions in the format:
    # project[-_]*version-1.*
    filenames = glob.glob(
        u'{0:s}[-_]*[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-1.*'.format(
            source_helper.project_name))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

  def GetOutputFilename(self, source_helper):
    """Retrieves the filename of one of the resulting files.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      A filename of one of the resulting dpkg packages.
    """
    return u'{0:s}_{1!s}-1_{2:s}.deb'.format(
        source_helper.project_name, source_helper.project_version,
        self.architecture)
class LibyalSourceDpkgBuildHelper(DpkgBuildHelper):
  """Class that helps in building libyal source dpkg packages (.deb)."""

  def __init__(self):
    """Initializes the build helper."""
    super(LibyalSourceDpkgBuildHelper, self).__init__()
    self.architecture = 'source'
    self.distribution = 'trusty'
    self.version_suffix = 'ppa1'

  def Build(self, source_helper):
    """Builds the dpkg packages.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
    source_filename = source_helper.Download()
    # Guard against a failed download; the original code passed None on
    # to shutil.copy which raised a confusing exception.
    if not source_filename:
      logging.error(u'Download of: {0:s} failed'.format(
          source_helper.project_name))
      return False

    logging.info(u'Building source deb of: {0:s}'.format(source_filename))

    # dpkg-buildpackage wants an source package filename without
    # the status indication and orig indication.
    deb_orig_source_filename = u'{0:s}_{1!s}.orig.tar.gz'.format(
        source_helper.project_name, source_helper.project_version)
    shutil.copy(source_filename, deb_orig_source_filename)

    source_directory = source_helper.Create()
    if not source_directory:
      logging.error(
          u'Extraction of source package: {0:s} failed'.format(source_filename))
      return False

    dpkg_directory = os.path.join(source_directory, u'dpkg')
    if not os.path.exists(dpkg_directory):
      dpkg_directory = os.path.join(source_directory, u'config', u'dpkg')
    if not os.path.exists(dpkg_directory):
      logging.error(u'Missing dpkg sub directory in: {0:s}'.format(
          source_directory))
      return False

    debian_directory = os.path.join(source_directory, u'debian')

    # If there is a debian directory remove it and recreate it from
    # the dpkg directory.
    if os.path.exists(debian_directory):
      logging.info(u'Removing: {0:s}'.format(debian_directory))
      shutil.rmtree(debian_directory)
    shutil.copytree(dpkg_directory, debian_directory)

    if not self._BuildPrepare(
        source_directory, source_helper.project_name,
        source_helper.project_version, self.version_suffix, self.distribution,
        self.architecture):
      return False

    command = u'debuild -S -sa > {0:s} 2>&1'.format(
        os.path.join(u'..', self.LOG_FILENAME))
    exit_code = subprocess.call(
        u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
    if exit_code != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return False

    if not self._BuildFinalize(
        source_directory, source_helper.project_name,
        source_helper.project_version, self.version_suffix, self.distribution,
        self.architecture):
      return False

    return True

  def Clean(self, source_helper):
    """Cleans the dpkg packages in the current directory.

    Args:
      source_helper: the source helper (instance of SourceHelper).
    """
    filenames_to_ignore = re.compile(u'^{0:s}_{1!s}.orig.tar.gz'.format(
        source_helper.project_name, source_helper.project_version))

    # Remove files of previous versions in the format:
    # project_version.orig.tar.gz
    filenames = glob.glob(
        u'{0:s}_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].orig.tar.gz'.format(
            source_helper.project_name))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    filenames_to_ignore = re.compile(u'^{0:s}[-_].*{1!s}'.format(
        source_helper.project_name, source_helper.project_version))

    # Remove files of previous versions in the format:
    # project[-_]version-1suffix~distribution_architecture.*
    filenames = glob.glob((
        u'{0:s}[-_]*[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
        u'-1{1:s}~{2:s}_{3:s}.*').format(
            source_helper.project_name, self.version_suffix, self.distribution,
            self.architecture))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    # Remove files of previous versions in the format:
    # project[-_]*version-1suffix~distribution.*
    filenames = glob.glob((
        u'{0:s}[-_]*[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
        u'-1{1:s}~{2:s}.*').format(
            source_helper.project_name, self.version_suffix, self.distribution))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

  def GetOutputFilename(self, source_helper):
    """Retrieves the filename of one of the resulting files.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      A filename of one of the resulting dpkg packages.
    """
    return u'{0:s}_{1!s}-1{2:s}~{3:s}_{4:s}.changes'.format(
        source_helper.project_name, source_helper.project_version,
        self.version_suffix, self.distribution, self.architecture)
class MakeBuildHelper(BuildHelper):
  """Build helper that drives a plain configure-and-make build."""

  def Build(self, source_helper):
    """Builds the source using make.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
    source_directory = source_helper.project_name

    # Run the autotools configure step followed by make; stop at the
    # first command that fails.
    for command in (u'./configure', u'make'):
      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False

    return True
class MsiBuildHelper(BuildHelper):
  """Class that helps in building Microsoft Installer packages (.msi)."""

  # pylint: disable=abstract-method

  LOG_FILENAME = u'msbuild.log'

  def __init__(self):
    """Initializes the build helper."""
    super(MsiBuildHelper, self).__init__()
    # Translate the machine architecture reported by the platform into
    # the naming used by the Windows builds; other values pass through
    # unchanged.
    machine_type = platform.machine()
    if machine_type == 'x86':
      self.architecture = 'win32'
    elif machine_type == 'AMD64':
      self.architecture = 'win-amd64'
    else:
      self.architecture = machine_type
class LibyalMsiBuildHelper(MsiBuildHelper):
  """Class that helps in building Microsoft Installer packages (.msi)."""

  def __init__(self):
    """Initializes the build helper.

    Raises:
      RuntimeError: if the Visual Studio version could not be determined or
                    msvscpp-convert.py could not be found.
    """
    super(LibyalMsiBuildHelper, self).__init__()
    # Detect the installed Visual Studio version from the common tools
    # environment variables. The chain is checked oldest to newest, so an
    # installed VS 2008 takes precedence over newer versions.
    if 'VS90COMNTOOLS' in os.environ:
      self.version = '2008'
    elif 'VS100COMNTOOLS' in os.environ:
      self.version = '2010'
    elif 'VS110COMNTOOLS' in os.environ:
      self.version = '2012'
    elif 'VS120COMNTOOLS' in os.environ:
      self.version = '2013'
    else:
      raise RuntimeError(u'Unable to determine Visual Studio version.')

    if self.version != '2008':
      # The libyal solution files ship in VS 2008 format; newer versions
      # need the conversion script that lives next to this module.
      self._msvscpp_convert = os.path.join(
          os.path.dirname(__file__), u'msvscpp-convert.py')
      if not os.path.exists(self._msvscpp_convert):
        raise RuntimeError(u'Unable to find msvscpp-convert.py')

  def _BuildPrepare(self, source_directory):
    """Prepares the source for building with Visual Studio.

    Inserts a WINVER define into the libyal configuration header so the
    resulting binary targets the intended minimum Windows version.

    Args:
      source_directory: the name of the source directory.
    """
    # For the vs2008 build make sure the binary is XP compatible,
    # by setting WINVER to 0x0501. For the vs2010 build WINVER is
    # set to 0x0600 (Windows Vista).

    # WINVER is set in common\config_winapi.h or common\config_msc.h.
    config_filename = os.path.join(
        source_directory, u'common', u'config_winapi.h')

    # If the WINAPI configuration file is not available use
    # the MSC compiler configuration file instead.
    if not os.path.exists(config_filename):
      config_filename = os.path.join(
          source_directory, u'common', u'config_msc.h')

    # Add a line to the config file that sets WINVER. With inplace=1,
    # fileinput redirects stdout into the file, so every print() below
    # writes a line back into config_filename.
    # parsing_mode: 0 = scanning for the '#define _CONFIG_' include guard,
    # 1 = the WINVER define is inserted before the next line,
    # 2 = done; remaining lines are copied through (empty lines dropped).
    parsing_mode = 0

    for line in fileinput.input(config_filename, inplace=1):
      # Remove trailing whitespace and end-of-line characters.
      line = line.rstrip()

      if parsing_mode != 2 or line:
        if parsing_mode == 1:
          if self.version == '2008':
            # Only insert the define when it is not already present.
            if not line.startswith('#define WINVER 0x0501'):
              print('#define WINVER 0x0501')
              print('')
          else:
            if not line.startswith('#define WINVER 0x0600'):
              print('#define WINVER 0x0600')
              print('')

          parsing_mode = 2

        elif line.startswith('#define _CONFIG_'):
          parsing_mode = 1

        print(line)

  def _ConvertSolutionFiles(self, source_directory):
    """Converts the Visual Studio solution and project files.

    Converts the VS 2008 solution under msvscpp/ to self.version using
    msvscpp-convert.py and swaps the directories so the converted solution
    ends up in msvscpp/.

    Args:
      source_directory: the name of the source directory.
    """
    # NOTE(review): this method changes the current working directory and,
    # on the error paths below, returns False without chdir-ing back.
    # Build() does not check the return value — confirm intended.
    os.chdir(source_directory)

    solution_filenames = glob.glob(os.path.join(u'msvscpp', u'*.sln'))
    if len(solution_filenames) != 1:
      logging.error(u'Unable to find Visual Studio solution file')
      return False

    solution_filename = solution_filenames[0]

    # A pre-existing vs2008 directory indicates the conversion was already
    # done on a previous run.
    if not os.path.exists(u'vs2008'):
      command = u'{0:s} {1:s} --to {2:s} {3:s}'.format(
          sys.executable, self._msvscpp_convert, self.version,
          solution_filename)
      exit_code = subprocess.call(command, shell=False)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False

      # Note that setup.py needs the Visual Studio solution directory
      # to be named: msvscpp. So replace the Visual Studio 2008 msvscpp
      # solution directory with the converted one.
      os.rename(u'msvscpp', u'vs2008')
      os.rename(u'vs{0:s}'.format(self.version), u'msvscpp')

    os.chdir(u'..')

  def Build(self, source_helper):
    """Builds using Visual Studio.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
    source_filename = source_helper.Download()
    logging.info(u'Building: {0:s} with Visual Studio {1:s}'.format(
        source_filename, self.version))

    source_directory = source_helper.Create()
    if not source_directory:
      logging.error(
          u'Extraction of source package: {0:s} failed'.format(source_filename))
      return False

    # Search common locations for MSBuild.exe. Note that self.version is
    # guaranteed by __init__ to be one of the values handled below.
    if self.version == '2008':
      msbuild = u'{0:s}:{1:s}{2:s}'.format(
          u'C', os.sep, os.path.join(
              u'Windows', u'Microsoft.NET', u'Framework', u'v3.5',
              u'MSBuild.exe'))

    # Note that MSBuild in .NET 3.5 does not support vs2010 solution files
    # and MSBuild in .NET 4.0 is needed instead.
    elif self.version in ['2010', '2012', '2013']:
      msbuild = u'{0:s}:{1:s}{2:s}'.format(
          u'C', os.sep, os.path.join(
              u'Windows', u'Microsoft.NET', u'Framework', u'v4.0.30319',
              u'MSBuild.exe'))

    if not os.path.exists(msbuild):
      logging.error(u'Unable to find MSBuild.exe')
      return False

    # Make sure the common tools environment variable of the selected
    # Visual Studio version is set and non-empty.
    if self.version == '2008':
      if not os.environ['VS90COMNTOOLS']:
        logging.error(u'Missing VS90COMNTOOLS environment variable.')
        return False

    elif self.version == '2010':
      if not os.environ['VS100COMNTOOLS']:
        logging.error(u'Missing VS100COMNTOOLS environment variable.')
        return False

    elif self.version == '2012':
      if not os.environ['VS110COMNTOOLS']:
        logging.error(u'Missing VS110COMNTOOLS environment variable.')
        return False

    elif self.version == '2013':
      if not os.environ['VS120COMNTOOLS']:
        logging.error(u'Missing VS120COMNTOOLS environment variable.')
        return False

    # For the Visual Studio builds later than 2008 the convert the 2008
    # solution and project files need to be converted to the newer version.
    if self.version in ['2010', '2012', '2013']:
      self._ConvertSolutionFiles(source_directory)

    self._BuildPrepare(source_directory)

    # Detect architecture based on Visual Studion Platform environment
    # variable. If not set the platform with default to Win32.
    msvscpp_platform = os.environ.get('Platform', None)
    if not msvscpp_platform:
      msvscpp_platform = os.environ.get('TARGET_CPU', None)

    if not msvscpp_platform or msvscpp_platform == 'x86':
      msvscpp_platform = 'Win32'

    if msvscpp_platform not in ['Win32', 'x64']:
      logging.error(u'Unsupported build platform: {0:s}'.format(
          msvscpp_platform))
      return False

    if self.version == '2008' and msvscpp_platform == 'x64':
      logging.error(u'Unsupported 64-build platform for vs2008.')
      return False

    solution_filenames = glob.glob(os.path.join(
        source_directory, u'msvscpp', u'*.sln'))
    if len(solution_filenames) != 1:
      logging.error(u'Unable to find Visual Studio solution file')
      return False

    solution_filename = solution_filenames[0]

    command = (
        u'{0:s} /p:Configuration=Release /p:Platform={1:s} /noconsolelogger '
        u'/fileLogger /maxcpucount {2:s}').format(
            msbuild, msvscpp_platform, solution_filename)
    exit_code = subprocess.call(command, shell=False)
    if exit_code != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return False

    # Derive the Python module name from the source directory name by
    # replacing the leading 'lib' with 'py', e.g. libevt-... -> pyevt.
    python_module_name, _, _ = source_directory.partition(u'-')
    python_module_name = u'py{0:s}'.format(python_module_name[3:])
    python_module_directory = os.path.join(
        source_directory, python_module_name)
    python_module_dist_directory = os.path.join(
        python_module_directory, u'dist')

    # Only build the msi when the dist directory does not exist yet.
    if not os.path.exists(python_module_dist_directory):
      build_directory = os.path.join(u'..', u'..')

      os.chdir(python_module_directory)

      # Setup.py uses VS90COMNTOOLS which is vs2008 specific
      # so we need to set it for the other Visual Studio versions.
      if self.version == '2010':
        os.environ['VS90COMNTOOLS'] = os.environ['VS100COMNTOOLS']

      elif self.version == '2012':
        os.environ['VS90COMNTOOLS'] = os.environ['VS110COMNTOOLS']

      elif self.version == '2013':
        os.environ['VS90COMNTOOLS'] = os.environ['VS120COMNTOOLS']

      # TODO: append to log file?
      command = u'{0:s} setup.py bdist_msi'.format(sys.executable)
      exit_code = subprocess.call(command, shell=False)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False

      # Move the msi to the build directory.
      # NOTE(review): assumes the glob matches at least one file; an empty
      # match would raise IndexError here — confirm intended.
      msi_filename = glob.glob(os.path.join(
          u'dist', u'{0:s}-*.msi'.format(python_module_name)))
      logging.info(u'Moving: {0:s}'.format(msi_filename[0]))
      shutil.move(msi_filename[0], build_directory)

      os.chdir(build_directory)

    return True

  def Clean(self, unused_source_helper):
    """Cleans the Visual Studio build directory.

    Intentionally a no-op: the Visual Studio build directory is left in
    place between runs.

    Args:
      unused_source_helper: the source helper (instance of SourceHelper).
    """

  def GetOutputFilename(self, source_helper):
    """Retrieves the filename of one of the resulting files.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      A filename of one of the resulting build directory.
    """
    source_directory = u'{0:s}-{1!s}'.format(
        source_helper.project_name, source_helper.project_version)
    return os.path.join(source_directory, u'msvscpp', u'Release')
class PkgBuildHelper(BuildHelper):
  """Class that helps in building MacOS-X packages (.pkg)."""

  # pylint: disable=abstract-method

  def __init__(self):
    """Initializes the build helper."""
    super(PkgBuildHelper, self).__init__()
    self._pkgbuild = os.path.join(u'/', u'usr', u'bin', u'pkgbuild')

  def _BuildDmg(self, pkg_filename, dmg_filename):
    """Builds the distributable disk image (.dmg) from the pkg.

    Args:
      pkg_filename: the name of the pkg file (which is technically
                    a directory).
      dmg_filename: the name of the dmg file.

    Returns:
      True if the build was successful, False otherwise.
    """
    command = u'hdiutil create {0:s} -srcfolder {1:s} -fs HFS+'.format(
        dmg_filename, pkg_filename)
    if subprocess.call(command, shell=True) != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return False
    return True

  def _BuildPkg(
      self, source_directory, project_identifier, project_version,
      pkg_filename):
    """Builds the MacOS-X package (.pkg) with pkgbuild.

    Args:
      source_directory: the name of the source directory.
      project_identifier: the project identifier.
      project_version: the version of the project.
      pkg_filename: the name of the pkg file (which is technically
                    a directory).

    Returns:
      True if the build was successful, False otherwise.
    """
    command = (
        u'{0:s} --root {1:s}/tmp/ --identifier {2:s} '
        u'--version {3!s} --ownership recommended {4:s}').format(
            self._pkgbuild, source_directory, project_identifier,
            project_version, pkg_filename)
    if subprocess.call(command, shell=True) != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return False
    return True

  def Clean(self, source_helper):
    """Cleans the MacOS-X packages in the current directory.

    Removes dmg and pkg files of versions other than the current one.

    Args:
      source_helper: the source helper (instance of SourceHelper).
    """
    current_version_matcher = re.compile(u'^{0:s}-.*{1!s}'.format(
        source_helper.project_name, source_helper.project_version))

    # Remove files of previous versions in the formats:
    # project-*version.dmg and project-*version.pkg
    for glob_pattern in [
        u'{0:s}-*.dmg'.format(source_helper.project_name),
        u'{0:s}-*.pkg'.format(source_helper.project_name)]:
      for filename in glob.glob(glob_pattern):
        if not current_version_matcher.match(filename):
          logging.info(u'Removing: {0:s}'.format(filename))
          os.remove(filename)

  def GetOutputFilename(self, source_helper):
    """Retrieves the filename of one of the resulting files.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      A filename of the resulting dmg: project-version.dmg
    """
    return u'{0:s}-{1!s}.dmg'.format(
        source_helper.project_name, source_helper.project_version)
class LibyalPkgBuildHelper(PkgBuildHelper):
  """Class that helps in building libyal MacOS-X packages (.pkg)."""

  def Build(self, source_helper):
    """Builds the pkg package and distributable disk image (.dmg).

    Runs configure/make/make install into a temporary root, adds the
    documentation files, then builds the pkg and wraps it in a dmg.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
    source_filename = source_helper.Download()
    logging.info(u'Building pkg of: {0:s}'.format(source_filename))

    source_directory = source_helper.Create()
    if not source_directory:
      logging.error(
          u'Extraction of source package: {0:s} failed'.format(source_filename))
      return False

    dmg_filename = u'{0:s}-{1!s}.dmg'.format(
        source_helper.project_name, source_helper.project_version)
    pkg_filename = u'{0:s}-{1!s}.pkg'.format(
        source_helper.project_name, source_helper.project_version)
    # NOTE(review): self.LOG_FILENAME is presumably defined by the
    # BuildHelper base class — confirm.
    log_filename = os.path.join(u'..', self.LOG_FILENAME)

    sdks_path = os.path.join(
        u'/', u'Applications', u'Xcode.app', u'Contents', u'Developer',
        u'Platforms', u'MacOSX.platform', u'Developer', u'SDKs')

    # Find the oldest supported MacOS-X SDK that is actually installed.
    # Fix: the previous code checked os.path.isdir(sub_path) — the bare
    # sub directory name relative to the current working directory — which
    # never matched and left sdk_path set to the last candidate whether it
    # existed or not. Check the full path and fall back to building
    # without an explicit SDK when none is found.
    sdk_path = None
    for sub_path in [u'MacOSX10.7.sdk', u'MacOSX10.8.sdk', u'MacOSX10.9.sdk']:
      candidate_path = os.path.join(sdks_path, sub_path)
      if os.path.isdir(candidate_path):
        sdk_path = candidate_path
        break

    if sdk_path:
      cflags = u'CFLAGS="-isysroot {0:s}"'.format(sdk_path)
      ldflags = u'LDFLAGS="-Wl,-syslibroot,{0:s}"'.format(sdk_path)
    else:
      cflags = u''
      ldflags = u''

    # Only rebuild the pkg when it does not exist yet.
    if not os.path.exists(pkg_filename):
      if cflags and ldflags:
        command = (
            u'{0:s} {1:s} ./configure --prefix=/usr --enable-python '
            u'--with-pyprefix --disable-dependency-tracking > {2:s} '
            u'2>&1').format(cflags, ldflags, log_filename)
      else:
        command = (
            u'./configure --prefix=/usr --enable-python --with-pyprefix '
            u'> {0:s} 2>&1').format(log_filename)

      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False

      command = u'make >> {0:s} 2>&1'.format(log_filename)
      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False

      # Install into <source_directory>/tmp, which pkgbuild uses as root.
      command = u'make install DESTDIR={0:s}/tmp >> {1:s} 2>&1'.format(
          os.path.abspath(source_directory), log_filename)
      exit_code = subprocess.call(
          u'(cd {0:s} && {1:s})'.format(source_directory, command), shell=True)
      if exit_code != 0:
        logging.error(u'Running: "{0:s}" failed.'.format(command))
        return False

      share_doc_path = os.path.join(
          source_directory, u'tmp', u'usr', u'share', u'doc',
          source_helper.project_name)
      if not os.path.exists(share_doc_path):
        os.makedirs(share_doc_path)

      # Assumes the libyal source always ships these files — TODO confirm.
      shutil.copy(os.path.join(source_directory, u'AUTHORS'), share_doc_path)
      shutil.copy(os.path.join(source_directory, u'COPYING'), share_doc_path)
      shutil.copy(os.path.join(source_directory, u'NEWS'), share_doc_path)
      shutil.copy(os.path.join(source_directory, u'README'), share_doc_path)

      project_identifier = u'com.github.libyal.{0:s}'.format(
          source_helper.project_name)
      if not self._BuildPkg(
          source_directory, project_identifier, source_helper.project_version,
          pkg_filename):
        return False

    if not self._BuildDmg(pkg_filename, dmg_filename):
      return False

    return True
class RpmBuildHelper(BuildHelper):
  """Class that helps in building rpm packages (.rpm)."""

  # pylint: disable=abstract-method

  # TODO: determine BUILD_DEPENDENCIES from the build files?
  # Packages that must be installed for the rpm builds to work.
  _BUILD_DEPENDENCIES = frozenset([
      'git',
      'binutils',
      'autoconf',
      'automake',
      'libtool',
      'gettext-devel',
      'make',
      'pkgconfig',
      'gcc',
      'flex',
      'byacc',
      'zlib-devel',
      'bzip2-devel',
      'openssl-devel',
      'fuse-devel',
      'rpm-build',
      'python-devel',
  ])

  def __init__(self):
    """Initializes the build helper."""
    super(RpmBuildHelper, self).__init__()
    self.architecture = platform.machine()

    # Cache the paths of the standard rpmbuild working tree in the
    # user's home directory.
    self.rpmbuild_path = os.path.join(u'~', u'rpmbuild')
    self.rpmbuild_path = os.path.expanduser(self.rpmbuild_path)

    self._rpmbuild_rpms_path = os.path.join(
        self.rpmbuild_path, u'RPMS', self.architecture)
    self._rpmbuild_sources_path = os.path.join(self.rpmbuild_path, u'SOURCES')
    self._rpmbuild_specs_path = os.path.join(self.rpmbuild_path, u'SPECS')

  def _BuildFromSpecFile(self, spec_filename):
    """Builds the rpms directly from a spec file.

    Args:
      spec_filename: the name of the spec file as stored in the rpmbuild
                     SPECS sub directory.

    Returns:
      True if the build was successful, False otherwise.
    """
    current_path = os.getcwd()
    # rpmbuild is run from within the rpmbuild tree; the original working
    # directory is restored afterwards.
    os.chdir(self.rpmbuild_path)

    # NOTE(review): self.LOG_FILENAME is presumably defined by the
    # BuildHelper base class — confirm.
    command = u'rpmbuild -ba {0:s} > {1:s} 2>&1'.format(
        os.path.join(u'SPECS', spec_filename), self.LOG_FILENAME)
    exit_code = subprocess.call(command, shell=True)
    if exit_code != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))

    os.chdir(current_path)
    return exit_code == 0

  def _BuildFromSourcePackage(self, source_filename):
    """Builds the rpms directly from the source package file.

    For this to work the source package needs to contain a valid rpm .spec
    file.

    Args:
      source_filename: the name of the source package file.

    Returns:
      True if the build was successful, False otherwise.
    """
    command = u'rpmbuild -ta {0:s} > {1:s} 2>&1'.format(
        source_filename, self.LOG_FILENAME)
    exit_code = subprocess.call(command, shell=True)
    if exit_code != 0:
      logging.error(u'Running: "{0:s}" failed.'.format(command))
      return False
    return True

  def _CreateRpmbuildDirectories(self):
    """Creates the rpmbuild and sub directories."""
    if not os.path.exists(self.rpmbuild_path):
      os.mkdir(self.rpmbuild_path)

    if not os.path.exists(self._rpmbuild_sources_path):
      os.mkdir(self._rpmbuild_sources_path)

    if not os.path.exists(self._rpmbuild_specs_path):
      os.mkdir(self._rpmbuild_specs_path)

  def _CreateSpecFile(self, project_name, spec_file_data):
    """Creates a spec file in the rpmbuild directory.

    Args:
      project_name: the name of the project.
      spec_file_data: the spec file data.
    """
    spec_filename = os.path.join(
        self._rpmbuild_specs_path, u'{0:s}.spec'.format(project_name))
    # Fix: use a context manager so the file is closed even when the
    # write raises.
    with open(spec_filename, 'w') as spec_file:
      spec_file.write(spec_file_data)

  def _CopySourceFile(self, source_filename):
    """Copies the source file to the rpmbuild directory.

    Args:
      source_filename: the name of the source package file.
    """
    shutil.copy(source_filename, self._rpmbuild_sources_path)

  def _MoveRpms(self, project_name, project_version):
    """Moves the rpms from the rpmbuild directory into to current directory.

    Args:
      project_name: the name of the project.
      project_version: the version of the project.
    """
    filenames = glob.glob(os.path.join(
        self._rpmbuild_rpms_path, u'{0:s}-*{1!s}-1.{2:s}.rpm'.format(
            project_name, project_version, self.architecture)))
    for filename in filenames:
      logging.info(u'Moving: {0:s}'.format(filename))
      shutil.move(filename, '.')

  @classmethod
  def CheckBuildDependencies(cls):
    """Checks if the build dependencies are met.

    Returns:
      A list of package names that need to be installed or an empty list.
    """
    missing_packages = []
    for package_name in cls._BUILD_DEPENDENCIES:
      if not cls.CheckIsInstalled(package_name):
        missing_packages.append(package_name)
    return missing_packages

  @classmethod
  def CheckIsInstalled(cls, package_name):
    """Checks if a package is installed.

    Args:
      package_name: the name of the package.

    Returns:
      A boolean value containing true if the package is installed
      false otherwise.
    """
    # Query the rpm database; the exit code signals presence.
    command = u'rpm -qi {0:s} >/dev/null 2>&1'.format(package_name)
    exit_code = subprocess.call(command, shell=True)
    return exit_code == 0

  def Clean(self, source_helper):
    """Cleans the rpmbuild directory.

    Removes build directories, rpms and source rpms of versions other
    than the current one.

    Args:
      source_helper: the source helper (instance of SourceHelper).
    """
    # Remove previous versions build directories.
    filenames_to_ignore = re.compile(u'{0:s}-{1!s}'.format(
        source_helper.project_name, source_helper.project_version))

    filenames = glob.glob(os.path.join(
        self.rpmbuild_path, u'BUILD', u'{0:s}-*'.format(
            source_helper.project_name)))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        shutil.rmtree(filename)

    # Remove previous versions of rpms.
    filenames_to_ignore = re.compile(
        u'{0:s}-.*{1!s}-1.{2:s}.rpm'.format(
            source_helper.project_name, source_helper.project_version,
            self.architecture))

    rpm_filenames_glob = u'{0:s}-*-1.{1:s}.rpm'.format(
        source_helper.project_name, self.architecture)

    # Both in the current directory and in the rpmbuild RPMS directory.
    filenames = glob.glob(rpm_filenames_glob)
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    filenames = glob.glob(os.path.join(
        self.rpmbuild_path, u'RPMS', self.architecture, rpm_filenames_glob))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

    # Remove previous versions of source rpms.
    filenames_to_ignore = re.compile(u'{0:s}-.*{1!s}-1.src.rpm'.format(
        source_helper.project_name, source_helper.project_version))

    filenames = glob.glob(os.path.join(
        self.rpmbuild_path, u'SRPMS',
        u'{0:s}-*-1.src.rpm'.format(source_helper.project_name)))
    for filename in filenames:
      if not filenames_to_ignore.match(filename):
        logging.info(u'Removing: {0:s}'.format(filename))
        os.remove(filename)

  def GetOutputFilename(self, source_helper):
    """Retrieves the filename of one of the resulting files.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      A filename of one of the resulting rpms:
      project-version-1.architecture.rpm
    """
    return u'{0:s}-{1!s}-1.{2:s}.rpm'.format(
        source_helper.project_name, source_helper.project_version,
        self.architecture)
class LibyalRpmBuildHelper(RpmBuildHelper):
  """Class that helps in building libyal rpm packages (.rpm)."""

  def Build(self, source_helper):
    """Builds the rpms.

    Temporarily renames the downloaded source package to the plain
    project-version.tar.gz name rpmbuild expects, builds, then renames
    it back.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build was successful, False otherwise.
    """
    source_filename = source_helper.Download()
    logging.info(u'Building rpm of: {0:s}'.format(source_filename))

    # rpmbuild wants the source package filename without the status indication.
    rpm_source_filename = u'{0:s}-{1!s}.tar.gz'.format(
        source_helper.project_name, source_helper.project_version)
    os.rename(source_filename, rpm_source_filename)

    build_successful = self._BuildFromSourcePackage(rpm_source_filename)

    # Only clean up the rpmbuild tree after a successful build; on failure
    # the BUILD directory is left behind for inspection.
    if build_successful:
      # Move the rpms to the build directory.
      self._MoveRpms(source_helper.project_name, source_helper.project_version)

      # Remove BUILD directory.
      filename = os.path.join(
          self.rpmbuild_path, u'BUILD', u'{0:s}-{1!s}'.format(
              source_helper.project_name, source_helper.project_version))
      logging.info(u'Removing: {0:s}'.format(filename))
      shutil.rmtree(filename)

      # Remove SRPMS file.
      filename = os.path.join(
          self.rpmbuild_path, u'SRPMS', u'{0:s}-{1!s}-1.src.rpm'.format(
              source_helper.project_name, source_helper.project_version))
      logging.info(u'Removing: {0:s}'.format(filename))
      os.remove(filename)

    # Change the source package filename back to the original.
    os.rename(rpm_source_filename, source_filename)

    return build_successful
class LibyalBuilder(object):
  """Class that helps in building libyal libaries."""

  def __init__(self, build_target):
    """Initializes the dependency builder.

    Args:
      build_target: the build target.
    """
    super(LibyalBuilder, self).__init__()
    self._build_target = build_target

  def _BuildLibyalLibrary(self, source_helper):
    """Builds a libyal project and its Python module.

    Args:
      source_helper: the source helper (instance of SourceHelper).

    Returns:
      True if the build is successful or False on error.
    """
    # Dispatch the build target to the matching build helper class.
    helper_classes = {
        'dpkg': LibyalDpkgBuildHelper,
        'dpkg-source': LibyalSourceDpkgBuildHelper,
        'pkg': LibyalPkgBuildHelper,
        'rpm': LibyalRpmBuildHelper}

    helper_class = helper_classes.get(self._build_target, None)
    if not helper_class and self._build_target in [
        'vs2008', 'vs2010', 'vs2012', 'vs2013']:
      if self._build_target == 'vs2013':
        logging.warning(u'Untested experimental build target: vs2013.')
      helper_class = LibyalMsiBuildHelper

    if not helper_class:
      return False

    build_helper = helper_class()
    output_filename = build_helper.GetOutputFilename(source_helper)
    build_helper.Clean(source_helper)

    # Only build when the output of a previous build is not present.
    if not os.path.exists(output_filename):
      if not build_helper.Build(source_helper):
        logging.warning((
            u'Build of: {0:s} failed, for more information check '
            u'{1:s}').format(
                source_helper.project_name, build_helper.LOG_FILENAME))
        return False

      if os.path.exists(build_helper.LOG_FILENAME):
        logging.info(u'Removing: {0:s}'.format(build_helper.LOG_FILENAME))
        os.remove(build_helper.LOG_FILENAME)

    return True

  def Build(self, project_name):
    """Builds a libyal project.

    Args:
      project_name: the project name.

    Returns:
      True if the build is successful or False on error.
    """
    if self._build_target == 'download':
      source_helper = SourcePackageHelper(project_name)
      source_helper.Clean()
      source_filename = source_helper.Download()

      # If available run the script post-download.sh after download.
      if os.path.exists(u'post-download.sh'):
        command = u'sh ./post-download.sh {0:s}'.format(source_filename)
        if subprocess.call(command, shell=True) != 0:
          logging.error(u'Running: "{0:s}" failed.'.format(command))
          return False

    elif self._build_target == 'git':
      source_helper = LibyalGitRepositoryHelper(project_name)
      source_helper.Clean()

      # TODO: build source.
      # source_directory = source_helper.Create()
      _ = source_helper.Create()

      build_helper = MakeBuildHelper()
      if not build_helper.Build(source_helper):
        logging.warning((
            u'Build of: {0:s} failed, for more information check '
            u'{1:s}').format(
                source_helper.project_name, build_helper.LOG_FILENAME))
        return False

    else:
      source_helper = SourcePackageHelper(project_name)
      source_helper.Clean()
      if not self._BuildLibyalLibrary(source_helper):
        return False

    return True
def Main():
  """The main program function.

  Parses the command line arguments, checks the build dependencies and
  builds every library listed in the build configuration.

  Returns:
    True if all libraries were built successfully, False otherwise.
  """
  build_targets = frozenset([
      'download', 'dpkg', 'dpkg-source', 'git', 'pkg', 'rpm',
      'vs2008', 'vs2010', 'vs2012', 'vs2013'])

  args_parser = argparse.ArgumentParser(description=(
      'Downloads and builds the latest versions of the libyal libraries.'))

  args_parser.add_argument(
      'build_target', choices=sorted(build_targets), action='store',
      metavar='BUILD_TARGET', default=None, help='The build target.')

  args_parser.add_argument(
      '--build-directory', '--build_directory', action='store',
      metavar='DIRECTORY', dest='build_directory', type=unicode,
      default=u'.', help=(
          u'The location of the build directory.'))

  args_parser.add_argument(
      '-c', '--config', dest='config_file', action='store',
      metavar='CONFIG_FILE', default=None,
      help='path of the build configuration file.')

  # TODO allow to set msbuild, python path
  # TODO allow to set dpkg version suffix and distribution.

  options = args_parser.parse_args()

  # argparse already enforces the positional argument and its choices;
  # these checks are kept as a defensive belt-and-braces.
  if not options.build_target:
    print('Build target missing.')
    print('')
    args_parser.print_help()
    print('')
    return False

  if options.build_target not in build_targets:
    print('Unsupported build target: {0:s}.'.format(options.build_target))
    print('')
    args_parser.print_help()
    print('')
    return False

  # Default to the libraries.ini file shipped next to this script.
  if not options.config_file:
    options.config_file = os.path.join(
        os.path.dirname(__file__), 'data', 'libraries.ini')

  if not os.path.exists(options.config_file):
    print(u'No such config file: {0:s}.'.format(options.config_file))
    print(u'')
    return False

  build_configuration = BuildConfiguration()
  build_configuration.ReadFromFile(options.config_file)

  logging.basicConfig(
      level=logging.INFO, format=u'[%(levelname)s] %(message)s')

  # Fail early when the system packages required by the selected build
  # target are not installed.
  if options.build_target in ['dpkg', 'dpkg-source']:
    missing_packages = DpkgBuildHelper.CheckBuildDependencies()
    if missing_packages:
      print((u'Required build package(s) missing. Please install: '
             u'{0:s}.'.format(u', '.join(missing_packages))))
      print(u'')
      return False

  elif options.build_target == 'rpm':
    missing_packages = RpmBuildHelper.CheckBuildDependencies()
    if missing_packages:
      print((u'Required build package(s) missing. Please install: '
             u'{0:s}.'.format(u', '.join(missing_packages))))
      print(u'')
      return False

  libyal_builder = LibyalBuilder(options.build_target)

  if not os.path.exists(options.build_directory):
    os.mkdir(options.build_directory)

  current_working_directory = os.getcwd()
  os.chdir(options.build_directory)

  # Build every configured library; stop at the first failure.
  result = True
  for project_name in build_configuration.library_names:
    if not libyal_builder.Build(project_name):
      print(u'Failed building: {0:s}'.format(project_name))
      result = False
      break

  os.chdir(current_working_directory)

  return result
if __name__ == '__main__':
  # Exit with status 0 on success and 1 on failure.
  sys.exit(0 if Main() else 1)
| apache-2.0 |
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/cluster/vq.py | 4 | 25514 | """
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroids. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid. Each
step of the k-means algorithm refines the choices of centroids to
reduce distortion. The change in distortion is used as a
stopping criterion: when the change is lower than a threshold, the
k-means algorithm is not making sufficient progress and
terminates. One can also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred to as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
"""
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
# TODO:
# - implements high level method for running several times k-means with
# different initializations
# - warning: what happens if different number of clusters ? For now, emit a
# warning, but it is not great, because I am not sure it really make sense to
# succeed in this case (maybe an exception is better ?)
import warnings
from numpy.random import randint
from numpy import shape, zeros, sqrt, argmin, minimum, array, \
newaxis, arange, compress, equal, common_type, single, double, take, \
std, mean
import numpy as np
class ClusterError(Exception):
    """Raised when a clustering routine fails.

    NOTE(review): presumably raised by the k-means routines when
    clustering cannot proceed (e.g. an empty cluster) — confirm against
    the kmeans2 implementation further down this module.
    """
    pass
def whiten(obs):
    """
    Normalize a group of observations on a per feature basis.

    Before running k-means, it is beneficial to rescale each feature
    dimension of the observation set with whitening. Each feature is
    divided by its standard deviation across all observations to give
    it unit variance.

    Parameters
    ----------
    obs : ndarray
        Each row of the array is an observation.  The columns are the
        features seen during each observation.

        >>> #         f0    f1    f2
        >>> obs = [[  1.,   1.,   1.],  #o0
        ...        [  2.,   2.,   2.],  #o1
        ...        [  3.,   3.,   3.],  #o2
        ...        [  4.,   4.,   4.]]  #o3

    Returns
    -------
    result : ndarray
        Contains the values in `obs` scaled by the standard deviation
        of each column.  Columns with zero standard deviation are
        returned unchanged (see Notes).

    Notes
    -----
    Features whose standard deviation is zero (constant columns) cannot
    be scaled to unit variance; dividing by zero would fill the column
    with nan/inf.  Such columns are left unchanged and a RuntimeWarning
    is emitted instead.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import whiten
    >>> features  = array([[ 1.9,2.3,1.7],
    ...                    [ 1.5,2.5,2.2],
    ...                    [ 0.8,0.6,1.7,]])
    >>> whiten(features)
    array([[ 4.17944278,  2.69811351,  7.21248917],
           [ 3.29956009,  2.93273208,  9.33380951],
           [ 1.75976538,  0.7038557 ,  7.21248917]])
    """
    std_dev = np.std(obs, axis=0)
    zero_std = std_dev == 0
    if np.any(zero_std):
        # Constant features cannot be whitened; leave them alone rather
        # than producing nan/inf from a division by zero.
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning)
        std_dev = np.where(zero_std, 1.0, std_dev)
    return obs / std_dev
def vq(obs, code_book):
    """
    Assign codes from a code book to observations.

    Each observation vector in the 'M' by 'N' `obs` array is compared
    with the centroids in the code book and assigned the code of the
    closest centroid.  The features in `obs` should have unit variance,
    which can be achieved by passing them through the whiten function.
    The code book can be created with the k-means algorithm or a
    different encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation.  The columns
        are the "features" seen during each observation.  The features
        must be whitened first using the whiten function or something
        equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns
        are the features of the code.

    Returns
    -------
    code : ndarray
        An array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between each observation and its
        nearest code.

    Notes
    -----
    The compiled routine (``_vq``) only supports single and double
    precision.  Any other dtype, or the absence of the compiled
    extension altogether, falls back to the pure-Python `py_vq`.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1.,1.,1.],
    ...                    [2.,2.,2.]])
    >>> features  = array([[ 1.9,2.3,1.7],
    ...                    [ 1.5,2.5,2.2],
    ...                    [ 0.8,0.6,1.7]])
    >>> vq(features,code_book)
    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))
    """
    try:
        import _vq
    except ImportError:
        # No compiled extension available: pure-Python fallback.
        return py_vq(obs, code_book)
    ct = common_type(obs, code_book)
    if ct is single or ct is double:
        # The C routine handles exactly these two precisions.
        return _vq.vq(obs.astype(ct), code_book.astype(ct))
    return py_vq(obs, code_book)
def py_vq(obs, code_book):
    """Pure-Python version of the vq algorithm.

    Computes the Euclidean distance between every observation and every
    code in `code_book`, assigning each observation to its closest code.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should have same number
        of features (eg columns) than obs.

    Returns
    -------
    code : ndarray
        code[i] gives the label of the i-th observation; its code is
        code_book[code[i]].
    mind_dist : ndarray
        min_dist[i] gives the distance between the i-th observation and
        its corresponding code.

    Notes
    -----
    This function is slower than the C version but works for all input
    types.  If the inputs have the wrong types for the C versions of
    the function, this one is called as a last resort.  It is about 20
    times slower than the C version.
    """
    # Rank-1 inputs are delegated to the (currently disabled) 1-d helper.
    if np.ndim(obs) == 1:
        if np.ndim(obs) != np.ndim(code_book):
            raise ValueError(
                "Observation and code_book should have the same rank")
        return _py_vq_1d(obs, code_book)

    n_obs, n_feat = shape(obs)
    # Code books and observations must agree on rank and feature count.
    if np.ndim(obs) != np.ndim(code_book):
        raise ValueError("Observation and code_book should have the same rank")
    if code_book.shape[1] != n_feat:
        raise ValueError("Code book(%d) and obs(%d) should have the same "
                         "number of features (eg columns)" %
                         (code_book.shape[1], n_feat))

    code = zeros(n_obs, dtype=int)
    min_dist = zeros(n_obs)
    for row in range(n_obs):
        # Squared distance from this observation to every code word.
        sq_dist = np.sum((obs[row] - code_book) ** 2, 1)
        code[row] = argmin(sq_dist)
        min_dist[row] = sq_dist[code[row]]
    return code, sqrt(min_dist)
def _py_vq_1d(obs, code_book):
    """Python version of vq algorithm for rank 1 only.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 1 array. Each item is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should rank 1 too.

    Raises
    ------
    RuntimeError
        Always.  The rank-1 implementation is known to be buggy and is
        disabled; callers must pass rank-2 arrays to `py_vq` instead.
    """
    # The original (unreachable) implementation that followed this raise
    # was dead code with two defects: it summed all element differences
    # instead of computing per-observation distances, and it used a
    # Python 2 print statement.  It has been removed; the guard below is
    # the function's entire documented behavior.
    raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
def py_vq2(obs, code_book):
    """2nd Python version of the vq algorithm (fully vectorized).

    Computes the Euclidean distance between each observation and every
    code in `code_book` with a single broadcast expression.

    Parameters
    ----------
    obs : ndarray
        Expect a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should have same number
        of features (eg columns) than obs.

    Returns
    -------
    code : ndarray
        code[i] gives the label of the i-th observation; its code is
        code_book[code[i]].
    mind_dist : ndarray
        min_dist[i] gives the distance between the i-th observation and
        its corresponding code.

    Notes
    -----
    This could be faster when the number of codebooks is small, but it
    becomes a real memory hog when the codebook is large.  It requires
    N by M by O storage where N = number of obs, M = number of
    features, and O = number of codes.
    """
    n_feat = shape(obs)[1]
    # Code books and observations must agree on the feature count.
    if code_book.shape[1] != n_feat:
        raise ValueError("""
            code book(%d) and obs(%d) should have the same
            number of features (eg columns)""" % (code_book.shape[1], n_feat))

    # Broadcast to a (n_codes, n_obs, n_feat) difference tensor.
    deltas = obs[newaxis, :, :] - code_book[:, newaxis, :]
    dist = sqrt(np.sum(deltas * deltas, -1))
    code = argmin(dist, 0)
    # Equivalent to choose(code, dist); reduce proved no slower in
    # practice (see the original implementation's note).
    min_dist = minimum.reduce(dist, 0)
    return code, min_dist
def _kmeans(obs, guess, thresh=1e-5):
    """ "raw" version of k-means: iterate from a given initial codebook
    until the drop in average distortion falls below `thresh`.

    Returns
    -------
    code_book :
        The lowest distortion codebook found.
    avg_dist :
        The average distance an observation is from a code in the book.
        Lower means the code_book matches the data better.

    See Also
    --------
    kmeans : wrapper around k-means

    Examples
    --------
    Note: not whitened in this example.

    >>> from numpy import array
    >>> from scipy.cluster.vq import _kmeans
    >>> features  = array([[ 1.9,2.3],
    ...                    [ 1.5,2.5],
    ...                    [ 0.8,0.6],
    ...                    [ 0.4,1.8],
    ...                    [ 1.0,1.0]])
    >>> book = array((features[0],features[2]))
    >>> _kmeans(features,book)
    (array([[ 1.7       ,  2.4       ],
           [ 0.73333333,  1.13333333]]), 0.40563916697728591)
    """
    code_book = array(guess, copy=True)
    avg_dist = []
    diff = thresh + 1.
    while diff > thresh:
        n_codes = code_book.shape[0]
        # Assign every observation to its nearest code word.
        obs_code, distort = vq(obs, code_book)
        avg_dist.append(mean(distort, axis=-1))
        # Recompute each centroid from its members.  This branch is
        # always taken: the loop condition already guarantees
        # diff > thresh at this point; kept to mirror the original flow.
        if diff > thresh:
            surviving = []
            for idx in arange(n_codes):
                members = compress(equal(obs_code, idx), obs, 0)
                if members.shape[0] > 0:
                    code_book[idx] = mean(members, 0)
                    surviving.append(idx)
            # Drop code words that attracted no observations.
            code_book = take(code_book, surviving, 0)
        if len(avg_dist) > 1:
            diff = avg_dist[-2] - avg_dist[-1]
    return code_book, avg_dist[-1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the centroids until sufficient
    progress cannot be made, i.e. the change in distortion since
    the last iteration is less than some threshold.  This yields a
    code book mapping centroids to codes and vice versa.  Distortion is
    defined as the sum of the squared differences between the
    observations and the corresponding centroid.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector.  The
        columns are the features seen during each observation.  The
        features must be whitened first with the `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate.  A code is assigned to
        each centroid, which is also the row index of the centroid in
        the code_book matrix generated.  The initial k centroids are
        chosen by randomly selecting observations from the observation
        matrix.  Alternatively, passing a k by N array specifies the
        initial k centroids.
    iter : int, optional
        The number of times to run k-means, returning the codebook with
        the lowest distortion.  This argument is ignored if initial
        centroids are specified with an array for the ``k_or_guess``
        parameter.  This parameter does not represent the number of
        iterations of the k-means algorithm.
    thresh : float, optional
        Terminates the k-means algorithm if the change in distortion
        since the last k-means iteration is less than or equal to
        thresh.

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids.  The i'th centroid codebook[i]
        is represented with the code i.  The centroids and codes
        generated represent the lowest distortion seen, not necessarily
        the globally minimal distortion.
    distortion : float
        The distortion between the observations passed and the
        centroids generated.

    Raises
    ------
    ValueError
        If `iter` is less than 1, or zero clusters were requested.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering.
    whiten : must be called prior to passing an observation matrix
        to kmeans.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq, kmeans, whiten
    >>> features  = array([[ 1.9,2.3],
    ...                    [ 1.5,2.5],
    ...                    [ 0.8,0.6],
    ...                    [ 0.4,1.8],
    ...                    [ 0.1,0.1],
    ...                    [ 0.2,1.8],
    ...                    [ 2.0,0.5],
    ...                    [ 0.3,1.5],
    ...                    [ 1.0,1.0]])
    >>> whitened = whiten(features)
    >>> book = array((whitened[0],whitened[2]))
    >>> kmeans(whitened,book)
    (array([[ 2.3110306 ,  2.86287398],
           [ 0.93218041,  1.24398691]]), 0.85684700941625547)
    """
    if int(iter) < 1:
        raise ValueError('iter must be at least 1.')
    # isinstance (rather than an exact type() comparison) also accepts
    # ndarray subclasses (e.g. np.matrix) as an initial codebook.
    if isinstance(k_or_guess, np.ndarray):
        guess = k_or_guess
        if guess.size < 1:
            raise ValueError("Asked for 0 cluster ? initial book was %s" %
                             guess)
        result = _kmeans(obs, guess, thresh=thresh)
    else:
        # Run `iter` random restarts, keeping the lowest-distortion book.
        best_dist = np.inf
        No = obs.shape[0]
        k = k_or_guess
        if k < 1:
            raise ValueError("Asked for 0 cluster ? ")
        for _ in range(iter):
            # The initial code book is randomly selected from observations.
            guess = take(obs, randint(0, No, k), 0)
            book, dist = _kmeans(obs, guess, thresh=thresh)
            if dist < best_dist:
                best_book = book
                best_dist = dist
        result = best_book, best_dist
    return result
def _kpoints(data, k):
    """Pick k points at random in data (one row = one observation).

    This is done by taking the k first values of a random permutation
    of 0..N-1 where N is the number of observations.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe
        one-dimensional data, rank 2 multidimensional data, in which
        case one row is one observation.
    k : int
        Number of samples to generate.

    Returns
    -------
    x : ndarray
        k randomly chosen observations, copied from `data`.
    """
    if data.ndim > 1:
        n = data.shape[0]
        p = np.random.permutation(n)
        # Rank 2: select whole rows.
        x = data[p[:k], :].copy()
    else:
        n = data.size
        p = np.random.permutation(n)
        # Rank 1: plain fancy indexing.  The rank-2 form data[idx, :]
        # raises IndexError on 1-d input (this was the original bug).
        x = data[p[:k]].copy()
    return x
def _krandinit(data, k):
    """Returns k samples of a random variable whose parameters depend on data.

    More precisely, it returns k observations sampled from a Gaussian
    random variable whose mean and covariance are the ones estimated
    from data.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe
        one-dimensional data, rank 2 multidimensional data, in which
        case one row is one observation.
    k : int
        Number of samples to generate.
    """
    def _sample_1d(values):
        # Fit a univariate Gaussian and draw k samples from it.
        center = np.mean(values)
        spread = np.cov(values)
        draws = np.random.randn(k)
        draws *= np.sqrt(spread)
        draws += center
        return draws

    def _sample_nd(values):
        # Fit a multivariate Gaussian; push standard-normal draws
        # through the Cholesky factor of the estimated covariance.
        center = np.mean(values, 0)
        covmat = np.atleast_2d(np.cov(values, rowvar=0))
        # k rows, d cols (one row = one sampled observation).
        draws = np.random.randn(k, center.size)
        return np.dot(draws, np.linalg.cholesky(covmat).T) + center

    if np.ndim(data) == 1:
        return _sample_1d(data)
    return _sample_nd(data)
# Map of supported ``minit`` initialization names to their implementations.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
    """Warn the user that an empty cluster was produced."""
    msg = ("One of the clusters is empty. "
           "Re-run kmean with a different initialization.")
    warnings.warn(msg)
def _missing_raise():
    """Raise a ClusterError signalling that an empty cluster was produced."""
    msg = ("One of the clusters is empty. "
           "Re-run kmean with a different initialization.")
    raise ClusterError(msg)
# Map of supported ``missing`` policy names to their handlers.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter = 10, thresh = 1e-5, minit = 'random',
        missing = 'warn'):
    """
    Classify a set of observations into k clusters using the k-means
    algorithm.

    The algorithm attempts to minimize the Euclidian distance between
    observations and centroids.  Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a
        length 'M' array of 'M' one-dimensional observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate.  If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is interpreted
        as initial cluster to use instead.
    iter : int
        Number of iterations of the k-means algorithm to run.  Note
        that this differs in meaning from the iter parameter to the
        kmeans function.
    thresh : float
        (not used yet)
    minit : string
        Method for initialization.  Available methods are 'random',
        'points', 'uniform', and 'matrix':

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        'points': choose k observations (rows) at random from data for
        the initial centroids.

        'uniform': generate k observations from the data from a uniform
        distribution defined by the data set (unsupported).

        'matrix': interpret the k parameter as a k by M (or length k
        array for one-dimensional data) array of initial centroids.
    missing : string
        Policy for empty clusters: 'warn' emits a warning, 'raise'
        raises a ClusterError (see _valid_miss_meth).

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the i'th
        observation is closest to.

    Raises
    ------
    ValueError
        For an unknown `missing` or `minit` policy, input of rank > 2,
        empty input, a k that cannot be used, or a non-positive `iter`.
    """
    # Membership test on the dict itself; .keys() adds nothing here.
    if missing not in _valid_miss_meth:
        raise ValueError("Unkown missing method: %s" % str(missing))
    # If data is rank 1, then we have 1 dimension problem.
    nd = np.ndim(data)
    if nd == 1:
        d = 1
    elif nd == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 not supported")

    if np.size(data) < 1:
        raise ValueError("Input has 0 items.")

    # If k is not a single value, then it should be compatible with data's
    # shape.
    if np.size(k) > 1 or minit == 'matrix':
        if nd != np.ndim(k):
            raise ValueError("k is not an int and has not same rank than data")
        if d == 1:
            nc = len(k)
        else:
            (nc, dc) = k.shape
            if dc != d:
                raise ValueError("k is not an int and has not same rank than\
data")
        clusters = k.copy()
    else:
        try:
            nc = int(k)
        except TypeError:
            raise ValueError("k (%s) could not be converted to an integer " % str(k))

        if nc < 1:
            raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))

        if nc != k:
            warnings.warn("k was not an integer, was converted.")
        try:
            init = _valid_init_meth[minit]
        except KeyError:
            raise ValueError("unknown init method %s" % str(minit))
        clusters = init(data, k)

    if int(iter) < 1:
        raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter)

    return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing])
def _kmeans2(data, code, niter, nc, missing):
    """ "raw" version of kmeans2. Do not use directly.

    Run k-means with a given initial codebook for `niter` iterations.

    Parameters
    ----------
    data : ndarray
        Observations, one row per observation.
    code : ndarray
        Initial codebook; its rows are updated in place for non-empty
        clusters.
    niter : int
        Number of iterations to run.
    nc : int
        Number of clusters (rows expected in `code`).
    missing : callable
        Invoked with no arguments whenever a cluster has no members.
    """
    for _ in range(niter):
        # Label each observation with its nearest code word.
        label = vq(data, code)[0]
        # Move every non-empty centroid to the mean of its members.
        for j in range(nc):
            member_idx = np.where(label == j)
            if member_idx[0].size > 0:
                code[j] = np.mean(data[member_idx], axis=0)
            else:
                missing()
    return code, label
# Import-only module: nothing runs when executed as a script.  The
# commented-out lines below are old manual smoke tests for the _vq C
# extension, kept for reference.
if __name__ == '__main__':
    pass
    #import _vq
    #a = np.random.randn(4, 2)
    #b = np.random.randn(2, 2)
    #print _vq.vq(a, b)
    #print _vq.vq(np.array([[1], [2], [3], [4], [5], [6.]]),
    #             np.array([[2.], [5.]]))
    #print _vq.vq(np.array([1, 2, 3, 4, 5, 6.]), np.array([2., 5.]))
    #_vq.vq(a.astype(np.float32), b.astype(np.float32))
    #_vq.vq(a, b.astype(np.float32))
    #_vq.vq([0], b)
| gpl-3.0 |
oloendithas/GT-P6800_JB_Kernel | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Watch task (fork/exit) and comm events system-wide via the perf
    # python bindings and print each sample as it arrives.  Runs
    # forever; interrupt with Ctrl-C.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): SAMPLE_TID is OR'ed into sample_type twice; the
    # second occurrence is a no-op for a bitmask — possibly SAMPLE_TIME
    # was intended.  Confirm against the perf evsel documentation.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until at least one mmap'ed ring buffer has data, then drain
    # each CPU's buffer in turn.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
# Script entry point: run the event watch loop until interrupted.
if __name__ == '__main__':
    main()
| gpl-2.0 |
bartoldeman/easybuild-framework | easybuild/toolchains/__init__.py | 1 | 1244 | ##
# Copyright 2012-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
# Declare ``easybuild.toolchains`` as a pkg_resources-style namespace
# package so toolchain modules can be split across multiple
# distributions sharing this package name.
# NOTE(review): pkg_resources namespace packages are deprecated in
# newer setuptools — confirm before migrating to native (PEP 420)
# namespace packages.
import pkg_resources
pkg_resources.declare_namespace(__name__)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.