repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
hemantsangwan/Python-home-assistant | homeassistant/components/light/tellstick.py | 2 | 3083 | """ Support for Tellstick lights. """
import logging
# pylint: disable=no-name-in-module, import-error
from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.const import ATTR_FRIENDLY_NAME
from homeassistant.helpers.device import ToggleDevice
import tellcore.constants as tellcore_constants
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """ Find and return tellstick lights. """
    try:
        import tellcore.telldus as telldus
    except ImportError:
        logging.getLogger(__name__).exception("Failed to import tellcore")
        return []
    # Only devices that support dimming are exposed as lights; plain
    # on/off devices are handled by the switch platform instead.
    dimmable = [
        TellstickLight(device)
        for device in telldus.TelldusCore().devices()
        if device.methods(tellcore_constants.TELLSTICK_DIM)
    ]
    add_devices_callback(dimmable)
class TellstickLight(ToggleDevice):
    """A dimmable light controlled through a Tellstick device."""

    # Commands whose most recently sent value determines current state.
    last_sent_command_mask = (tellcore_constants.TELLSTICK_TURNON |
                              tellcore_constants.TELLSTICK_TURNOFF |
                              tellcore_constants.TELLSTICK_DIM |
                              tellcore_constants.TELLSTICK_UP |
                              tellcore_constants.TELLSTICK_DOWN)

    def __init__(self, tellstick):
        self.tellstick = tellstick
        self.state_attr = {ATTR_FRIENDLY_NAME: tellstick.name}
        self.brightness = 0

    @property
    def name(self):
        """Name reported by the underlying tellstick device."""
        return self.tellstick.name

    @property
    def is_on(self):
        """The light counts as on whenever it has non-zero brightness."""
        return self.brightness > 0

    def turn_off(self, **kwargs):
        """Switch the light off and reset tracked brightness."""
        self.tellstick.turn_off()
        self.brightness = 0

    def turn_on(self, **kwargs):
        """Switch the light on, dimming to the requested brightness
        (full brightness when none is given)."""
        requested = kwargs.get(ATTR_BRIGHTNESS)
        self.brightness = 255 if requested is None else requested
        self.tellstick.dim(self.brightness)

    @property
    def state_attributes(self):
        """Optional state attributes: friendly name plus brightness."""
        return {
            ATTR_FRIENDLY_NAME: self.name,
            ATTR_BRIGHTNESS: int(self.brightness),
        }

    def update(self):
        """Refresh brightness from the last command sent to the device."""
        last_command = self.tellstick.last_sent_command(
            self.last_sent_command_mask)
        if last_command == tellcore_constants.TELLSTICK_TURNON:
            self.brightness = 255
        elif last_command == tellcore_constants.TELLSTICK_TURNOFF:
            self.brightness = 0
        elif last_command in (tellcore_constants.TELLSTICK_DIM,
                              tellcore_constants.TELLSTICK_UP,
                              tellcore_constants.TELLSTICK_DOWN):
            dim_value = self.tellstick.last_sent_value()
            if dim_value is not None:
                self.brightness = dim_value
| mit |
gangadharkadam/contributionerp | erpnext/patches/v5_0/update_item_desc_in_invoice.py | 96 | 1641 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.website.utils import find_first_image
from frappe.utils import cstr
import re
def execute():
	"""Backfill description/image on invoice item rows from the Item master.

	Rows whose stored description still matches the Item's own description
	keep it as-is; any other stored HTML is split into plain description
	text plus a separate image URL.
	"""
	item_details = frappe._dict()
	for d in frappe.db.sql("select name, description, image from `tabItem`", as_dict=1):
		description = cstr(d.description).strip()
		item_details.setdefault(d.name, frappe._dict({
			"description": description,
			"image": d.image
		}))
	dt_list= ["Sales Invoice Item","Purchase Invoice Item"]
	for dt in dt_list:
		frappe.reload_doctype(dt)
		records = frappe.db.sql("""select name, item_code, description from `tab{0}`
			where ifnull(item_code, '') != '' and description is not null """.format(dt), as_dict=1)
		count = 1
		for d in records:
			# Description unchanged from the Item master: reuse the master's
			# description and image directly.
			if item_details.get(d.item_code) and cstr(d.description) == item_details.get(d.item_code).description:
				desc = item_details.get(d.item_code).description
				image = item_details.get(d.item_code).image
			else:
				# Custom description: pull any embedded <img> out of the HTML.
				desc, image = extract_image_and_description(cstr(d.description))
				if not image:
					# Fall back to the master Item's image when none embedded.
					item_detail = item_details.get(d.item_code)
					if item_detail:
						image = item_detail.image
			frappe.db.sql("""update `tab{0}` set description = %s, image = %s
				where name = %s """.format(dt), (desc, image, d.name))
			count += 1
			# Commit every 500 rows to keep the transaction bounded.
			if count % 500 == 0:
				frappe.db.commit()
def extract_image_and_description(data):
	"""Split HTML *data* into (description_text, first_image_url).

	The first <img> URL is located via find_first_image; img/table/tr/td
	tags are then stripped from the text that remains.
	"""
	image_url = find_first_image(data)
	desc = data
	# Raw strings: the original non-raw "\</*...\>" literals relied on the
	# invalid string escapes \< and \>, which emit DeprecationWarning on
	# modern Python. In a regex, \< still matches a literal '<', so the
	# matching behavior is unchanged.
	for tag in ("img", "table", "tr", "td"):
		desc = re.sub(r"\</*{0}[^>]*\>".format(tag), "", desc)
	return desc, image_url
pigeonflight/strider-plone | docker/appengine/lib/django-1.4/tests/regressiontests/utils/jslex.py | 34 | 9619 | """Tests for jslex."""
# encoding: utf-8
# originally from https://bitbucket.org/ned/jslex
from django.test import TestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(TestCase):
    """Tokenisation fixtures for JsLexer.

    Each LEX_CASES entry is (js_source, expected_tokens); one test method
    per entry is attached at import time by the make_function/setattr loop
    below this class.
    """
    LEX_CASES = [
        # ids
        ("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
        (r"\u1234 abc\u0020 \u0065_\u0067", [r"id \u1234", r"id abc\u0020", r"id \u0065_\u0067"]),
        # numbers
        ("123 1.234 0.123e-3 0 1E+40 1e1 .123", ["dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40", "dnum 1e1", "dnum .123"]),
        ("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
        ("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
        ("0xa123ghi", ["hnum 0xa123", "id ghi"]),
        # keywords
        ("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
        ("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
        ("true true_enough", ["reserved true", "id true_enough"]),
        # strings
        (''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
        (r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """,
         [r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""", r'''string "'"''', r"""string '\''""", r'''string "\""''']),
        (ur'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [ur'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
        # comments
        ("a//b", ["id a", "linecomment //b"]),
        ("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
        ("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),
        # punctuation
        ("a+++b", ["id a", "punct ++", "punct +", "id b"]),
        # regex
        (r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
        (r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
        (r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
        (r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
        # next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
        ("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        ("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
          "punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        # Various "illegal" regexes that are valid according to the std.
        (r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),
        # Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409
        (r"""/\[/""", [r"""regex /\[/"""]),
        (r"""/[i]/""", [r"""regex /[i]/"""]),
        (r"""/[\]]/""", [r"""regex /[\]]/"""]),
        (r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
        (r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
        (r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
        (r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
        ("""
            rexl.re = {
                NAME: /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/,
                UNQUOTED_LITERAL: /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
                QUOTED_LITERAL: /^'(?:[^']|'')*'/,
                NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
                SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            """,
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""", "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
          "punct }", "punct ;"
          ]),
        ("""
            rexl.re = {
                NAME: /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/,
                UNQUOTED_LITERAL: /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
                QUOTED_LITERAL: /^'(?:[^']|'')*'/,
                NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
                SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            str = '"';
            """,
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""", "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
          "punct }", "punct ;",
          "id str", "punct =", """string '"'""", "punct ;",
          ]),
        (r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
         ["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this", "punct .",
          "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"', "punct )",
          "punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
          r'string "\")"', "punct ;"]),
    ]
def make_function(input, toks):
    """Build a TestCase method asserting that lexing *input* yields *toks*."""
    def test_func(self):
        # Whitespace tokens are noise for these comparisons, so drop them.
        observed = [
            "%s %s" % (name, tok)
            for name, tok in JsLexer().lex(input)
            if name != 'ws'
        ]
        self.assertListEqual(observed, toks)
    return test_func
# Attach one generated test method per LEX_CASES entry.
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
    setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
r"""
\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(TestCase):
    """Tests for prepare_js_for_gettext; methods are attached dynamically
    from GETTEXT_CASES by the loop below."""
    pass
def make_function(js, c):
    """Build a TestCase method asserting prepare_js_for_gettext(js) == c."""
    def test_func(self):
        converted = prepare_js_for_gettext(js)
        self.assertMultiLineEqual(converted, c)
    return test_func
# Attach one generated test method per GETTEXT_CASES pair.
for i, pair in enumerate(GETTEXT_CASES):
    setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
| mit |
ttfseiko/openerp-trunk | openerp/addons/base/module/report/ir_module_reference_print.py | 384 | 3704 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class ir_module_reference_print(report_sxw.rml_parse):
    """RML parser exposing module/model introspection helpers to the
    ir_module_reference report template."""
    def __init__(self, cr, uid, name, context):
        super(ir_module_reference_print, self).__init__(cr, uid, name, context=context)
        # Helpers made available to the RML template under these names.
        self.localcontext.update({
            'time': time,
            'findobj': self._object_find,
            'objdoc': self._object_doc,
            'objdoc2': self._object_doc2,
            'findflds': self._fields_find,
        })
    def _object_doc(self, obj):
        """Return the first paragraph of model *obj*'s docstring, or None
        when the model has no docstring."""
        modobj = self.pool[obj]
        strdocs= modobj.__doc__
        if not strdocs:
            return None
        else:
            strdocs=strdocs.strip().splitlines(True)
        res = ''
        # Accumulate lines up to the first blank line (= first paragraph).
        for stre in strdocs:
            if not stre or stre.isspace():
                break
            res += stre
        return res
    def _object_doc2(self, obj):
        """Return the docstring lines *after* the first blank line (i.e.
        everything past the first paragraph) as stripped strings."""
        modobj = self.pool[obj]
        strdocs= modobj.__doc__
        if not strdocs:
            return None
        else:
            strdocs=strdocs.strip().splitlines(True)
        res = []
        fou = False
        for stre in strdocs:
            if fou:
                res.append(stre.strip())
            elif not stre or stre.isspace():
                # First blank line found; start collecting from here on.
                fou = True
        return res
    def _object_find(self, module):
        """Browse every ir.model record that was defined by *module*."""
        ids2 = self.pool['ir.model.data'].search(self.cr, self.uid, [('module','=',module), ('model','=','ir.model')])
        ids = []
        for mod in self.pool['ir.model.data'].browse(self.cr, self.uid, ids2):
            ids.append(mod.res_id)
        modobj = self.pool['ir.model']
        return modobj.browse(self.cr, self.uid, ids)
    def _fields_find(self, obj, module):
        """Return sorted (name, definition) pairs for the fields of model
        *obj* that were introduced by *module*."""
        res = []
        data_obj = self.pool['ir.model.data']
        modobj = self.pool[obj]
        # ir.model.data names for fields follow 'field_<model>_<field>'.
        fname_wildcard = 'field_' + modobj._name.replace('.', '_') + '_%'
        module_fields_ids = data_obj.search(self.cr, self.uid, [('model', '=', 'ir.model.fields'), ('module', '=', module), ('name', 'like', fname_wildcard)])
        if module_fields_ids:
            module_fields_res_ids = [x['res_id'] for x in data_obj.read(self.cr, self.uid, module_fields_ids, ['res_id'])]
            module_fields_names = [x['name'] for x in self.pool['ir.model.fields'].read(self.cr, self.uid, module_fields_res_ids, ['name'])]
            # NOTE(review): relies on Python 2 dict.items() returning a
            # sortable list; .items() is a view on Python 3.
            res = modobj.fields_get(self.cr, self.uid, allfields=module_fields_names).items()
            res.sort()
        return res
# Register the RML report under service name 'report.ir.module.reference'
# for the ir.module.module model, parsed by the class above.
report_sxw.report_sxw('report.ir.module.reference', 'ir.module.module',
    'addons/base/module/report/ir_module_reference.rml',
    parser=ir_module_reference_print, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sunze/py_flask | venv/lib/python3.4/site-packages/kombu/transport/sqlalchemy/models.py | 38 | 1961 | from __future__ import absolute_import
import datetime
from sqlalchemy import (Column, Integer, String, Text, DateTime,
Sequence, Boolean, ForeignKey, SmallInteger)
from sqlalchemy.orm import relation
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.schema import MetaData
# Shared declarative registry and metadata; class_registry is consulted
# below (Message.queue_id) to resolve the generated Queue model's table name.
class_registry = {}
metadata = MetaData()
ModelBase = declarative_base(metadata=metadata, class_registry=class_registry)
class Queue(object):
    """Queue model mixin: a surrogate id plus a unique queue name.

    NOTE(review): plain mixin (not a ModelBase subclass) — presumably
    combined with ModelBase into a concrete mapped class elsewhere in the
    transport; verify against the caller.
    """
    __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
    # Surrogate primary key; the Sequence is used on backends needing one.
    id = Column(Integer, Sequence('queue_id_sequence'), primary_key=True,
                autoincrement=True)
    name = Column(String(200), unique=True)
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return '<Queue({self.name})>'.format(self=self)
    @declared_attr
    def messages(cls):
        # One-to-many relation; lazy='noload' so the collection is never
        # fetched implicitly.
        return relation('Message', backref='queue', lazy='noload')
class Message(object):
    """Message model mixin: payload plus visibility/timestamp bookkeeping.

    NOTE(review): plain mixin like Queue above — presumably combined with
    ModelBase by the transport; verify against the caller.
    """
    __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
    id = Column(Integer, Sequence('message_id_sequence'),
                primary_key=True, autoincrement=True)
    # visible=False marks a message as consumed/hidden.
    visible = Column(Boolean, default=True, index=True)
    sent_at = Column('timestamp', DateTime, nullable=True, index=True,
                     onupdate=datetime.datetime.now)
    payload = Column(Text, nullable=False)
    # Optimistic-locking counter (see __mapper_args__ below).
    version = Column(SmallInteger, nullable=False, default=1)
    __mapper_args__ = {'version_id_col': version}
    def __init__(self, payload, queue):
        self.payload = payload
        self.queue = queue
    def __str__(self):
        return '<Message: {0.sent_at} {0.payload} {0.queue_id}>'.format(self)
    @declared_attr
    def queue_id(self):
        # Foreign key to the concrete Queue table, looked up through the
        # shared class_registry defined at module level.
        return Column(
            Integer,
            ForeignKey(
                '%s.id' % class_registry['Queue'].__tablename__,
                name='FK_kombu_message_queue'
            )
        )
| mit |
msabramo/pip | pip/vcs/subversion.py | 86 | 10632 | from __future__ import absolute_import
import logging
import os
import re
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.index import Link
from pip.utils import rmtree, display_path, call_subprocess
from pip.utils.logging import indent_log
from pip.vcs import vcs, VersionControl
# Patterns for scraping URL/revision info out of .svn/entries files (XML and
# pre-1.7 text formats) and out of `svn info [--xml]` output.
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
    """pip VCS backend that shells out to the ``svn`` command-line client."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
    def get_info(self, location):
        """Returns (url, revision), where both are strings"""
        assert not location.rstrip('/').endswith(self.dirname), \
            'Bad directory: %s' % location
        output = call_subprocess(
            [self.cmd, 'info', location],
            show_stdout=False,
            extra_environ={'LANG': 'C'},
        )
        match = _svn_url_re.search(output)
        if not match:
            logger.warning(
                'Cannot determine URL of svn checkout %s',
                display_path(location),
            )
            logger.debug('Output that cannot be parsed: \n%s', output)
            return None, None
        url = match.group(1).strip()
        match = _svn_revision_re.search(output)
        if not match:
            logger.warning(
                'Cannot determine revision of svn checkout %s',
                display_path(location),
            )
            logger.debug('Output that cannot be parsed: \n%s', output)
            return url, None
        return url, match.group(1)
    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        logger.info('Exporting svn repository %s to %s', url, location)
        with indent_log():
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing
                # directory --force fixes this, but was only added in svn 1.5
                rmtree(location)
            call_subprocess(
                [self.cmd, 'export'] + rev_options + [url, location],
                filter_stdout=self._filter, show_stdout=False)
    def switch(self, dest, url, rev_options):
        """Switch the checkout at *dest* to track *url*."""
        call_subprocess(
            [self.cmd, 'switch'] + rev_options + [url, dest])
    def update(self, dest, rev_options):
        """Run ``svn update`` on the checkout at *dest*."""
        call_subprocess(
            [self.cmd, 'update'] + rev_options + [dest])
    def obtain(self, dest):
        """Check the repository out into *dest* (after check_destination
        has decided what to do with any pre-existing content)."""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        if rev:
            rev_display = ' (to revision %s)' % rev
        else:
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Checking out %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            call_subprocess(
                [self.cmd, 'checkout', '-q'] + rev_options + [url, dest])
    def get_location(self, dist, dependency_links):
        """Return the first dependency link whose #egg= fragment matches
        *dist*'s key (fragment stripped), or None when there is no match."""
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                # FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None
    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue  # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                # FIXME: should we warn?
                continue
            dirurl, localrev = self._get_svn_url_rev(base)
            if base == location:
                base_url = dirurl + '/'   # save the root url
            elif not dirurl or not dirurl.startswith(base_url):
                dirs[:] = []
                continue  # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision
    def get_url_rev(self):
        """Like the base implementation, but restore the svn+ prefix that
        it strips from svn+ssh:// URLs."""
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev = super(Subversion, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev
    def get_url(self, location):
        """Return the svn URL of the directory at/above *location* that
        contains setup.py, or None if no setup.py is found."""
        # In cases where the source is in a subdirectory, not alongside
        # setup.py we have to look up in the location until we find a real
        # setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        return self._get_svn_url_rev(location)[0]
    def _get_svn_url_rev(self, location):
        """Parse (url, max_revision) from the checkout's .svn metadata,
        handling the pre-1.5 text format, the 1.5/1.6 XML format, and
        svn >= 1.7 (which requires asking ``svn info --xml``)."""
        from pip.exceptions import InstallationError
        with open(os.path.join(location, self.dirname, 'entries')) as f:
            data = f.read()
        if (data.startswith('8')
                or data.startswith('9')
                or data.startswith('10')):
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1)  # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = call_subprocess(
                    [self.cmd, 'info', '--xml', location],
                    show_stdout=False,
                )
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [
                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
                ]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev
    def get_tag_revs(self, svn_tag_url):
        """Return [(tag_name, revision), ...] parsed from ``svn ls -v``."""
        stdout = call_subprocess(
            [self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
        results = []
        for line in stdout.splitlines():
            parts = line.split()
            rev = int(parts[0])
            tag = parts[-1].strip('/')
            results.append((tag, rev))
        return results
    def find_tag_match(self, rev, tag_revs):
        """Return the tag whose revision is the smallest one above *rev*,
        or None if no tag is newer than *rev*."""
        best_match_rev = None
        best_tag = None
        for tag, tag_rev in tag_revs:
            if (tag_rev > rev and
                    (best_match_rev is None or best_match_rev > tag_rev)):
                # FIXME: Is best_match > tag_rev really possible?
                # or is it a sign something is wacky?
                best_match_rev = tag_rev
                best_tag = tag
        return best_tag
    def get_src_requirement(self, dist, location, find_tags=False):
        """Build an ``svn+URL@rev#egg=name`` requirement string for the
        checkout at *location* (None if its URL cannot be determined)."""
        repo = self.get_url(location)
        if repo is None:
            return None
        parts = repo.split('/')
        # FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        if parts[-2] in ('tags', 'tag'):
            # It's a tag, perfect!
            full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
        elif parts[-2] in ('branches', 'branch'):
            # It's a branch :(
            full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
        elif parts[-1] == 'trunk':
            # Trunk :-/
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
            if find_tags:
                tag_url = '/'.join(parts[:-1]) + '/tags'
                tag_revs = self.get_tag_revs(tag_url)
                match = self.find_tag_match(rev, tag_revs)
                if match:
                    # NOTE(review): format string has two %s placeholders
                    # but only one argument is passed — logging will report
                    # a formatting error when this fires; verify intent.
                    logger.info(
                        'trunk checkout %s seems to be equivalent to tag %s',
                        match,
                    )
                    repo = '%s/%s' % (tag_url, match)
                    full_egg_name = '%s-%s' % (egg_project_name, match)
        else:
            # Don't know what it is
            logger.warning(
                'svn URL does not fit normal structure (tags/branches/trunk): '
                '%s',
                repo,
            )
            full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
        return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
    """Return svn command-line options selecting *rev* plus any credentials
    embedded in *url* (``user:password@host``)."""
    rev_options = ['-r', rev] if rev else []

    split = urllib_parse.urlsplit(url)
    if hasattr(split, 'username'):
        # >= Python-2.5
        username, password = split.username, split.password
    else:
        # Legacy fallback: pick the auth part out of the netloc by hand.
        username = password = None
        netloc = split[1]
        if '@' in netloc:
            auth = netloc.split('@')[0]
            if ':' in auth:
                username, password = auth.split(':', 1)
            else:
                username = auth

    if username:
        rev_options += ['--username', username]
    if password:
        rev_options += ['--password', password]
    return rev_options
# Make the Subversion backend available to pip's VCS dispatch.
vcs.register(Subversion)
| mit |
biolab/red | examples.py | 1 | 3607 | """
Copyright (C) 2013 Marinka Zitnik <marinka.zitnik@fri.uni-lj.si>
This file is part of Red.
Red is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Red is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Red. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import numpy as np
from sklearn.metrics import roc_auc_score
from data import loader
from red import Red
def run_red(G, S, H, genes):
    """Fit a Red model on (G, S, H, genes) with the paper's fixed
    hyper-parameters and return the fitted object."""
    rank = 100
    alpha = 0.1
    beta = alpha
    lambda_u = 1e-4
    lambda_v = lambda_u

    gene_red = Red(G, S, H, genes)
    gene_red.order(rank=rank, lambda_u=lambda_u, lambda_v=lambda_v,
                   alpha=alpha, beta=beta,
                   verbose=False, callback=None)
    return gene_red
def predict_alleviating(G, S, H, genes, alleviating_set):
    """Mask the known alleviating pairs out of G (in place), refit Red, and
    score how well the model recovers them (ROC AUC)."""
    g2i = {g: i for i, g in enumerate(genes)}
    # Hide the ground-truth interactions from the model before fitting.
    for cls, g1, g2 in alleviating_set:
        G[g2i[g1], g2i[g2]] = G[g2i[g2], g2i[g1]] = np.nan
    gene_red = run_red(G, S, H, genes)
    pred = [gene_red.alleviating(u, v) for _, u, v in alleviating_set]
    # NOTE(review): zip(*...)[0] requires Python 2 (zip is not subscriptable
    # on Python 3); the file's print statements confirm a py2 target.
    auc = roc_auc_score(zip(*alleviating_set)[0], pred)
    print "Alleviating AUC: %5.4f" % auc
    return gene_red, auc
def predict_kegg(G, S, H, genes, kegg):
gene_red = run_red(G, S, H, genes)
pred = [gene_red.epistatic_to(v, u) for _, u, v in kegg]
auc = roc_auc_score(zip(*kegg)[0], pred)
print "KEGG AUC: %5.4f" % auc
return gene_red, auc
def predict_glycans(G, S, H, genes, glycans):
gene_red = run_red(G, S, H, genes)
pred = [gene_red.epistatic_to(v, u) for _, u, v in glycans]
auc = roc_auc_score(zip(*glycans)[0], pred)
print "N-linked glycosylation AUC: %5.4f" % auc
return gene_red, auc
# Experiment driver: load the Jonikas interaction data, run the four
# evaluations from the paper, then inspect three known pathways.
path = os.path.abspath("data/080930a_DM_data.mat")
G, S, H, genes = loader.load_jonikas_data(path)
# Fixed seed so the factorization-based experiments are reproducible.
np.random.seed(42)
# 1
# KEGG ordered vs unordered gene pairs.
path_ord_kegg = os.path.abspath("data/KEGG_ordered.txt")
path_unord_kegg = os.path.abspath("data/KEGG_nonordered.txt")
kegg = loader.load_battle_KEGG_data(path_ord_kegg, path_unord_kegg)
predict_kegg(G, S, H, genes, kegg)
# 2
# N-linked glycosylation positive vs negative pairs.
path_neg_glycans = os.path.abspath("data/N-linked-glycans_negative.txt")
path_pos_glycans = os.path.abspath("data/N-linked-glycans_positive.txt")
glycans = loader.load_n_linked_glycans_data(path_pos_glycans, path_neg_glycans)
predict_glycans(G, S, H, genes, glycans)
# 3
# NOTE: predict_alleviating masks entries of G in place with NaN.
alleviating_set = loader.get_alleviating_interactions(path)
predict_alleviating(G, S, H, genes, alleviating_set)
# 4
# Inspect inferred relationships for three known pathways.
gene_red = run_red(G, S, H, genes)
glycan_genes = {'CWH41', 'DIE2', 'ALG8', 'ALG6', 'ALG5', 'ALG12',
                'ALG9', 'ALG3', 'OST3', 'OST5'}
erad_genes = {'MNL1', 'YOS9', 'YLR104W', 'DER1', 'USA1', 'HRD3', 'HRD1',
              'UBC7', 'CUE1'}
tailanch_genes = {'SGT2', 'MDY2', 'YOR164C',
                  'GET3', 'GET2', 'GET1'}
print '\n**N-linked glycosylation pathway'
gene_red.print_relationships(glycan_genes)
gene_red.construct_network(glycan_genes)
print '\n**ERAD pathway'
gene_red.print_relationships(erad_genes)
gene_red.construct_network(erad_genes)
print '\n**Tail-anchored protein insertion pathway'
gene_red.print_relationships(tailanch_genes)
gene_red.construct_network(tailanch_genes) | gpl-3.0 |
afaheem88/tempest | tempest/services/compute/json/migrations_client.py | 6 | 1231 | # Copyright 2014 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from six.moves.urllib import parse as urllib
from tempest.api_schema.response.compute.v2_1 import migrations as schema
from tempest.common import service_client
class MigrationsClient(service_client.ServiceClient):
    """Client for Nova's os-migrations API."""

    def list_migrations(self, params=None):
        """Lists all migrations."""
        url = 'os-migrations'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, raw_body = self.get(url)
        migrations_body = json.loads(raw_body)
        # Validate against the published response schema before returning.
        self.validate_response(schema.list_migrations, resp, migrations_body)
        return service_client.ResponseBodyList(resp,
                                               migrations_body['migrations'])
| apache-2.0 |
sakshambhatla/twitterDataAnalysis | main.py | 1 | 1030 | #!/usr/bin/env python
import tweepy, time
def search_tweets(q, count=100):
    """Search recent tweets matching *q*, returning up to *count* results.

    Bug fix: the original body called ``t.search.tweets(...)`` but no ``t``
    is defined anywhere in this module, so any call raised NameError.
    Route the query through the module-level tweepy ``api`` handle that the
    rest of the script already uses.
    """
    return api.search(q=q, result_type='recent', count=count)
# Credentials are read from a sibling file, one value per line: consumer
# key/secret then access key/secret.
# NOTE(review): the file is opened in binary mode and lines are wrapped in
# str(); that only yields the raw text on Python 2 (the print statements
# below confirm this script targets py2).
accessFile = open("../accessDetails.txt", "rb")
CONSUMER_KEY = str(accessFile.readline()).rstrip()
CONSUMER_SECRET = str(accessFile.readline()).rstrip()
ACCESS_KEY = str(accessFile.readline()).rstrip()
ACCESS_SECRET = str(accessFile.readline()).rstrip()
# since_id watermark so each poll only fetches tweets newer than last seen.
lastID=0
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
print api.rate_limit_status()
# Poll loop: fetch matching tweets every 3 seconds and dump their metadata.
while True:
    results = api.search(q="testbot sbhatla", since_id = lastID)
    try:
        lastID = results[0].id
    except:
        # NOTE(review): bare except silently ignores empty result sets (and
        # everything else); IndexError would be the precise exception here.
        pass
    #print lastID
    for result in results:
        print result.id, result.author.name, result.favorite_count, result.retweet_count
        #print result.favorite_count
        #print result.author.name
        #print result.author.screen_name
        #print result.author.followers_count
        #print result.retweet_count
        print result.text.encode('utf-8')
    time.sleep(3)
| gpl-2.0 |
alrusdi/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/sites/tests.py | 139 | 2138 | from django.conf import settings
from django.contrib.sites.models import Site, RequestSite, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class SitesFrameworkTests(TestCase):
    """Tests for the sites framework: Site manager caching, get_current_site
    and the RequestSite fallback."""
    def setUp(self):
        # Create the Site row matching settings.SITE_ID and force the app to
        # count as installed; the flag is restored in tearDown.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
        self.old_Site_meta_installed = Site._meta.installed
        Site._meta.installed = True
    def tearDown(self):
        Site._meta.installed = self.old_Site_meta_installed
    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        # NOTE(review): assert_ is the deprecated alias of assertTrue.
        self.assert_(isinstance(s, Site))
        s.delete()
        self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual(u"example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        site = Site.objects.get_current()
        self.assertEqual(u"Example site", site.name)
    def test_get_current_site(self):
        # Test that the correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assert_(isinstance(site, Site))
        self.assertEqual(site.id, settings.SITE_ID)
        # Test that an exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        self.assertRaises(ObjectDoesNotExist, get_current_site, request)
        # A RequestSite is returned if the sites framework is not installed
        Site._meta.installed = False
        site = get_current_site(request)
        self.assert_(isinstance(site, RequestSite))
        self.assertEqual(site.name, u"example.com")
| gpl-3.0 |
JVillella/tensorflow | tensorflow/python/kernel_tests/clip_ops_test.py | 87 | 12131 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.platform import test
class ClipTest(test.TestCase):
  """Unit tests for clip_by_value, clip_by_norm, clip_by_global_norm and
  clip_by_average_norm."""

  # ClipByValue test
  def testClipByValue(self):
    with self.test_session():
      x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
      np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
      clip_value = 4.4
      ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByValueBadShape(self):
    with self.test_session():
      x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
      # Use a nonsensical shape.
      clip = constant_op.constant([1.0, 2.0])
      with self.assertRaises(ValueError):
        _ = clip_ops.clip_by_value(x, -clip, clip)
      with self.assertRaises(ValueError):
        _ = clip_ops.clip_by_value(x, 1.0, clip)

  def testClipByValueNonFinite(self):
    with self.test_session():
      x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
      # NaN propagates; +/-Inf are clamped to the clip bounds.
      np_ans = [float('NaN'), 4.0, -4.0]
      clip_value = 4.0
      ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  # ClipByNorm tests
  def testClipByNormClipped(self):
    # Norm clipping when clip_norm < 5
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      # Norm of x = sqrt(3^2 + 4^2) = 5
      np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
      clip_norm = 4.0
      ans = clip_ops.clip_by_norm(x, clip_norm)
      tf_ans = ans.eval()

      clip_tensor = constant_op.constant(4.0)
      # BUG FIX: the original passed the Python float ``clip_norm`` again
      # here, so the tensor-valued clip argument was never exercised.
      ans = clip_ops.clip_by_norm(x, clip_tensor)
      tf_ans_tensor = ans.eval()

      self.assertAllClose(np_ans, tf_ans)
      self.assertAllClose(np_ans, tf_ans_tensor)

  def testClipByNormBadShape(self):
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
      # Use a nonsensical shape.
      clip = constant_op.constant([1.0, 2.0])
      with self.assertRaises(ValueError):
        _ = clip_ops.clip_by_norm(x, clip)

  def testClipByNormNotClipped(self):
    # No norm clipping when clip_norm >= 5
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      # Norm of x = sqrt(3^2 + 4^2) = 5
      np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
      clip_norm = 6.0
      ans = clip_ops.clip_by_norm(x, clip_norm)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByNormZero(self):
    # No norm clipping when norm = 0
    with self.test_session():
      x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
      # Norm = 0, no changes
      np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
      clip_norm = 6.0
      ans = clip_ops.clip_by_norm(x, clip_norm)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByNormClippedWithDim0(self):
    # Norm clipping along axis 0 when clip_norm < 5
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
      # Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
      np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
      clip_norm = 4.0
      ans = clip_ops.clip_by_norm(x, clip_norm, [0])
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByNormClippedWithDim1(self):
    # Norm clipping along axis 1 when clip_norm < 5
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
      # Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
      np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
      clip_norm = 4.0
      ans = clip_ops.clip_by_norm(x, clip_norm, [1])
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByNormNotClippedWithAxes(self):
    # No norm clipping when clip_norm >= 5
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
      # Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
      np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
      clip_norm = 6.0
      ans = clip_ops.clip_by_norm(x, clip_norm, [1])
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  # ClipByGlobalNorm tests
  def testClipByGlobalNormClipped(self):
    # Norm clipping when clip_norm < 5
    with self.test_session():
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      clip_norm = 4.0

      # Answers are the original tensors scaled by 4.0/5.0
      np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
      np_ans_1 = [0.8, -1.6]

      ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

      self.assertAllClose(tf_norm, 5.0)
      self.assertAllClose(np_ans_0, tf_ans_1)
      self.assertAllClose(np_ans_1, tf_ans_2)

  def testClipByGlobalNormClippedTensor(self):
    # Norm clipping when clip_norm < 5, with clip_norm given as a Tensor
    with self.test_session():
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      clip_norm = constant_op.constant(4.0)

      # Answers are the original tensors scaled by 4.0/5.0
      np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
      np_ans_1 = [0.8, -1.6]

      ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

      self.assertAllClose(tf_norm, 5.0)
      self.assertAllClose(np_ans_0, tf_ans_1)
      self.assertAllClose(np_ans_1, tf_ans_2)

  def testClipByGlobalNormSupportsNone(self):
    # None entries in the input list must pass through untouched.
    with self.test_session():
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      clip_norm = 4.0

      # Answers are the original tensors scaled by 4.0/5.0
      np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
      np_ans_1 = [0.8, -1.6]

      ans, norm = clip_ops.clip_by_global_norm((x0, None, x1, None), clip_norm)
      self.assertTrue(ans[1] is None)
      self.assertTrue(ans[3] is None)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[2].eval()
      tf_norm = norm.eval()

      self.assertAllClose(tf_norm, 5.0)
      self.assertAllClose(np_ans_0, tf_ans_1)
      self.assertAllClose(np_ans_1, tf_ans_2)

  def testClipByGlobalNormWithIndexedSlicesClipped(self):
    # IndexedSlices inputs must be clipped like dense tensors.
    with self.test_session():
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = ops.IndexedSlices(
          constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      clip_norm = 4.0

      # Answers are the original tensors scaled by 4.0/5.0
      np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
      np_ans_1 = [0.8, -1.6]

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].values.eval()
      tf_norm = norm.eval()

      self.assertAllClose(tf_norm, 5.0)
      self.assertAllClose(np_ans_0, tf_ans_1)
      self.assertAllClose(np_ans_1, tf_ans_2)

  def testClipByGlobalNormPreservesDenseShape(self):
    # Clipping an IndexedSlices must not drop its dense_shape.
    dense_shape = (1,)
    slices = ops.IndexedSlices(
        constant_op.constant([1.0]),
        constant_op.constant([0]),
        dense_shape=dense_shape)
    ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
    modified_slices = ans[0]
    self.assertEqual(dense_shape, slices.dense_shape)
    self.assertEqual(dense_shape, modified_slices.dense_shape)

  def testClipByGlobalNormNotClipped(self):
    # No norm clipping when clip_norm >= 5
    with self.test_session():
      x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([1.0, -2.0])
      # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
      np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
      np_ans_1 = [1.0, -2.0]
      clip_norm = 6.0

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

      self.assertAllClose(tf_norm, 5.0)
      self.assertAllClose(np_ans_0, tf_ans_1)
      self.assertAllClose(np_ans_1, tf_ans_2)

  def testClipByGlobalNormZero(self):
    # No norm clipping when norm = 0
    with self.test_session():
      x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
      x1 = constant_op.constant([0.0, 0.0])
      # Norm = 0, no changes
      np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
      np_ans_1 = [0.0, 0.0]
      clip_norm = 6.0

      ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
      tf_ans_1 = ans[0].eval()
      tf_ans_2 = ans[1].eval()
      tf_norm = norm.eval()

      self.assertAllClose(tf_norm, 0.0)
      self.assertAllClose(np_ans_0, tf_ans_1)
      self.assertAllClose(np_ans_1, tf_ans_2)

  # ClipByAverageNorm tests
  def testClipByAverageNormClipped(self):
    # Norm clipping when average clip_norm < 0.83333333
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
      np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
      clip_norm = 0.8
      ans = clip_ops.clip_by_average_norm(x, clip_norm)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByAverageNormClippedTensor(self):
    # Norm clipping when average clip_norm < 0.83333333, Tensor clip_norm
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
      np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
      clip_norm = constant_op.constant(0.8)
      ans = clip_ops.clip_by_average_norm(x, clip_norm)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByAverageNormNotClipped(self):
    # No norm clipping when average clip_norm >= 0.83333333
    with self.test_session():
      x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
      np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
      clip_norm = 0.9
      ans = clip_ops.clip_by_average_norm(x, clip_norm)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)

  def testClipByAverageNormZero(self):
    # No norm clipping when average clip_norm = 0
    with self.test_session():
      x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
      # Average norm = 0, no changes
      np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
      clip_norm = 0.9
      ans = clip_ops.clip_by_average_norm(x, clip_norm)
      tf_ans = ans.eval()

      self.assertAllClose(np_ans, tf_ans)
# Run the whole suite when the file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
cgwalters/anaconda | pyanaconda/ui/tui/spokes/askvnc.py | 2 | 6302 | # Ask vnc text spoke
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Jesse Keating <jkeating@redhat.com>
#
from pyanaconda.ui.tui.spokes import NormalTUISpoke
from pyanaconda.ui.tui.simpleline import TextWidget, ColumnWidget
from pyanaconda.ui.tui.tuiobject import YesNoDialog
from pyanaconda.constants import USEVNC, USETEXT, IPMI_ABORTED
from pyanaconda.constants_text import INPUT_PROCESSED
from pyanaconda.i18n import N_, _
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.tui import exception_msg_handler
from pyanaconda.iutil import execWithRedirect
from pyanaconda.flags import can_touch_runtime_system
from pyanaconda import iutil
import getpass
import sys
def exception_msg_handler_and_exit(event, data):
    """Show the exception via the regular handler, then terminate.

    Exiting right away keeps the TUI from looping forever on an error
    that happens before the real message handlers are installed.
    """
    exception_msg_handler(event, data)
    sys.exit(1)
class AskVNCSpoke(NormalTUISpoke):
    """Ask the user whether to run a VNC (graphical) or text mode install.

    This spoke is kinda standalone, not meant to be used with a hub.
    We pass in some fake data just to make our parents happy.
    """
    title = N_("VNC")

    def __init__(self, app, data, storage=None, payload=None,
                 instclass=None, message=None):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)

        # The TUI hasn't been initialized with the message handlers yet. Add an
        # exception message handler so that the TUI exits if anything goes wrong
        # at this stage.
        self._app.register_event_handler(hubQ.HUB_CODE_EXCEPTION,
                                         exception_msg_handler_and_exit)

        if message:
            self._message = message
        else:
            self._message = _("X was unable to start on your "
                              "machine. Would you like to "
                              "start VNC to connect to "
                              "this computer from another "
                              "computer and perform a "
                              "graphical installation or continue "
                              "with a text mode installation?")

        # Choices shown to the user, in menu order.
        self._choices = (_(USEVNC), _(USETEXT))
        self._usevnc = False

    @property
    def indirect(self):
        return True

    def refresh(self, args = None):
        """Render the question followed by the numbered list of choices."""
        NormalTUISpoke.refresh(self, args)

        self._window += [TextWidget(self._message), ""]

        for idx, choice in enumerate(self._choices):
            number = TextWidget("%2d)" % (idx + 1))
            c = ColumnWidget([(3, [number]), (None, [TextWidget(choice)])], 1)
            self._window += [c, ""]

        return True

    def input(self, args, key):
        """Override input so that we can launch the VNC password spoke."""
        try:
            keyid = int(key) - 1
            if 0 <= keyid < len(self._choices):
                choice = self._choices[keyid]
                if choice == _(USETEXT):
                    self._usevnc = False
                else:
                    self._usevnc = True
                    newspoke = VNCPassSpoke(self.app, self.data, self.storage,
                                            self.payload, self.instclass)
                    self.app.switch_screen_modal(newspoke)

                self.apply()
                self.close()
            return INPUT_PROCESSED
        except ValueError:
            # Not a number -- fall through to the quit/other-key handling.
            pass

        if key.lower() == _('q'):
            d = YesNoDialog(self.app, _(self.app.quit_message))
            self.app.switch_screen_modal(d)
            if d.answer:
                iutil.ipmi_report(IPMI_ABORTED)
                if can_touch_runtime_system("Quit and Reboot"):
                    execWithRedirect("systemctl", ["--no-wall", "reboot"])
                else:
                    # BUG FIX: use sys.exit() rather than the builtin exit();
                    # the site-module exit() is intended for interactive use
                    # and is not guaranteed to exist in the installer
                    # environment (sys is already imported in this module).
                    sys.exit(1)
        else:
            return key

    def apply(self):
        """Record the user's decision on the kickstart data object."""
        self.data.vnc.enabled = self._usevnc
class VNCPassSpoke(NormalTUISpoke):
    """Prompt for (and confirm) the VNC access password."""

    title = N_("VNC Password")

    def __init__(self, app, data, storage, payload, instclass, message=None):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
        self._password = ""
        self._message = message if message else _(
            "Please provide VNC password (must be six to eight characters long).\n"
            "You will have to type it twice. Leave blank for no password")

    @property
    def indirect(self):
        return True

    @property
    def completed(self):
        # The password spoke never blocks progress; any value (or none) is
        # acceptable.
        return True

    def refresh(self, args = None):
        """Display the password instructions."""
        NormalTUISpoke.refresh(self, args)
        self._window.extend([TextWidget(self._message), ""])
        return True

    def prompt(self, args = None):
        """Read the password twice; accept it only when both entries agree
        and the length is either 0 (no password) or 6..8 characters."""
        first = getpass.getpass(_("Password: "))
        second = getpass.getpass(_("Password (confirm): "))

        if first != second:
            print(_("Passwords do not match!"))
            return None
        if 0 < len(first) < 6:
            print(_("The password must be at least "
                    "six characters long."))
            return None
        if len(first) > 8:
            print(_("The password cannot be more than "
                    "eight characters long."))
            return None

        self._password = first
        self.apply()
        self.close()

    def apply(self):
        """Store the accepted password on the kickstart data object."""
        self.data.vnc.password = self._password
| gpl-2.0 |
rjhunter8285/nsc-cloudproject-s22016 | api/FlaskApp/FlaskApp/python_modules/oauthlib/oauth2/rfc6749/grant_types/base.py | 35 | 1739 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import logging
from oauthlib.oauth2.rfc6749 import errors, utils
log = logging.getLogger(__name__)
class GrantTypeBase(object):
    """Shared plumbing for the concrete OAuth 2 grant type handlers."""

    error_uri = None
    request_validator = None

    def create_authorization_response(self, request, token_handler):
        raise NotImplementedError('Subclasses must implement this method.')

    def create_token_response(self, request, token_handler):
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_grant_type(self, request):
        """Reject the request unless the validator authorizes this grant."""
        client_id = getattr(request, 'client_id', None)
        allowed = self.request_validator.validate_grant_type(
            client_id, request.grant_type, request.client, request)
        if not allowed:
            log.debug('Unauthorized from %r (%r) access to grant type %s.',
                      request.client_id, request.client, request.grant_type)
            raise errors.UnauthorizedClientError(request=request)

    def validate_scopes(self, request):
        """Populate request.scopes (falling back to the client's default
        scopes), then validate them against the request validator."""
        if not request.scopes:
            request.scopes = (
                utils.scope_to_list(request.scope) or
                utils.scope_to_list(
                    self.request_validator.get_default_scopes(
                        request.client_id, request)))
        log.debug('Validating access to scopes %r for client %r (%r).',
                  request.scopes, request.client_id, request.client)
        if not self.request_validator.validate_scopes(
                request.client_id, request.scopes, request.client, request):
            raise errors.InvalidScopeError(request=request)
| apache-2.0 |
danielefranceschi/mitochondriametrics | src/imageDecorator.py | 1 | 2647 | # import the necessary packages
import string
import cv2
import numpy as np
class ImageDecorator:
    """Draws measurement overlays (bounding boxes, midpoints and scaled
    dimension labels) for detected features onto a copy of an image."""

    @staticmethod
    def drawDot(img,x,y,color,size=5):
        # Filled circle (thickness -1) of radius ``size`` centered at (x, y).
        cv2.circle(img, (int(x), int(y)), size, color, -1)

    @staticmethod
    def decorateImage(img, features, pixelsPerMetric=1, drawMidPoints=True):
        """Return a decorated copy of ``img``; the input image is untouched.

        ``features`` items are expected to expose getBoundingBox(),
        midPoint(), scaledDimensions() and corner attributes tl/tr/bl/br --
        presumably a project-specific measurement object; TODO confirm
        against the caller.
        """
        # working copy
        originalImage = img.copy()

        # loop over the features individually
        for om in features:
            box=om.getBoundingBox()
            box2=np.array(box, dtype="int")
            cv2.drawContours(originalImage, [box2.astype("int")], -1, (0, 255, 0), 2)

            # loop over the original points and draw them (corner dots)
            for (x, y) in box2:
                ImageDecorator.drawDot(originalImage,x,y,(0, 0, 255))

            if drawMidPoints:
                # unpack the ordered bounding box, then compute the midpoint
                # between the top-left and top-right coordinates, followed by
                # the midpoint between bottom-left and bottom-right coordinates
                (tltrX, tltrY) = om.midPoint(om.tl, om.tr)
                (blbrX, blbrY) = om.midPoint(om.bl, om.br)

                # compute the midpoint between the top-left and bottom-left
                # points, followed by the midpoint between the top-right and
                # bottom-right
                (tlblX, tlblY) = om.midPoint(om.tl, om.bl)
                (trbrX, trbrY) = om.midPoint(om.tr, om.br)

                # draw the midpoints on the image
                ImageDecorator.drawDot(originalImage,tltrX,tltrY,(255, 0, 0))
                ImageDecorator.drawDot(originalImage,blbrX,blbrY,(255, 0, 0))
                ImageDecorator.drawDot(originalImage,tlblX,tlblY,(255, 0, 0))
                ImageDecorator.drawDot(originalImage,trbrX,trbrY,(255, 0, 0))

                # draw lines between the midpoints
                cv2.line(originalImage, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 2)
                cv2.line(originalImage, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)

                # get the size of the object, scaled via pixelsPerMetric
                (dimA, dimB) = om.scaledDimensions(pixelsPerMetric)

                # draw the object sizes on the image
                # NOTE(review): the labels anchor on midpoint coordinates, so
                # they can only be drawn when drawMidPoints is True -- the
                # original indentation was lost in extraction and has been
                # reconstructed accordingly.
                cv2.putText(originalImage, "{:.1f}".format(dimA),
                    (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (255, 255, 255), 2)
                cv2.putText(originalImage, "{:.1f}".format(dimB),
                    (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (255, 255, 255), 2)

        return originalImage
| gpl-3.0 |
KitKatXperience/platform_external_chromium_org | third_party/closure_linter/closure_linter/javascripttokens.py | 266 | 4955 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent JavaScript tokens."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
class JavaScriptTokenType(tokens.TokenType):
  """Enumeration of JavaScript token types, and useful sets of token types."""
  # Each constant's value is a short human-readable tag used for debug
  # output; it is not necessarily the literal source text of the token.
  NUMBER = 'number'
  START_SINGLE_LINE_COMMENT = '//'
  START_BLOCK_COMMENT = '/*'
  START_DOC_COMMENT = '/**'
  END_BLOCK_COMMENT = '*/'
  END_DOC_COMMENT = 'doc */'
  COMMENT = 'comment'
  SINGLE_QUOTE_STRING_START = "'string"
  SINGLE_QUOTE_STRING_END = "string'"
  DOUBLE_QUOTE_STRING_START = '"string'
  DOUBLE_QUOTE_STRING_END = 'string"'
  STRING_TEXT = 'string'
  START_BLOCK = '{'
  END_BLOCK = '}'
  START_PAREN = '('
  END_PAREN = ')'
  START_BRACKET = '['
  END_BRACKET = ']'
  REGEX = '/regex/'
  FUNCTION_DECLARATION = 'function(...)'
  FUNCTION_NAME = 'function functionName(...)'
  START_PARAMETERS = 'startparams('
  PARAMETERS = 'pa,ra,ms'
  END_PARAMETERS = ')endparams'
  SEMICOLON = ';'
  DOC_FLAG = '@flag'
  DOC_INLINE_FLAG = '{@flag ...}'
  DOC_START_BRACE = 'doc {'
  DOC_END_BRACE = 'doc }'
  DOC_PREFIX = 'comment prefix: * '
  SIMPLE_LVALUE = 'lvalue='
  KEYWORD = 'keyword'
  OPERATOR = 'operator'
  IDENTIFIER = 'identifier'

  # Convenience groupings used by the predicates on JavaScriptToken and by
  # the lint checks.
  STRING_TYPES = frozenset([
      SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
      DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])

  COMMENT_TYPES = frozenset([START_SINGLE_LINE_COMMENT, COMMENT,
                             START_BLOCK_COMMENT, START_DOC_COMMENT,
                             END_BLOCK_COMMENT, END_DOC_COMMENT,
                             DOC_START_BRACE, DOC_END_BRACE,
                             DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX])

  FLAG_DESCRIPTION_TYPES = frozenset([
      DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE])

  FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])

  # Anything that is neither code nor meaningful structure: comments plus
  # whitespace and blank lines from the base TokenType.
  NON_CODE_TYPES = COMMENT_TYPES | frozenset([
      tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])

  UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void']

  UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS

  UNARY_POST_OPERATORS = ['--', '++']

  # An expression ender is any token that can end an object - i.e. we could
  # have x.y or [1, 2], or (10 + 9) or {a: 10}.
  EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER,
                            SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK,
                            SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END]
class JavaScriptToken(tokens.Token):
  """JavaScript-aware token with convenience predicates.

  The following token types have data in attached_object:
    - All JsDoc flags: a parser.JsDocFlag object.
  """

  def IsKeyword(self, keyword):
    """Return True if this token is the keyword named *keyword*."""
    return (self.type == JavaScriptTokenType.KEYWORD and
            self.string == keyword)

  def IsOperator(self, operator):
    """Return True if this token is the operator named *operator*."""
    return (self.type == JavaScriptTokenType.OPERATOR and
            self.string == operator)

  def IsAssignment(self):
    """Return True if this token is an assignment operator.

    Operators ending in '=' qualify, except the comparison operators
    (==, !=, >=, <=, ===, !==).
    """
    if self.type != JavaScriptTokenType.OPERATOR:
      return False
    if not self.string.endswith('='):
      return False
    return self.string not in ('==', '!=', '>=', '<=', '===', '!==')

  def IsComment(self):
    """Return True if this token is any part of a comment."""
    return self.type in JavaScriptTokenType.COMMENT_TYPES

  def IsCode(self):
    """Return True for code tokens (not comments or whitespace)."""
    return self.type not in JavaScriptTokenType.NON_CODE_TYPES

  def __repr__(self):
    return '<JavaScriptToken: %d, %s, "%s", %r, %r>' % (
        self.line_number, self.type, self.string, self.values, self.metadata)
| bsd-3-clause |
skoppisetty/idigbio-appliance | lib/sqlalchemy/exc.py | 17 | 10556 | # sqlalchemy/exc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Exceptions used with SQLAlchemy.
The base exception class is :class:`.SQLAlchemyError`. Exceptions which are raised as a
result of DBAPI exceptions are all subclasses of
:class:`.DBAPIError`.
"""
import traceback
class SQLAlchemyError(Exception):
    """Generic error class; root of the SQLAlchemy exception hierarchy."""


class ArgumentError(SQLAlchemyError):
    """Raised when an invalid or conflicting function argument is supplied.

    This error generally corresponds to construction time state errors.

    """
class CircularDependencyError(SQLAlchemyError):
    """Raised by topological sorts when a circular dependency is detected.

    There are two scenarios where this error occurs:

    * In a Session flush operation, if two objects are mutually dependent
      on each other, they can not be inserted or deleted via INSERT or
      DELETE statements alone; an UPDATE will be needed to post-associate
      or pre-deassociate one of the foreign key constrained values.
      The ``post_update`` flag described at :ref:`post_update` can resolve
      this cycle.

    * In a :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`,
      :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey`
      or :class:`.ForeignKeyConstraint` objects mutually refer to each
      other.  Apply the ``use_alter=True`` flag to one or both,
      see :ref:`use_alter`.

    """
    def __init__(self, message, cycles, edges, msg=None):
        # When no pre-rendered message is given, append the offending cycle
        # and edge collections to the base message for easier debugging.
        if msg is None:
            message += " Cycles: %r all edges: %r" % (cycles, edges)
        else:
            message = msg
        SQLAlchemyError.__init__(self, message)
        self.cycles = cycles
        self.edges = edges

    def __reduce__(self):
        # Pickle support: pass None as the base message and the already
        # rendered text as ``msg`` so that __init__ does not re-append the
        # cycle/edge details when unpickling.
        return self.__class__, (None, self.cycles,
                                self.edges, self.args[0])
class CompileError(SQLAlchemyError):
    """Raised when an error occurs during SQL compilation"""

class IdentifierError(SQLAlchemyError):
    """Raised when a schema name is beyond the max character limit"""

# Moved to orm.exc; compatibility definition installed by orm import until 0.6
ConcurrentModificationError = None

class DisconnectionError(SQLAlchemyError):
    """A disconnect is detected on a raw DB-API connection.

    This error is raised and consumed internally by a connection pool. It can
    be raised by the :meth:`.PoolEvents.checkout` event
    so that the host pool forces a retry; the exception will be caught
    three times in a row before the pool gives up and raises
    :class:`~sqlalchemy.exc.InvalidRequestError` regarding the connection
    attempt.

    """

# Moved to orm.exc; compatibility definition installed by orm import until 0.6
FlushError = None

class TimeoutError(SQLAlchemyError):
    """Raised when a connection pool times out on getting a connection."""

class InvalidRequestError(SQLAlchemyError):
    """SQLAlchemy was asked to do something it can't do.

    This error generally corresponds to runtime state errors.

    """

class ResourceClosedError(InvalidRequestError):
    """An operation was requested from a connection, cursor, or other
    object that's in a closed state."""

# Also derives from KeyError so callers performing mapping-style lookups
# that catch KeyError will catch this as well.
class NoSuchColumnError(KeyError, InvalidRequestError):
    """A nonexistent column is requested from a ``RowProxy``."""

class NoReferenceError(InvalidRequestError):
    """Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
class NoReferencedTableError(NoReferenceError):
    """Raised by ``ForeignKey`` when the referred ``Table`` cannot be
    located."""

    def __init__(self, message, tname):
        NoReferenceError.__init__(self, message)
        # Name of the table that could not be resolved.
        self.table_name = tname

    def __reduce__(self):
        # Pickle support: rebuild from (message, table_name).
        return self.__class__, (self.args[0], self.table_name)

class NoReferencedColumnError(NoReferenceError):
    """Raised by ``ForeignKey`` when the referred ``Column`` cannot be
    located."""

    def __init__(self, message, tname, cname):
        NoReferenceError.__init__(self, message)
        # Table and column names that could not be resolved.
        self.table_name = tname
        self.column_name = cname

    def __reduce__(self):
        # Pickle support: rebuild from (message, table_name, column_name).
        return self.__class__, (self.args[0], self.table_name,
                                self.column_name)

class NoSuchTableError(InvalidRequestError):
    """Table does not exist or is not visible to a connection."""

class UnboundExecutionError(InvalidRequestError):
    """SQL was attempted without a database connection to execute it on."""
class DontWrapMixin(object):
    """A mixin class which, when applied to a user-defined Exception class,
    will not be wrapped inside of :class:`.StatementError` if the error is
    emitted within the process of executing a statement.

    E.g.::

        from sqlalchemy.exc import DontWrapMixin

        class MyCustomException(Exception, DontWrapMixin):
            pass

        class MySpecialType(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                if value == 'invalid':
                    raise MyCustomException("invalid!")

    """
import sys
if sys.version_info < (2, 5):
    # Python < 2.5 cannot mix a new-style class into an exception
    # hierarchy, so replace the marker with an old-style class of the
    # same name.  Note this intentionally shadows the definition above.
    class DontWrapMixin:
        pass

# Moved to orm.exc; compatibility definition installed by orm import until 0.6
UnmappedColumnError = None
class StatementError(SQLAlchemyError):
    """An error occurred during execution of a SQL statement.

    :class:`StatementError` wraps the exception raised
    during execution, and features :attr:`.statement`
    and :attr:`.params` attributes which supply context regarding
    the specifics of the statement which had an issue.

    The wrapped exception object is available in
    the :attr:`.orig` attribute.

    """

    statement = None
    """The string SQL statement being invoked when this exception occurred."""

    params = None
    """The parameter list being used when this exception occurred."""

    orig = None
    """The DBAPI exception object."""

    def __init__(self, message, statement, params, orig):
        SQLAlchemyError.__init__(self, message)
        self.statement = statement
        self.params = params
        self.orig = orig

    def __reduce__(self):
        # Pickle support: rebuild from (message, statement, params, orig).
        return self.__class__, (self.args[0], self.statement,
                                self.params, self.orig)

    def __str__(self):
        from sqlalchemy.sql import util

        # Message, then the statement and a truncated parameter repr.
        details = [SQLAlchemyError.__str__(self),
                   repr(self.statement),
                   repr(util._repr_params(self.params, 10))]
        return ' '.join(details)
class DBAPIError(StatementError):
    """Raised when the execution of a database operation fails.

    Wraps exceptions raised by the DB-API underlying the
    database operation.  Driver-specific implementations of the standard
    DB-API exception types are wrapped by matching sub-types of SQLAlchemy's
    :class:`DBAPIError` when possible.  DB-API's ``Error`` type maps to
    :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical.
    Note that there is no guarantee that different DB-API implementations
    will raise the same exception type for any given error condition.

    :class:`DBAPIError` features :attr:`~.StatementError.statement`
    and :attr:`~.StatementError.params` attributes which supply context
    regarding the specifics of the statement which had an issue, for the
    typical case when the error was raised within the context of
    emitting a SQL statement.

    The wrapped exception object is available in the
    :attr:`~.StatementError.orig` attribute.  Its type and properties are
    DB-API implementation specific.

    """

    @classmethod
    def instance(cls, statement, params,
                 orig,
                 dbapi_base_err,
                 connection_invalidated=False):
        """Build the most specific wrapper for ``orig``, or return it as-is.

        ``dbapi_base_err`` is the driver's base Error class; it decides
        whether ``orig`` is a DB-API error at all.
        """
        # Don't ever wrap these, just return them directly as if
        # DBAPIError didn't exist.
        if isinstance(orig, (KeyboardInterrupt, SystemExit, DontWrapMixin)):
            return orig

        if orig is not None:
            # not a DBAPI error, statement is present.
            # raise a StatementError
            if not isinstance(orig, dbapi_base_err) and statement:
                return StatementError(
                    "%s (original cause: %s)" % (
                        str(orig),
                        traceback.format_exception_only(orig.__class__, orig)[-1].strip()
                    ), statement, params, orig)

            # Select the DBAPIError subclass in this module whose name
            # matches the driver exception's class name (e.g. a driver
            # OperationalError maps to sqlalchemy's OperationalError).
            name, glob = orig.__class__.__name__, globals()
            if name in glob and issubclass(glob[name], DBAPIError):
                cls = glob[name]

        return cls(statement, params, orig, connection_invalidated)

    def __reduce__(self):
        # Pickle support: rebuild with the same constructor arguments.
        return self.__class__, (self.statement, self.params,
                                self.orig, self.connection_invalidated)

    def __init__(self, statement, params, orig, connection_invalidated=False):
        # str(orig) itself may fail (driver-defined __str__); guard it so
        # the wrapper can still be constructed.  Python 2 except syntax.
        try:
            text = str(orig)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, e:
            text = 'Error in str() of DB-API-generated exception: ' + str(e)
        StatementError.__init__(
            self,
            '(%s) %s' % (orig.__class__.__name__, text),
            statement,
            params,
            orig
        )
        self.connection_invalidated = connection_invalidated
# Concrete wrappers mirroring the standard DB-API exception hierarchy.
# DBAPIError.instance() selects among them by matching class name.

class InterfaceError(DBAPIError):
    """Wraps a DB-API InterfaceError."""


class DatabaseError(DBAPIError):
    """Wraps a DB-API DatabaseError."""


class DataError(DatabaseError):
    """Wraps a DB-API DataError."""


class OperationalError(DatabaseError):
    """Wraps a DB-API OperationalError."""


class IntegrityError(DatabaseError):
    """Wraps a DB-API IntegrityError."""


class InternalError(DatabaseError):
    """Wraps a DB-API InternalError."""


class ProgrammingError(DatabaseError):
    """Wraps a DB-API ProgrammingError."""


class NotSupportedError(DatabaseError):
    """Wraps a DB-API NotSupportedError."""


# Warnings

class SADeprecationWarning(DeprecationWarning):
    """Issued once per usage of a deprecated API."""


class SAPendingDeprecationWarning(PendingDeprecationWarning):
    """Issued once per usage of an API that is slated for deprecation."""


class SAWarning(RuntimeWarning):
    """Issued at runtime."""
| gpl-3.0 |
miacro/pkget | pkget/test/test_yaml.py | 1 | 3955 | import unittest
import os
from pkget import Yaml
class YamlTest(unittest.TestCase):
    """Tests for ``Yaml.load_all`` covering filename, in-memory content,
    and mixed inputs."""

    def test_load_all(self):
        # Local helper: materialize the generator returned by
        # Yaml.load_all into a list so documents can be checked by index.
        def test_load_all(*args, **kwargs):
            result = []
            for item in Yaml.load_all(*args, **kwargs):
                result.append(item)
            return result

        # Checks the two documents expected from test_yaml_1.yaml,
        # starting at position *index* of the combined result list.
        def assert_yaml_file_1(result, index=0):
            self.assertEqual(result[index]["default"]["recipepaths"], [None])
            self.assertEqual(
                result[index]["default"]["installprefix"], "~/.local")
            self.assertEqual(
                result[index]["default"]["pkginfoprefix"], "~/.local/pkgetinfo")
            self.assertEqual(
                result[index]["local"]["recipepaths"], ["/usr/local", None])
            self.assertEqual(
                result[index]["local"]["installprefix"], "/usr/local/")
            self.assertEqual(
                result[index]["local"]["pkginfoprefix"], "/usr/local/pkgetinfo")
            self.assertEqual(
                result[1 + index]["global"]["recipepaths"], None)
            self.assertEqual(
                result[1 + index]["global"]["installprefix"], "/usr/local/")
            self.assertEqual(
                result[1 + index]["global"]["pkginfoprefix"],
                "/usr/local/pkgetinfo")

        # Checks the single document expected from test_yaml_2.yaml.
        def assert_yaml_file_2(result, index=0):
            self.assertEqual(
                result[index]["etcd"]["description"],
                "Highly-available key value store for shared configuration and \
service discovery")
            self.assertEqual(
                result[index]["etcd"]["website"], "https://github.com/coreos/etcd")
            self.assertEqual(result[index]["etcd"]["type"], "tar.gz")
            self.assertEqual(result[index]["etcd"]["version"], "3.2.5")
            self.assertEqual(result[index]["etcd"]["os"], "linux")
            self.assertEqual(result[index]["etcd"]["arch"], "amd64")
            self.assertEqual(result[index]["etcd"]["url"], None)
            self.assertEqual(
                result[index]["etcd"]["urlprefix"],
                "https://github.com/coreos/etcd/releases/download")
            self.assertEqual(result[index]["etcd"]["depends"], None)

        # Single filename argument.
        result = test_load_all(
            filenames=os.path.join(
                os.path.dirname(__file__), "test_yaml_1.yaml"))
        assert_yaml_file_1(result)

        # Multiple filenames: documents are concatenated in order.
        result = test_load_all(
            filenames=[os.path.join(
                os.path.dirname(__file__), "test_yaml_1.yaml"),
                os.path.join(
                    os.path.dirname(__file__), "test_yaml_2.yaml")])
        assert_yaml_file_1(result)
        assert_yaml_file_2(result, 2)

        # Single in-memory document string.
        result = test_load_all(contents="--- \n\
a: 12 \n\
b: df \n")
        self.assertEqual(result[0]["a"], 12)
        self.assertEqual(result[0]["b"], "df")

        # Multiple content strings, one containing two documents.
        result = test_load_all(
            contents=["---\na: 23\nb: ddd\n",
                      "---\na: df\nb: 45\n---\ng: 34\n"])
        self.assertEqual(result[0]["a"], 23)
        self.assertEqual(result[0]["b"], "ddd")
        self.assertEqual(result[1]["a"], "df")
        self.assertEqual(result[1]["b"], 45)
        self.assertEqual(result[2]["g"], 34)

        # Mixed: contents are loaded before filenames when both are given.
        result = test_load_all(
            contents=["---\na: 23\nb: ddd\n",
                      "---\na: df\nb: 45\n---\ng: 34\n"],
            filenames=[os.path.join(os.path.dirname(__file__),
                                    "test_yaml_1.yaml"),
                       os.path.join(os.path.dirname(__file__),
                                    "test_yaml_2.yaml")])
        self.assertEqual(result[0]["a"], 23)
        self.assertEqual(result[0]["b"], "ddd")
        self.assertEqual(result[1]["a"], "df")
        self.assertEqual(result[1]["b"], 45)
        self.assertEqual(result[2]["g"], 34)
        assert_yaml_file_1(result, 3)
        assert_yaml_file_2(result, 5)
| gpl-3.0 |
khagler/boto | boto/gs/connection.py | 157 | 5478 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.gs.bucket import Bucket
from boto.s3.connection import S3Connection
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.connection import check_lowercase_bucketname
from boto.utils import get_utf8_value
class Location(object):
    # Bucket location constraints accepted by GSConnection.create_bucket().
    DEFAULT = 'US'
    EU = 'EU'
class GSConnection(S3Connection):
    """Connection to Google Cloud Storage, built on boto's S3 machinery."""

    DefaultHost = 'storage.googleapis.com'
    # Query-string template used when constructing signed URLs.
    QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'

    def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None,
                 host=DefaultHost, debug=0, https_connection_factory=None,
                 calling_format=SubdomainCallingFormat(), path='/',
                 suppress_consec_slashes=True):
        # Delegate to S3Connection, substituting the "google" provider
        # and the GS-specific Bucket class.
        super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,
                 is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
                 host, debug, https_connection_factory, calling_format, path,
                 "google", Bucket,
                 suppress_consec_slashes=suppress_consec_slashes)

    def create_bucket(self, bucket_name, headers=None,
                      location=Location.DEFAULT, policy=None,
                      storage_class='STANDARD'):
        """
        Creates a new bucket. By default it's located in the USA. You can
        pass Location.EU to create bucket in the EU. You can also pass
        a LocationConstraint for where the bucket should be located, and
        a StorageClass describing how the data should be stored.

        :type bucket_name: string
        :param bucket_name: The name of the new bucket.

        :type headers: dict
        :param headers: Additional headers to pass along with the request to GCS.

        :type location: :class:`boto.gs.connection.Location`
        :param location: The location of the new bucket.

        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GCS.

        :type storage_class: string
        :param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'.
        """
        check_lowercase_bucketname(bucket_name)

        if policy:
            # A canned ACL is sent as a request header.
            if headers:
                headers[self.provider.acl_header] = policy
            else:
                headers = {self.provider.acl_header : policy}
        if not location:
            location = Location.DEFAULT
        location_elem = ('<LocationConstraint>%s</LocationConstraint>'
                         % location)
        if storage_class:
            storage_class_elem = ('<StorageClass>%s</StorageClass>'
                                  % storage_class)
        else:
            storage_class_elem = ''
        # Request body: XML configuration carrying location + storage class.
        data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'
                % (location_elem, storage_class_elem))
        response = self.make_request(
            'PUT', get_utf8_value(bucket_name), headers=headers,
            data=get_utf8_value(data))
        body = response.read()
        if response.status == 409:
            # 409 Conflict: the bucket name is already taken.
            raise self.provider.storage_create_error(
                response.status, response.reason, body)
        if response.status == 200:
            return self.bucket_class(self, bucket_name)
        else:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def get_bucket(self, bucket_name, validate=True, headers=None):
        """
        Retrieves a bucket by name.

        If the bucket does not exist, an ``S3ResponseError`` will be raised. If
        you are unsure if the bucket exists or not, you can use the
        ``S3Connection.lookup`` method, which will either return a valid bucket
        or ``None``.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to
            AWS.

        :type validate: boolean
        :param validate: If ``True``, it will try to fetch all keys within the
            given bucket. (Default: ``True``)
        """
        bucket = self.bucket_class(self, bucket_name)
        if validate:
            # A zero-key listing is the cheapest probe for existence.
            bucket.get_all_keys(headers, maxkeys=0)
        return bucket
| mit |
DanialLiu/SkiaWin32Port | third_party/externals/gyp/test/mac/gyptest-ldflags.py | 244 | 1864 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that filenames passed to various linker flags are converted into
build-directory relative paths correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
  # Generate and build the fixture project with each supported generator;
  # pass_test() reports success if the build completed cleanly.
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  CHDIR = 'ldflags'
  test.run_gyp('subdirectory/test.gyp', chdir=CHDIR)
  test.build('subdirectory/test.gyp', test.ALL, chdir=CHDIR)
  test.pass_test()
# These flags from `man ld` couldl show up in OTHER_LDFLAGS and need path
# translation.
#
# Done:
# -exported_symbols_list filename
# -unexported_symbols_list file
# -reexported_symbols_list file
# -sectcreate segname sectname file
#
# Will be done on demand:
# -weak_library path_to_library
# -reexport_library path_to_library
# -lazy_library path_to_library
# -upward_library path_to_library
# -syslibroot rootdir
# -framework name[,suffix]
# -weak_framework name[,suffix]
# -reexport_framework name[,suffix]
# -lazy_framework name[,suffix]
# -upward_framework name[,suffix]
# -force_load path_to_archive
# -filelist file[,dirname]
# -dtrace file
# -order_file file # should use ORDER_FILE
# -exported_symbols_order file
# -bundle_loader executable # should use BUNDLE_LOADER
# -alias_list filename
# -seg_addr_table filename
# -dylib_file install_name:file_name
# -interposable_list filename
# -object_path_lto filename
#
#
# obsolete:
# -sectorder segname sectname orderfile
# -seg_addr_table_filename path
#
#
# ??:
# -map map_file_path
# -sub_library library_name
# -sub_umbrella framework_name
| bsd-3-clause |
tomaszhof/python | scripts/dicomSender.py | 1 | 2034 | import os
import sys
import shlex, subprocess
from multiprocessing import Process, Pool
def psend(fname):
    """Send a single DICOM file with dcmsnd over TLS, asynchronously.

    Launches dcmsnd through the shell and returns immediately without
    checking the subprocess's exit status.
    """
    command_line = "~/tools/dcm4/dcm4che-2.0.28/bin/dcmsnd DCM@mdc.scape.psnc.pl:7183 " + fname + " -keystore ~/keys/wcpit/wcpit-keystore.jks -keystorepw wcpit. -truststore ~/keys/wcpit/wcpit-truststore.jks -truststorepw wcpit. -tls AES -pdv1"
    args = [command_line]
    # BUGFIX: the original message referenced an undefined global `count`
    # ("sendig: ... [count]"), raising NameError as soon as psend() was
    # called.  Drop the counter and fix the "sendig" typo.
    print("sending: " + fname)
    subprocess.Popen(args, shell=True)
    print("done.")
    return 'Sent: ' + fname
def sendDicomFromFolder(root):
    """Walk *root* recursively and send the files found via dcmsnd.

    NOTE(review): only the first 4 files are ever sent — on the 5th the
    function returns.  Looks like leftover experiment code (see the
    commented-out Pool/imap lines); confirm before relying on it.
    """
    count = 0
    fnames = []
    p = Pool()
    for path, subdirs, files in os.walk(root):
        for name in files:
            fileToSend = os.path.join(path, name)
            #command_line = "~/tools/dcm4/dcm4che-2.0.28/bin/dcmsnd DCM@localhost:7183 " + fileToSend + " -keystore ~/keys/wcpit/wcpit-keystore.jks -keystorepw wcpit. -truststore ~/keys/wcpit/wcpit-truststore.jks -truststorepw wcpit. -tls AES -pdv1"
            command_line = "~/tools/dcm4/dcm4che-2.0.28/bin/dcmsnd DCM@mdc.scape.psnc.pl:7183 " + fileToSend + " -keystore ~/keys/wcpit/wcpit-keystore.jks -keystorepw wcpit. -truststore ~/keys/wcpit/wcpit-truststore.jks -truststorepw wcpit. -tls AES -pdv1"
            args = [command_line] #shlex.split(command_line)
            #print args
            count = count + 1
            if count < 5:
                #fnames.append(fileToSend)
                print "sendig: " + fileToSend + "[" + str(count) +"]"
                # Fire-and-forget: the exit status is never checked.
                subprocess.Popen(args, shell=True)
                print "done.\n"
            else:
                #r = p.imap(psend,fnames)
                #del fnames[:]
                #print r.get()
                return
    #os.system(command_line)
    #os.system("~/tools/dcm4/dcm4che-2.0.28/bin/dcmsnd DCM@mdc.scape.psnc.pl:7183 " + fileToSend + " -keystore ~/keys/wcpit/wcpit-keystore.jks -keystorepw wcpit. -truststore ~/keys/wcpit/wcpit-truststore.jks -truststorepw wcpit. -tls AES -pdv1")
    #print "done.\n"
def main():
    """Entry point: send the DICOM files found in the folder named on the
    command line."""
    folder_name = sys.argv[1]
    print("Start sending folder " + folder_name + " ... \n")
    sendDicomFromFolder(folder_name)
    print("Completed.")


if __name__ == "__main__":
    main()
| apache-2.0 |
ashhher3/cvxpy | cvxpy/tests/test_scs.py | 11 | 6736 | """
Copyright 2013 Steven Diamond, Eric Chu
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy import *
import cvxpy.atoms.elementwise.log as cvxlog
from cvxpy.tests.base_test import BaseTest
import cvxopt
import unittest
import math
import numpy as np
import sys
if sys.version_info >= (3, 0):
from functools import reduce
class TestSCS(BaseTest):
    """ Unit tests for the nonlinear atoms module, solved with SCS. """

    def setUp(self):
        # Fresh variables for each test case.
        self.x = Variable(2, name='x')
        self.y = Variable(2, name='y')

        self.A = Variable(2,2,name='A')
        self.B = Variable(2,2,name='B')
        self.C = Variable(3,2,name='C')

    # Overriden method to assume lower accuracy.
    def assertItemsAlmostEqual(self, a, b, places=2):
        super(TestSCS, self).assertItemsAlmostEqual(a,b,places=places)

    # Overriden method to assume lower accuracy.
    def assertAlmostEqual(self, a, b, places=2):
        super(TestSCS, self).assertAlmostEqual(a, b, places=places)

    def test_log_problem(self):
        # Log in objective.
        obj = Maximize(sum_entries(log(self.x)))
        constr = [self.x <= [1, math.e]]
        p = Problem(obj, constr)
        result = p.solve(solver=SCS)
        self.assertAlmostEqual(result, 1)
        self.assertItemsAlmostEqual(self.x.value, [1, math.e])

        # Log in constraint.
        obj = Minimize(sum_entries(self.x))
        constr = [log(self.x) >= 0, self.x <= [1,1]]
        p = Problem(obj, constr)
        result = p.solve(solver=SCS)
        self.assertAlmostEqual(result, 2)
        self.assertItemsAlmostEqual(self.x.value, [1,1])

        # Index into log.
        obj = Maximize(log(self.x)[1])
        constr = [self.x <= [1, math.e]]
        p = Problem(obj,constr)
        result = p.solve(solver=SCS)
        self.assertAlmostEqual(result, 1)

    def test_sigma_max(self):
        """Test sigma_max.
        """
        const = Constant([[1,2,3],[4,5,6]])
        constr = [self.C == const]
        prob = Problem(Minimize(norm(self.C, 2)), constr)
        result = prob.solve(solver=SCS)
        self.assertAlmostEqual(result, norm(const, 2).value)
        self.assertItemsAlmostEqual(self.C.value, const.value)

    def test_sdp_var(self):
        """Test sdp var.
        """
        const = Constant([[1,2,3],[4,5,6], [7,8,9]])
        X = Semidef(3)
        prob = Problem(Minimize(0), [X == const])
        prob.solve(verbose=True, solver=SCS)
        self.assertEqual(prob.status, INFEASIBLE)

    def test_entr_atom(self):
        """Test the entr atom's values.

        BUGFIX: this method was previously also named ``test_entr``; the
        second ``test_entr`` definition further down silently shadowed it,
        so these assertions never ran.  Renamed so both are collected.
        """
        self.assertEqual(entr(0).value, 0)
        assert np.isneginf(entr(-1).value)

    def test_kl_div(self):
        """Test a problem with kl_div.
        """
        import numpy as np
        import cvxpy as cp

        kK=50
        kSeed=10

        prng=np.random.RandomState(kSeed)
        #Generate a random reference distribution
        npSPriors=prng.uniform(0.0,1.0,(kK,1))
        npSPriors=npSPriors/sum(npSPriors)

        #Reference distribution
        p_refProb=cp.Parameter(kK,1,sign='positive')
        #Distribution to be estimated
        v_prob=cp.Variable(kK,1)
        objkl=0.0
        for k in range(kK):
            objkl += cp.kl_div(v_prob[k,0],p_refProb[k,0])

        constrs=[sum([v_prob[k,0] for k in range(kK)])==1]
        klprob=cp.Problem(cp.Minimize(objkl),constrs)
        p_refProb.value=npSPriors
        result = klprob.solve(solver=SCS, verbose=True)
        self.assertItemsAlmostEqual(v_prob.value, npSPriors)

    def test_entr(self):
        """Test a problem with entr.
        """
        for n in [5, 10, 25]:
            print(n)
            x = Variable(n)
            obj = Maximize(sum_entries(entr(x)))
            p = Problem(obj, [sum_entries(x) == 1])
            p.solve(solver=SCS, verbose=True)
            # Maximum entropy distribution is uniform.
            self.assertItemsAlmostEqual(x.value, n*[1./n])

    def test_exp(self):
        """Test a problem with exp.
        """
        for n in [5, 10, 25]:
            print(n)
            x = Variable(n)
            obj = Minimize(sum_entries(exp(x)))
            p = Problem(obj, [sum_entries(x) == 1])
            p.solve(solver=SCS, verbose=True)
            self.assertItemsAlmostEqual(x.value, n*[1./n])

    def test_log(self):
        """Test a problem with log.
        """
        for n in [5, 10, 25]:
            print(n)
            x = Variable(n)
            obj = Maximize(sum_entries(log(x)))
            p = Problem(obj, [sum_entries(x) == 1])
            p.solve(solver=SCS, verbose=True)
            self.assertItemsAlmostEqual(x.value, n*[1./n])

    def test_consistency(self):
        """Test case for non-deterministic behavior in cvxopt.
        """
        import cvxpy

        xs = [0, 1, 2, 3]
        ys = [51, 60, 70, 75]

        eta1 = cvxpy.Variable()
        eta2 = cvxpy.Variable()
        eta3 = cvxpy.Variable()
        theta1s = [eta1 + eta3*x for x in xs]
        lin_parts = [theta1 * y + eta2 * y**2 for (theta1, y) in zip(theta1s, ys)]
        g_parts = [-cvxpy.quad_over_lin(theta1, -4*eta2) + 0.5 * cvxpy.log(-2 * eta2)
                   for theta1 in theta1s]
        objective = reduce(lambda x,y: x+y, lin_parts + g_parts)
        problem = cvxpy.Problem(cvxpy.Maximize(objective))
        problem.solve(verbose=True, solver=cvxpy.SCS)
        assert problem.status in [cvxpy.OPTIMAL_INACCURATE, cvxpy.OPTIMAL]
        return [eta1.value, eta2.value, eta3.value]

    def test_warm_start(self):
        """Test warm starting.
        """
        x = Variable(10)
        obj = Minimize(sum_entries(exp(x)))
        prob = Problem(obj, [sum_entries(x) == 1])
        result = prob.solve(solver=SCS)
        assert prob.solve(solver=SCS, verbose=True) == result
        # TODO Probably a bad check. Ought to be the same.
        assert prob.solve(solver=SCS, warm_start=True, verbose=True) != result

    # def test_kl_div(self):
    #     """Test the kl_div atom.
    #     """
    #     self.assertEqual(kl_div(0, 0).value, 0)
    #     self.assertEqual(kl_div(1, 0).value, np.inf)
    #     self.assertEqual(kl_div(0, 1).value, np.inf)
    #     self.assertEqual(kl_div(-1, -1).value, np.inf)
| gpl-3.0 |
rectory-school/rectory-apps | detention_notifier/management/commands/send_detentions.py | 1 | 2557 | #!/usr/bin/python
import logging
from datetime import date, datetime
import time
import traceback
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.core.mail import send_mail
from django.utils import timezone
from detention_notifier.models import Detention, DetentionMailer, DetentionErrorNotification
from detention_notifier.email import get_message
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that emails all unsent detentions, optionally
    holding back today's detentions until a configured time of day."""

    help = "Send detentions"

    def handle(self, *args, **kwargs):
        logger.info("Beginning detention send routine")

        detention_mailer = DetentionMailer.objects.get()
        unsent_detentions = Detention.objects.filter(sent=False)

        if detention_mailer.do_not_send_same_day_before:
            # Convert the suppression time into a fully aware datetime object
            # so that we can compare it to the current time, and, if needed,
            # exclude detentions that were assigned today
            today = date.today()
            surpress_time = detention_mailer.do_not_send_same_day_before
            nonaware_surpress_check = datetime(today.year, today.month, today.day, surpress_time.hour, surpress_time.minute)
            surpress_check = timezone.get_current_timezone().localize(nonaware_surpress_check)

            # It has not yet hit the time required to send today's detentions
            if timezone.now() < surpress_check:
                # Only include detentions *before* today
                unsent_detentions = unsent_detentions.filter(detention_date__lt=date.today())

        # Addresses that receive failure notifications for this mailer.
        error_recipients = [o.address for o in DetentionErrorNotification.objects.filter(mailer=detention_mailer)]

        for detention in unsent_detentions:
            try:
                message = get_message(detention)
                message.send()

                #Sleep for 1 second for SES rate limiting
                #time.sleep(1)

                # Mark sent only after a successful send.
                detention.sent = True
                detention.save()
            except ValueError as e:
                # Expected validation problems: report and keep going.
                send_mail("Error sending detention", str(e), 'technology@rectoryschool.org', error_recipients)
                continue
            except Exception as e:
                # Unexpected failure: mail the full traceback and continue
                # with the remaining detentions.
                full_exception = traceback.format_exc()
                send_mail("Detention mailer internal error", full_exception, 'technology@rectoryschool.org', error_recipients)
                continue
| mit |
tastynoodle/django | tests/m2m_intermediary/models.py | 114 | 1262 | """
9. Many-to-many relationships via an intermediary table
For many-to-many relationships that need extra fields on the intermediary
table, use an intermediary model.
In this example, an ``Article`` can have multiple ``Reporter`` objects, and
each ``Article``-``Reporter`` combination (a ``Writer``) has a ``position``
field, which specifies the ``Reporter``'s position for the given article
(e.g. "Staff writer").
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
    # A reporter may write many articles, via the Writer through-model.
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)

    def __str__(self):
        # Display as "First Last".
        return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
    # An article can be written by several reporters (see Writer).
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()

    def __str__(self):
        return self.headline
@python_2_unicode_compatible
class Writer(models.Model):
    # Intermediary between Reporter and Article, carrying the extra
    # "position" field (e.g. "Staff writer") for each pairing.
    reporter = models.ForeignKey(Reporter)
    article = models.ForeignKey(Article)
    position = models.CharField(max_length=100)

    def __str__(self):
        return '%s (%s)' % (self.reporter, self.position)
| bsd-3-clause |
zangree/ryu | ryu/services/protocols/bgp/info_base/ipv6.py | 47 | 2988 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines data types and models required specifically for IPv4 support.
"""
import logging
from ryu.lib.packet.bgp import IPAddrPrefix
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.services.protocols.bgp.info_base.base import Path
from ryu.services.protocols.bgp.info_base.base import Table
from ryu.services.protocols.bgp.info_base.base import Destination
from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
from ryu.services.protocols.bgp.info_base.base import PrefixFilter
LOG = logging.getLogger('bgpspeaker.info_base.ipv6')
class IPv6Dest(Destination, NonVrfPathProcessingMixin):
    """v6 Destination

    Store IPv6 Paths.
    """
    ROUTE_FAMILY = RF_IPv6_UC

    def _best_path_lost(self):
        # Remember the outgoing best path so listeners can be told which
        # path was withdrawn.
        old_best_path = self._best_path
        NonVrfPathProcessingMixin._best_path_lost(self)
        # Second argument True signals that the best path was lost.
        self._core_service._signal_bus.best_path_changed(old_best_path, True)

    def _new_best_path(self, best_path):
        NonVrfPathProcessingMixin._new_best_path(self, best_path)
        self._core_service._signal_bus.best_path_changed(best_path, False)
class Ipv6Table(Table):
    """Global table to store IPv6 routing information.

    Uses `IPv6Dest` to store destination information for each known IPv6
    path.
    """
    ROUTE_FAMILY = RF_IPv6_UC
    VPN_DEST_CLASS = IPv6Dest

    def __init__(self, core_service, signal_bus):
        # scope_id is None: this is the global (non-VRF) table.
        super(Ipv6Table, self).__init__(None, core_service, signal_bus)

    def _table_key(self, nlri):
        """Return a key that will uniquely identify this NLRI inside
        this table.
        """
        return nlri.prefix

    def _create_dest(self, nlri):
        return self.VPN_DEST_CLASS(self, nlri)

    def __str__(self):
        return '%s(scope_id: %s, rf: %s)' % (
            self.__class__.__name__, self.scope_id, self.route_family
        )
class Ipv6Path(Path):
    """Represents a way of reaching an v6 destination."""
    ROUTE_FAMILY = RF_IPv6_UC
    VRF_PATH_CLASS = None  # defined in init - anti cyclic import hack
    NLRI_CLASS = IPAddrPrefix

    def __init__(self, *args, **kwargs):
        super(Ipv6Path, self).__init__(*args, **kwargs)
        # Imported here rather than at module level to break the import
        # cycle with the vrf6 module.
        from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6Path
        self.VRF_PATH_CLASS = Vrf6Path
class Ipv6PrefixFilter(PrefixFilter):
    """IPv6 Prefix Filter class"""
    # Only the route family differs from the generic PrefixFilter base.
    ROUTE_FAMILY = RF_IPv6_UC
| apache-2.0 |
tobspr/panda3d | direct/src/stdpy/file.py | 10 | 12313 | """ This module reimplements Python's file I/O mechanisms using Panda
constructs. This enables Python to interface more easily with Panda's
virtual file system, and it also better-supports Panda's
SIMPLE_THREADS model, by avoiding blocking all threads while waiting
for I/O to complete. """
__all__ = [
'open', 'listdir', 'walk', 'join',
'isfile', 'isdir', 'exists', 'lexists', 'getmtime', 'getsize',
'execfile',
]
from panda3d import core
import sys
import os
import io
import encodings
from posixpath import join
_vfs = core.VirtualFileSystem.getGlobalPtr()
if sys.version_info < (3, 0):
# Python 3 defines these subtypes of IOError, but Python 2 doesn't.
FileNotFoundError = IOError
IsADirectoryError = IOError
FileExistsError = IOError
PermissionError = IOError
unicodeType = unicode
strType = str
else:
unicodeType = str
strType = ()
def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
    """ Open a file through Panda's VirtualFileSystem, mimicking the
    built-in open().  *file* may be a filename (str/unicode), a Filename,
    a VirtualFile, or an existing Istream/Ostream.  Returns a binary
    StreamIOWrapper, or an io.TextIOWrapper around it for text modes.
    NOTE(review): closefd is accepted for signature compatibility but
    never consulted below — confirm intended. """
    if sys.version_info >= (3, 0):
        # Python 3 is much stricter than Python 2, which lets
        # unknown flags fall through.
        for ch in mode:
            if ch not in 'rwxabt+U':
                raise ValueError("invalid mode: '%s'" % (mode))

    # Decompose the mode string into individual intents.
    creating = 'x' in mode
    writing = 'w' in mode
    appending = 'a' in mode
    updating = '+' in mode
    binary = 'b' in mode
    universal = 'U' in mode
    reading = universal or 'r' in mode

    if binary and 't' in mode:
        raise ValueError("can't have text and binary mode at once")
    if creating + reading + writing + appending > 1:
        raise ValueError("must have exactly one of create/read/write/append mode")
    if binary:
        if encoding:
            raise ValueError("binary mode doesn't take an encoding argument")
        if errors:
            raise ValueError("binary mode doesn't take an errors argument")
        if newline:
            raise ValueError("binary mode doesn't take a newline argument")

    if isinstance(file, core.Istream) or isinstance(file, core.Ostream):
        # If we were given a stream instead of a filename, assign
        # it directly.
        raw = StreamIOWrapper(file)
        raw.mode = mode
    else:
        vfile = None

        if isinstance(file, core.VirtualFile):
            # We can also "open" a VirtualFile object for reading.
            vfile = file
            filename = vfile.getFilename()
        elif isinstance(file, unicodeType):
            # If a raw string is given, assume it's an os-specific
            # filename.
            filename = core.Filename.fromOsSpecificW(file)
        elif isinstance(file, strType):
            filename = core.Filename.fromOsSpecific(file)
        else:
            # If a Filename is given, make a writable copy anyway.
            filename = core.Filename(file)

        # Text/binary flag affects newline translation at the Panda level.
        if binary or sys.version_info >= (3, 0):
            filename.setBinary()
        else:
            filename.setText()

        if not vfile:
            vfile = _vfs.getFile(filename)

        if not vfile:
            # The file doesn't exist yet; only writing modes may create it.
            if reading:
                raise FileNotFoundError("No such file or directory: '%s'" % (filename))

            vfile = _vfs.createFile(filename)
            if not vfile:
                raise IOError("Failed to create file: '%s'" % (filename))
        elif creating:
            # In 'creating' mode, we have to raise FileExistsError
            # if the file already exists.  Otherwise, it's the same
            # as 'writing' mode.
            raise FileExistsError("File exists: '%s'" % (filename))
        elif vfile.isDirectory():
            raise IsADirectoryError("Is a directory: '%s'" % (filename))

        # Actually open the streams.
        if reading:
            if updating:
                stream = vfile.openReadWriteFile(False)
            else:
                stream = vfile.openReadFile(False)
            if not stream:
                raise IOError("Could not open %s for reading" % (filename))
        elif writing or creating:
            if updating:
                stream = vfile.openReadWriteFile(True)
            else:
                stream = vfile.openWriteFile(False, True)
            if not stream:
                raise IOError("Could not open %s for writing" % (filename))
        elif appending:
            if updating:
                stream = vfile.openReadAppendFile()
            else:
                stream = vfile.openAppendFile()
            if not stream:
                raise IOError("Could not open %s for appending" % (filename))
        else:
            raise ValueError("Must have exactly one of create/read/write/append mode and at most one plus")

        raw = StreamIOWrapper(stream, needsVfsClose=True)
        raw.mode = mode
        raw.name = vfile.getFilename().toOsSpecific()

    # If a binary stream was requested, return the stream we've created.
    if binary:
        return raw

    # If we're in Python 2, we don't decode unicode strings by default.
    if not encoding and sys.version_info < (3, 0):
        return raw

    line_buffering = False
    if buffering == 1:
        line_buffering = True
    elif buffering == 0:
        raise ValueError("can't have unbuffered text I/O")

    # Otherwise, create a TextIOWrapper object to wrap it.
    wrapper = io.TextIOWrapper(raw, encoding, errors, newline, line_buffering)
    wrapper.mode = mode
    return wrapper
if sys.version_info < (3, 0):
    # Python 2 had an alias for open() called file().
    __all__.append('file')
    file = open
class StreamIOWrapper(io.IOBase):
""" This is a file-like object that wraps around a C++ istream and/or
ostream object. It only deals with binary data; to work with text I/O,
create an io.TextIOWrapper object around this, or use the open()
function that is also provided with this module. """
def __init__(self, stream, needsVfsClose=False):
self.__stream = stream
self.__needsVfsClose = needsVfsClose
self.__reader = None
self.__writer = None
self.__lastWrite = False
if isinstance(stream, core.Istream):
self.__reader = core.StreamReader(stream, False)
if isinstance(stream, core.Ostream):
self.__writer = core.StreamWriter(stream, False)
self.__lastWrite = True
if sys.version_info >= (3, 0):
# In Python 3, we use appendData, which only accepts bytes.
self.__write = self.__writer.appendData
else:
# In Python 2.7, we also accept unicode objects, which are
# implicitly converted to C++ strings.
self.__write = self.__writer.write
def __repr__(self):
s = "<direct.stdpy.file.StreamIOWrapper"
if hasattr(self, 'name'):
s += " name='%s'" % (self.name)
if hasattr(self, 'mode'):
s += " mode='%s'" % (self.mode)
s += ">"
return s
def readable(self):
return self.__reader is not None
def writable(self):
return self.__writer is not None
def close(self):
if self.__needsVfsClose:
if self.__reader and self.__writer:
_vfs.closeReadWriteFile(self.__stream)
elif self.__reader:
_vfs.closeReadFile(self.__stream)
else: # self.__writer:
_vfs.closeWriteFile(self.__stream)
self.__needsVfsClose = False
self.__stream = None
self.__reader = None
self.__writer = None
def flush(self):
if self.__writer:
self.__stream.clear() # clear eof flag
self.__stream.flush()
def read(self, size=-1):
if not self.__reader:
if not self.__writer:
# The stream is not even open at all.
raise ValueError("I/O operation on closed file")
# The stream is open only in write mode.
raise IOError("Attempt to read from write-only stream")
self.__stream.clear() # clear eof flag
self.__lastWrite = False
if size is not None and size >= 0:
result = self.__reader.extractBytes(size)
else:
# Read to end-of-file.
result = b''
while not self.__stream.eof():
result += self.__reader.extractBytes(512)
return result
read1 = read
def readline(self, size=-1):
    """Reads and returns a single line (the size argument is ignored)."""
    if not self.__reader:
        if self.__writer:
            # Open, but only for writing.
            raise IOError("Attempt to read from write-only stream")
        # Not open at all.
        raise ValueError("I/O operation on closed file")
    self.__stream.clear()  # clear eof flag
    self.__lastWrite = False
    return self.__reader.readline()
def seek(self, offset, whence = 0):
    """Repositions the read and/or write cursor; a no-op once closed."""
    stream = self.__stream
    if not stream:
        return
    stream.clear()  # clear eof flag
    if self.__reader:
        stream.seekg(offset, whence)
    if self.__writer:
        stream.seekp(offset, whence)
def tell(self):
    """Returns the current stream position, or raises if closed."""
    if self.__lastWrite:
        if self.__writer:
            return self.__stream.tellp()
    elif self.__reader:
        return self.__stream.tellg()
    raise ValueError("I/O operation on closed file")
def write(self, b):
    """Appends data to the stream; accepts bytes (and unicode on Py2)."""
    if self.__writer:
        self.__stream.clear()  # clear eof flag
        self.__write(b)
        self.__lastWrite = True
        return
    if self.__reader:
        # Open, but only for reading.
        raise IOError("Attempt to write to read-only stream")
    # Not open at all.
    raise ValueError("I/O operation on closed file")
def writelines(self, lines):
    """Writes each item of the iterable to the stream."""
    if self.__writer:
        self.__stream.clear()  # clear eof flag
        for chunk in lines:
            self.__write(chunk)
        self.__lastWrite = True
        return
    if self.__reader:
        # Open, but only for reading.
        raise IOError("Attempt to write to read-only stream")
    # Not open at all.
    raise ValueError("I/O operation on closed file")
def listdir(path):
    """ Implements os.listdir over vfs. """
    dirlist = _vfs.scanDirectory(core.Filename.fromOsSpecific(path))
    if dirlist is None:
        raise OSError("No such file or directory: '%s'" % (path))
    return [entry.getFilename().getBasename() for entry in dirlist]
def walk(top, topdown = True, onerror = None, followlinks = True):
    """ Implements os.walk over vfs.

    Note: we don't support onerror or followlinks; errors are ignored
    and links are always followed. """
    subdirs = []
    regular_files = []
    scan = _vfs.scanDirectory(top)
    if scan:
        for entry in scan:
            basename = entry.getFilename().getBasename()
            if entry.isDirectory():
                subdirs.append(basename)
            else:
                regular_files.append(basename)
    if topdown:
        yield (top, subdirs, regular_files)
    for basename in subdirs:
        for triple in walk(join(top, basename), topdown = topdown):
            yield triple
    if not topdown:
        yield (top, subdirs, regular_files)
def isfile(path):
    """ Implements os.path.isfile over vfs. """
    vfs_path = core.Filename.fromOsSpecific(path)
    return _vfs.isRegularFile(vfs_path)
def isdir(path):
    """ Implements os.path.isdir over vfs. """
    vfs_path = core.Filename.fromOsSpecific(path)
    return _vfs.isDirectory(vfs_path)
def exists(path):
    """ Implements os.path.exists over vfs. """
    vfs_path = core.Filename.fromOsSpecific(path)
    return _vfs.exists(vfs_path)
def lexists(path):
    """ Implements os.path.lexists over vfs.  The vfs makes no
    distinction for symlinks, so this is identical to exists(). """
    vfs_path = core.Filename.fromOsSpecific(path)
    return _vfs.exists(vfs_path)
def getmtime(path):
    """ Implements os.path.getmtime over vfs.

    Raises:
        OSError: the path does not exist in the vfs.
    """
    vfile = _vfs.getFile(core.Filename.fromOsSpecific(path), True)
    if not vfile:
        # Include the offending path in the error; os.error is an alias
        # of OSError, so existing except clauses still match.
        raise os.error("No such file or directory: '%s'" % (path))
    return vfile.getTimestamp()
def getsize(path):
    """ Implements os.path.getsize over vfs.

    Raises:
        OSError: the path does not exist in the vfs.
    """
    vfile = _vfs.getFile(core.Filename.fromOsSpecific(path), True)
    if not vfile:
        # Include the offending path in the error; os.error is an alias
        # of OSError, so existing except clauses still match.
        raise os.error("No such file or directory: '%s'" % (path))
    return vfile.getFileSize()
def execfile(path, globals=None, locals=None):
    """ Implements the Python 2 execfile() builtin over vfs.

    NOTE(review): this executes arbitrary code read from the vfs; only
    use it with trusted files.
    """
    file = _vfs.getFile(core.Filename.fromOsSpecific(path), True)
    if not file:
        raise os.error
    data = file.readFile(False)
    exec(data, globals, locals)
| bsd-3-clause |
ilovezy/three.js | utils/exporters/blender/addons/io_three/exporter/api/light.py | 195 | 1099 | from bpy import data, types
from .. import utilities, logger
def _lamp(func):
    """Decorator resolving a lamp name to a bpy Lamp datablock.

    :param func: function taking a Lamp as its first argument
    """
    def inner(name, *args, **kwargs):
        """Resolve ``name`` (a Lamp instance or lamp name) and forward.

        :param name:
        :param *args:
        :param **kwargs:
        """
        if isinstance(name, types.Lamp):
            resolved = name
        else:
            resolved = data.lamps[name]
        return func(resolved, *args, **kwargs)
    return inner
@_lamp
def angle(lamp):
    """Spot cone angle of the lamp.

    :param lamp:
    :rtype: float
    """
    logger.debug("light.angle(%s)", lamp)
    spot_size = lamp.spot_size
    return spot_size
@_lamp
def color(lamp):
    """Lamp colour packed into a single integer.

    :param lamp:
    :rtype: int
    """
    logger.debug("light.color(%s)", lamp)
    channels = (lamp.color.r, lamp.color.g, lamp.color.b)
    return utilities.rgb2int(channels)
@_lamp
def distance(lamp):
    """Falloff distance of the lamp.

    :param lamp:
    :rtype: float
    """
    logger.debug("light.distance(%s)", lamp)
    falloff = lamp.distance
    return falloff
@_lamp
def intensity(lamp):
    """Lamp energy, rounded to two decimal places.

    :param lamp:
    :rtype: float
    """
    logger.debug("light.intensity(%s)", lamp)
    energy = lamp.energy
    return round(energy, 2)
| mit |
martinwicke/tensorflow | tensorflow/tools/quantization/quantize_graph.py | 6 | 55970 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Transforms a float-trained graph into an equivalent quantized version.
An example of command-line usage is:
bazel build tensorflow/tools/quantization:quantize_graph \
&& bazel-bin/tensorflow/tools/quantization/quantize_graph \
--input=tensorflow_inception_graph.pb
--output_node_names="softmax2" --print_nodes --output=/tmp/quantized_graph.pb \
--mode=eightbit --logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# Command-line interface for this script.
flags = tf.app.flags
FLAGS = flags.FLAGS

# I/O flags: where to read the float GraphDef and write the result.
flags.DEFINE_boolean("print_nodes", False, """Lists all nodes in the model.""")
flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""")
flags.DEFINE_string("output_node_names", "",
                    """Output node names, comma separated.""")
flags.DEFINE_string("output", "", """File to save the output graph to.""")

# Core quantization parameters.
flags.DEFINE_integer("bitdepth", 8,
                     """How many bits to quantize the graph to.""")
flags.DEFINE_string("mode", "round",
                    """What transformation to apply (round, quantize,"""
                    """ eightbit, weights, or weights_rounded).""")
flags.DEFINE_string("test_input_dims", "1,224,224,3",
                    """The size of the input tensor to use when testing a"""
                    """ graph loaded from a file.""")
flags.DEFINE_boolean("strip_redundant_quantization", True,
                     """Removes redundant dequantize/quantize pairs.""")

# Options for graphs whose inputs are already quantized.
flags.DEFINE_boolean("quantized_input", False,
                     "If true, assume Placeholders are quantized with values "
                     "covering [--quantized_input_min,--quantized_input_max]. "
                     "Only supported when --mode=eightbit")
flags.DEFINE_float("quantized_input_min", 0,
                   "The minimum of the actual input range when "
                   "--quantized_input")
flags.DEFINE_float("quantized_input_max", 1,
                   "The maximum of the actual input range when "
                   "--quantized_input")

# Fallback range for layers that carry no min/max information.
flags.DEFINE_float(
    "quantized_fallback_min", None,
    "The fallback 'min' value to use for layers which lack min-max "
    "information. Note: this should be considered a coarse tool just good "
    "enough for experimentation purposes, since graphs quantized in this way "
    "would be very inaccurate.")
flags.DEFINE_float(
    "quantized_fallback_max", None,
    "The fallback 'max' value to use for layers which lack min-max "
    "information. Note: this should be considered a coarse tool just good "
    "enough for experimentation purposes, since graphs quantized in this way "
    "would be very inaccurate.")
def print_input_nodes(current_node, nodes_map, indent, already_visited):
  """Recursively prints a node and its transitive inputs, one per line."""
  print(" " * indent + current_node.op + ":" + current_node.name)
  already_visited[current_node.name] = True
  for name in current_node.input:
    if name in already_visited:
      continue
    print_input_nodes(nodes_map[name], nodes_map, indent + 1, already_visited)
def create_node(op, name, inputs):
  """Builds a NodeDef with the given op, name, and input list."""
  node = tf.NodeDef()
  node.op = op
  node.name = name
  node.input.extend(inputs)
  return node
def create_constant_node(name, value, dtype, shape=None):
  """Builds a Const NodeDef holding the given tensor value."""
  const_node = create_node("Const", name, [])
  set_attr_dtype(const_node, "dtype", dtype)
  set_attr_tensor(const_node, "value", value, dtype, shape)
  return const_node
def copy_attr(node, key, attr_value):
  """Copies an existing AttrValue proto onto node.attr[key]."""
  attrs = node.attr
  try:
    attrs[key].CopyFrom(attr_value)
  except KeyError:
    pass
def set_attr_dtype(node, key, value):
  """Sets a DataType attr on node; a KeyError is silently ignored."""
  attr = tf.AttrValue(type=value.as_datatype_enum)
  try:
    node.attr[key].CopyFrom(attr)
  except KeyError:
    pass
def set_attr_shape(node, key, value):
  """Sets a TensorShape attr on node; a KeyError is silently ignored."""
  shape_proto = tensor_shape.as_shape(value).as_proto()
  try:
    node.attr[key].CopyFrom(tf.AttrValue(shape=shape_proto))
  except KeyError:
    pass
def set_attr_tensor(node, key, value, dtype, shape=None):
  """Sets a TensorProto attr on node; a KeyError is silently ignored."""
  tensor_proto = tensor_util.make_tensor_proto(value,
                                               dtype=dtype,
                                               shape=shape)
  try:
    node.attr[key].CopyFrom(tf.AttrValue(tensor=tensor_proto))
  except KeyError:
    pass
def set_attr_string(node, key, value):
  """Sets a bytes attr on node; a KeyError is silently ignored."""
  attr = tf.AttrValue(s=value)
  try:
    node.attr[key].CopyFrom(attr)
  except KeyError:
    pass
def set_attr_int_list(node, key, value):
  """Sets a list-of-ints attr on node; a KeyError is silently ignored."""
  attr = tf.AttrValue(list=tf.AttrValue.ListValue(i=value))
  try:
    node.attr[key].CopyFrom(attr)
  except KeyError:
    pass
def set_attr_bool(node, key, value):
  """Sets a boolean attr on node; a KeyError is silently ignored."""
  attr = tf.AttrValue(b=value)
  try:
    node.attr[key].CopyFrom(attr)
  except KeyError:
    pass
def set_attr_int(node, key, value):
  """Sets an integer attr on node; a KeyError is silently ignored."""
  attr = tf.AttrValue(i=value)
  try:
    node.attr[key].CopyFrom(attr)
  except KeyError:
    pass
def set_attr_float(node, key, value):
  """Sets a float attr on node; a KeyError is silently ignored."""
  attr = tf.AttrValue(f=value)
  try:
    node.attr[key].CopyFrom(attr)
  except KeyError:
    pass
def node_name_from_input(node_name):
  """Strips off ports and other decorations to get the underlying node name."""
  name = node_name[1:] if node_name.startswith("^") else node_name
  match = re.search(r"(.*):\d+$", name)
  if match:
    return match.group(1)
  return name
def ensure_tensor_name_has_port(node_name):
  """Makes sure that a tensor name has :0 if no explicit port exists."""
  if re.search(r"(.*):\d+$", node_name):
    return node_name
  return node_name + ":0"
def unique_node_name_from_input(node_name):
  """Replaces invalid characters in input names to get a unique node name."""
  sanitized = node_name.replace(":", "__port__")
  return sanitized.replace("^", "__hat__")
def quantize_array(arr, num_buckets):
  """Quantizes a numpy array by snapping each value to its bucket center.

  For instance, quantize_array([0, 0.3, 0.6, 1], 2) => [0.25, 0.25, 0.75, 0.75]

  Args:
    arr: The numpy array to quantize.
    num_buckets: The number of buckets to map "var" to.
  Returns:
    The quantized numpy array.
  Raises:
    ValueError: when num_buckets < 1.
  """
  if num_buckets < 1:
    raise ValueError("num_buckets must be >= 1")
  lo = arr.min()
  hi = arr.max()
  # A constant array cannot be bucketed (zero width); return it as-is.
  if hi == lo:
    return arr
  width = (hi - lo) / num_buckets
  # Bucket index per scalar; clamp max(arr) into the last bucket.
  indices = np.floor((arr - lo) / width)
  indices[indices == num_buckets] = num_buckets - 1
  # Replace each scalar with its bucket's center value.
  return lo + width * (indices + 0.5)
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  tensor_proto = input_node.attr["value"].tensor
  values = tensor_util.MakeNdarray(tensor_proto)
  # The bucket count is derived from FLAGS.bitdepth as 1 << bitdepth, so
  # it is always a power of two.  A separate num_buckets parameter would
  # allow finer size/accuracy tradeoffs, at the cost of another flag.
  num_buckets = 1 << FLAGS.bitdepth
  rounded = quantize_array(values, num_buckets)
  shape_list = tensor_util.TensorShapeProtoToList(tensor_proto.tensor_shape)
  return [create_constant_node(input_node.name, rounded,
                               tf.float32, shape=shape_list)]
def quantize_weight_eightbit(input_node, quantization_mode):
  """Returns replacement nodes for input_node using the Dequantize op.

  The float Const is replaced by a quint8 Const plus min/max Consts and a
  Dequantize node that reconstructs approximate float values at runtime.
  """
  base_name = input_node.name + "_"
  quint8_const_name = base_name + "quint8_const"
  min_name = base_name + "min"
  max_name = base_name + "max"
  float_tensor = tensor_util.MakeNdarray(
      input_node.attr["value"].tensor)
  min_value = np.min(float_tensor.flatten())
  max_value = np.max(float_tensor.flatten())
  # min_value == max_value is a tricky case. It can occur for general
  # tensors, and of course for scalars. The quantized ops cannot deal
  # with this case, so we set max_value to something else.
  # It's a tricky question what is the numerically best solution to
  # deal with this degeneracy.
  # TODO(petewarden): Better use a tolerance than a hard comparison?
  if min_value == max_value:
    if abs(min_value) < 0.000001:
      max_value = min_value + 1.0
    elif min_value > 0:
      max_value = 2 * min_value
    else:
      max_value = min_value / 2.0
  # Run the quantization inside a managed Session so its resources are
  # released; previously one Session was leaked per converted weight.
  with tf.Session() as sess:
    with sess.as_default():
      quantize_op = tf.contrib.quantization.python.quantize_v2(
          float_tensor,
          min_value,
          max_value,
          tf.quint8,
          mode=quantization_mode)
      quint8_tensor = quantize_op[0].eval()
  shape = tensor_util.TensorShapeProtoToList(input_node.attr[
      "value"].tensor.tensor_shape)
  quint8_const_node = create_constant_node(quint8_const_name,
                                           quint8_tensor,
                                           tf.quint8,
                                           shape=shape)
  min_node = create_constant_node(min_name, min_value, tf.float32)
  max_node = create_constant_node(max_name, max_value, tf.float32)
  # The Dequantize node takes the original Const's name so that existing
  # consumers are unaffected.
  dequantize_node = create_node("Dequantize", input_node.name,
                                [quint8_const_name, min_name, max_name])
  set_attr_dtype(dequantize_node, "T", tf.quint8)
  set_attr_string(dequantize_node, "mode", quantization_mode)
  return [quint8_const_node, min_node, max_node, dequantize_node]
# State carried through the recursive eightbitize pass:
#   already_visited: node name -> True, guards against re-processing.
#   output_node_stack: (consumer_node, input_index, quantize_input) tuples
#     describing the path from the current node up to the graph outputs.
#   merged_with_fake_quant: node name -> True for nodes whose output was
#     fused with a downstream FakeQuant* node.
EightbitizeRecursionState = collections.namedtuple(
    "EightbitizeRecursionState", ["already_visited", "output_node_stack",
                                  "merged_with_fake_quant"])
class GraphRewriter(object):
"""Takes a float graph, and rewrites it in quantized form."""
  def __init__(self, input_graph, mode, quantized_input_range,
               fallback_quantization_range=None):
    """Sets up the class to rewrite a float graph.

    Args:
      input_graph: A float graph to transform.
      mode: A string controlling how quantization is performed -
        round, quantize, eightbit, or weights.
      quantized_input_range: if set, assume the input is
        quantized and represents the range
        [quantized_input_range[0], quantized_input_range[1]]
      fallback_quantization_range: if set, then for nodes where the quantization
        range can't be inferred from the graph, use the range
        [fallback_quantization_range[0], fallback_quantization_range[1]) instead
        of using a RequantizationRange node in the graph.

    Raises:
      ValueError: Two nodes with the same name were found in the graph.
    """
    self.input_graph = input_graph
    self.nodes_map = self.create_nodes_map(input_graph)
    self.output_graph = None
    self.mode = mode
    # Final-pass renames applied after rewriting (e.g. for merged nodes).
    self.final_node_renames = {}
    # Both optional ranges are validated here so bad flags fail fast.
    if quantized_input_range:
      self.input_range = (quantized_input_range[0], quantized_input_range[1])
      if self.input_range[0] >= self.input_range[1]:
        raise ValueError("Invalid quantized_input_range: [%s,%s]" %
                         self.input_range)
      if self.mode != "eightbit":
        raise ValueError(
            "quantized_input_range can only be specified in eightbit mode")
    else:
      self.input_range = None
    if fallback_quantization_range:
      self.fallback_quantization_range = [fallback_quantization_range[0],
                                          fallback_quantization_range[1]]
      if (self.fallback_quantization_range[0] >=
          self.fallback_quantization_range[1]):
        raise ValueError("Invalid fallback_quantization_range: [%s,%s]" %
                         self.fallback_quantization_range)
      if self.mode != "eightbit":
        raise ValueError(
            "fallback_quantization_range can only be "
            "specified in eightbit mode")
    else:
      self.fallback_quantization_range = None
    # Data that is valid only during the recursive call to rewrite the graph.
    self.state = None
def create_nodes_map(self, graph):
"""Builds a mapping of node names to their defs from the graph."""
nodes_map = {}
for node in graph.node:
if node.name not in nodes_map.keys():
nodes_map[node.name] = node
else:
raise ValueError("Duplicate node names detected.")
return nodes_map
  def rewrite(self, output_node_names):
    """Triggers rewriting of the float graph.

    Args:
      output_node_names: A list of names of the nodes that produce the final
        results.

    Returns:
      A quantized version of the float graph.
    """
    self.output_graph = tf.GraphDef()
    output_nodes = [self.nodes_map[output_node_name]
                    for output_node_name in output_node_names]
    if self.mode == "round":
      # Snap float weights to discrete levels but keep float types.
      self.already_visited = {}
      for output_node in output_nodes:
        self.round_nodes_recursively(output_node)
    elif self.mode == "quantize":
      # Wrap the heavy ops in Quantize/Dequantize pairs.
      self.already_visited = {}
      self.already_quantized = {}
      for output_node in output_nodes:
        self.quantize_nodes_recursively(output_node)
    elif self.mode == "eightbit":
      # Full eight-bit conversion of the supported op set.
      self.set_input_graph(graph_util.remove_training_nodes(self.input_graph))
      # Re-resolve outputs; NOTE(review): assumes set_input_graph refreshed
      # self.nodes_map after training nodes were removed -- confirm.
      output_nodes = [self.nodes_map[output_node_name]
                      for output_node_name in output_node_names]
      self.state = EightbitizeRecursionState(already_visited={},
                                             output_node_stack=[],
                                             merged_with_fake_quant={})
      for output_node in output_nodes:
        self.eightbitize_nodes_recursively(output_node)
      self.state = None
      if self.input_range:
        # Constants consumed by the rewritten Placeholder nodes.
        self.add_output_graph_node(create_constant_node(
            "quantized_input_min_value", self.input_range[0], tf.float32, []))
        self.add_output_graph_node(create_constant_node(
            "quantized_input_max_value", self.input_range[1], tf.float32, []))
      if self.fallback_quantization_range:
        # Constants used in place of per-layer RequantizationRange nodes.
        self.add_output_graph_node(create_constant_node(
            "fallback_quantization_min_value",
            self.fallback_quantization_range[0], tf.float32, []))
        self.add_output_graph_node(create_constant_node(
            "fallback_quantization_max_value",
            self.fallback_quantization_range[1], tf.float32, []))
      if FLAGS.strip_redundant_quantization:
        self.output_graph = self.remove_redundant_quantization(
            self.output_graph)
        self.remove_dead_nodes(output_node_names)
      self.apply_final_node_renames()
    elif self.mode == "weights":
      # Only the weights are quantized; activations remain float.
      self.output_graph = self.quantize_weights(self.input_graph,
                                                b"MIN_COMBINED")
      self.remove_dead_nodes(output_node_names)
    elif self.mode == "weights_rounded":
      self.output_graph = self.quantize_weights(self.input_graph, self.mode)
      self.remove_dead_nodes(output_node_names)
    else:
      print("Bad mode - " + self.mode + ".")
    return self.output_graph
def round_nodes_recursively(self, current_node):
"""The entry point for simple rounding quantization."""
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.round_nodes_recursively(input_node)
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
new_node.name = current_node.name + "_original"
self.add_output_graph_node(new_node)
levels = 1 << FLAGS.bitdepth
constant_name = current_node.name + "_round_depth"
constant_tensor = tf.constant(levels, dtype=tf.int32, name=constant_name)
constant_node = constant_tensor.op.node_def
self.add_output_graph_node(constant_node)
quantize_node = tf.NodeDef()
quantize_node.op = "RoundToSteps"
quantize_node.name = current_node.name
quantize_node.input.extend([current_node.name + "_original"])
quantize_node.input.extend([constant_node.name])
self.add_output_graph_node(quantize_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_nodes_recursively(self, current_node):
"""The entry point for quantizing nodes to eight bit and back."""
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.quantize_nodes_recursively(input_node)
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
for input_name in current_node.input:
input_name = node_name_from_input(input_name)
input_node = self.nodes_map[input_name]
self.quantize_node(input_node)
self.quantize_node(current_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
  def quantize_node(self, input_node):
    """Handles quantizing a single node.

    Renames the original node to <name>_original and emits a subgraph
    that computes the tensor's min/max, quantizes it to quint8, and
    dequantizes it back under the original name, so that downstream
    consumers are unchanged.
    """
    input_name = input_node.name
    if input_name in self.already_quantized:
      return
    self.already_quantized[input_name] = True
    original_input_name = input_name + "_original"
    reshape_name = input_name + "_reshape"
    reshape_dims_name = input_name + "_reshape_dims"
    max_name = input_name + "_max"
    min_name = input_name + "_min"
    dims_name = input_name + "_dims"
    quantize_name = input_name + "_quantize"
    dequantize_name = input_name
    original_input_node = tf.NodeDef()
    original_input_node.CopyFrom(input_node)
    original_input_node.name = original_input_name
    self.add_output_graph_node(original_input_node)
    # Flatten to rank 1 so Min/Max reduce over every element.
    reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
                                             [1])
    self.add_output_graph_node(reshape_dims_node)
    reshape_node = create_node("Reshape", reshape_name, [original_input_name,
                                                         reshape_dims_name])
    set_attr_dtype(reshape_node, "T", tf.float32)
    self.add_output_graph_node(reshape_node)
    dims_node = create_constant_node(dims_name, 0, tf.int32, [1])
    self.add_output_graph_node(dims_node)
    max_node = create_node("Max", max_name, [reshape_name, dims_name])
    set_attr_dtype(max_node, "T", tf.float32)
    set_attr_bool(max_node, "keep_dims", False)
    self.add_output_graph_node(max_node)
    min_node = create_node("Min", min_name, [reshape_name, dims_name])
    set_attr_dtype(min_node, "T", tf.float32)
    set_attr_bool(min_node, "keep_dims", False)
    self.add_output_graph_node(min_node)
    quantize_node = create_node("Quantize", quantize_name, [original_input_name,
                                                            min_name, max_name])
    set_attr_dtype(quantize_node, "T", tf.quint8)
    set_attr_string(quantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(quantize_node)
    # Dequantize reuses the original name, keeping consumers' inputs valid.
    dequantize_node = create_node("Dequantize", dequantize_name,
                                  [quantize_name, min_name, max_name])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)
def should_merge_with_fake_quant_node(self):
"""Should the current node merge with self.state.output_node_stack[-1]?"""
if not self.state.output_node_stack: return False
top = self.state.output_node_stack[-1]
return top[1] == 0 and top[0].op in ["FakeQuantWithMinMaxVars"]
def should_quantize_const(self, node):
if not self.state.output_node_stack: return False
top = self.state.output_node_stack[-1]
if not top[2]: return False
dtype = tf.as_dtype(node.attr["dtype"].type)
assert dtype == tf.float32, (
"Failed to quantized constant %s of type %s" % (node.name, dtype))
return True
  def eightbitize_nodes_recursively(self, current_node):
    """The entry point for transforming a graph into full eight bit."""
    if current_node.name in self.state.already_visited:
      # A node merged with a FakeQuant* consumer may have no other outputs.
      if (self.should_merge_with_fake_quant_node() or
          current_node.name in self.state.merged_with_fake_quant):
        raise ValueError("Unsupported graph structure: output of node %s "
                         "is processed by a FakeQuant* node and should have "
                         "no other outputs.", current_node.name)
      return
    self.state.already_visited[current_node.name] = True
    # Visit all inputs first, recording for each whether its value should
    # be quantized on the way into this node.
    for i, input_node_name in enumerate(current_node.input):
      quantize_input = False
      if current_node.op in ("MatMul", "Conv2D", "BiasAdd", "MaxPool",
                             "AvgPool", "Relu", "Relu6",
                             "BatchNormWithGlobalNormalization"):
        quantize_input = True
      elif current_node.op == "Concat" and i > 0:
        # Input 0 of Concat is the concat axis, which stays int32.
        quantize_input = (
            tf.as_dtype(current_node.attr["T"].type) == tf.float32)
      elif current_node.op == "Reshape" and i == 0:
        # Only the tensor input of Reshape is quantized, not the shape.
        quantize_input = (
            tf.as_dtype(current_node.attr["T"].type) == tf.float32)
      self.state.output_node_stack.append((current_node, i, quantize_input))
      input_node_name = node_name_from_input(input_node_name)
      input_node = self.nodes_map[input_node_name]
      self.eightbitize_nodes_recursively(input_node)
      self.state.output_node_stack.pop()
    # Dispatch on op type to the matching eight-bit rewrite.
    if current_node.op == "MatMul":
      self.eightbitize_mat_mul_node(current_node)
    elif current_node.op == "Conv2D":
      self.eightbitize_conv_node(current_node)
    elif current_node.op == "BiasAdd":
      self.eightbitize_bias_add_node(current_node)
    elif current_node.op == "MaxPool" or current_node.op == "AvgPool":
      self.eightbitize_single_input_tensor_node(current_node,
                                                self.add_pool_function)
    elif current_node.op == "Relu" or current_node.op == "Relu6":
      self.eightbitize_single_input_tensor_node(current_node,
                                                self.add_relu_function)
    elif (current_node.op == "Concat" and
          tf.as_dtype(current_node.attr["T"].type) == tf.float32):
      self.eightbitize_concat_node(current_node)
    elif current_node.op == "BatchNormWithGlobalNormalization":
      self.eightbitize_batch_norm_node(current_node)
    elif (current_node.op == "Reshape" and
          tf.as_dtype(current_node.attr["T"].type) == tf.float32):
      self.eightbitize_reshape_node(current_node)
    elif (self.input_range and
          current_node.op in ("Placeholder", "PlaceholderV2")):
      self.eightbitize_placeholder_node(current_node)
    elif current_node.op == "FakeQuantWithMinMaxVars":
      # It will have been merged into the underlying node.
      pass
    elif current_node.op == "Const":
      if self.should_quantize_const(current_node):
        for n in quantize_weight_eightbit(current_node, b"MIN_FIRST"):
          self.add_output_graph_node(n)
      else:
        new_node = tf.NodeDef()
        new_node.CopyFrom(current_node)
        self.add_output_graph_node(new_node)
    ###################################################################
    # Note: if more cases are added here, you may need to update the op
    # name lists in the loop over children at the start of the function.
    ###################################################################
    else:
      # Unsupported op: copy it through unchanged (its inputs were
      # dequantized back to float above, so this is always valid).
      new_node = tf.NodeDef()
      new_node.CopyFrom(current_node)
      self.add_output_graph_node(new_node)
    if (self.should_merge_with_fake_quant_node() and
        current_node.name not in self.state.merged_with_fake_quant):
      raise ValueError(
          "FakeQuant* node %s failed to merge with node %s of type %s" % (
              self.state.output_node_stack[-1][0], current_node.name,
              current_node.op))
def add_eightbit_prologue_nodes(self, original_node):
"""Adds input conversion nodes to handle quantizing the underlying node."""
namespace_prefix = original_node.name + "_eightbit"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
input_names = []
min_max_names = []
for original_input_name in original_node.input:
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name,
reduction_dims_name))
input_names.append(quantize_input_name)
min_max_names.append(min_input_name)
min_max_names.append(max_input_name)
all_input_names = []
all_input_names.extend(input_names)
all_input_names.extend(min_max_names)
return all_input_names
def add_common_quantization_nodes(self, namespace_prefix):
"""Builds constant nodes needed for quantization of inputs."""
reshape_dims_name = namespace_prefix + "_reshape_dims"
reduction_dims_name = namespace_prefix + "_reduction_dims"
reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
[1])
self.add_output_graph_node(reshape_dims_node)
reduction_dims_node = create_constant_node(reduction_dims_name, 0, tf.int32,
[1])
self.add_output_graph_node(reduction_dims_node)
return reshape_dims_name, reduction_dims_name
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
reshape_dims_name, reduction_dims_name):
"""Takes one float input to an op, and converts it to quantized form."""
unique_input_name = unique_node_name_from_input(original_input_name)
reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
min_input_name = namespace_prefix + "_min_" + unique_input_name
max_input_name = namespace_prefix + "_max_" + unique_input_name
quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
reshape_input_node = create_node("Reshape", reshape_input_name,
[original_input_name, reshape_dims_name])
set_attr_dtype(reshape_input_node, "T", tf.float32)
self.add_output_graph_node(reshape_input_node)
min_input_node = create_node("Min", min_input_name, [reshape_input_name,
reduction_dims_name])
set_attr_dtype(min_input_node, "T", tf.float32)
set_attr_bool(min_input_node, "keep_dims", False)
self.add_output_graph_node(min_input_node)
max_input_node = create_node("Max", max_input_name, [reshape_input_name,
reduction_dims_name])
set_attr_dtype(max_input_node, "T", tf.float32)
set_attr_bool(max_input_node, "keep_dims", False)
self.add_output_graph_node(max_input_node)
quantize_input_node = create_node("QuantizeV2", quantize_input_name,
[original_input_name, min_input_name,
max_input_name])
set_attr_dtype(quantize_input_node, "T", tf.quint8)
set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(quantize_input_node)
min_output_name = quantize_input_name + ":1"
max_output_name = quantize_input_name + ":2"
return quantize_input_name, min_output_name, max_output_name
  def add_quantize_down_nodes(self, original_node, quantized_output_name):
    """Adds nodes that requantize a qint32 result down to quint8.

    The target range comes from, in priority order: the min/max inputs of
    a merged FakeQuant* consumer, the --quantized_fallback_* constants, or
    a freshly added RequantizationRange node.  Returns the name of the
    Requantize node.
    """
    quantized_outputs = [
        quantized_output_name, quantized_output_name + ":1",
        quantized_output_name + ":2"
    ]
    min_max_inputs = None
    if self.should_merge_with_fake_quant_node():
      # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
      # Requantize.
      fake_quant_node = self.state.output_node_stack[-1][0]
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      assert original_node.name not in self.state.merged_with_fake_quant
      self.state.merged_with_fake_quant[original_node.name] = True
    elif self.fallback_quantization_range:
      min_max_inputs = ["fallback_quantization_min_value:0",
                        "fallback_quantization_max_value:0"]
    else:
      # Add a RequantizationRange node for finding the min and max values.
      requant_range_node = create_node(
          "RequantizationRange", original_node.name + "_eightbit_requant_range",
          quantized_outputs)
      set_attr_dtype(requant_range_node, "Tinput", tf.qint32)
      self.add_output_graph_node(requant_range_node)
      min_max_inputs = [requant_range_node.name + ":0",
                        requant_range_node.name + ":1"]
    requantize_node = create_node(
        "Requantize", original_node.name + "_eightbit_requantize",
        quantized_outputs + min_max_inputs)
    set_attr_dtype(requantize_node, "Tinput", tf.qint32)
    set_attr_dtype(requantize_node, "out_type", tf.quint8)
    self.add_output_graph_node(requantize_node)
    return requantize_node.name
  def add_dequantize_result_node(self, quantized_output_name,
                                 original_node_name, min_tensor_index=1):
    """Adds a Dequantize node converting a quantized result back to float.

    The Dequantize node takes the original node's name (or, when merged,
    the FakeQuant* consumer's name) so existing consumers keep working.
    min_tensor_index is the output port carrying the min value; max is
    assumed to be on the following port.
    """
    min_max_inputs = [
        "%s:%s" % (quantized_output_name, min_tensor_index),
        "%s:%s" % (quantized_output_name, (min_tensor_index + 1))]
    dequantize_name = original_node_name
    if self.should_merge_with_fake_quant_node():
      fake_quant_node = self.state.output_node_stack[-1][0]
      if original_node_name not in self.state.merged_with_fake_quant:
        # Prefer the FakeQuant node's explicit min/max inputs.
        min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
        self.state.merged_with_fake_quant[original_node_name] = True
      dequantize_name = fake_quant_node.name
    dequantize_node = create_node(
        "Dequantize", dequantize_name,
        [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)
def eightbitize_mat_mul_node(self, original_node):
"""Replaces a MatMul node with the eight bit equivalent sub-graph."""
quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_mat_mul_node = create_node(
"QuantizedMatMul", quantized_mat_mul_name,
all_input_names)
set_attr_dtype(quantized_mat_mul_node, "T1", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "T2", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "Toutput", tf.qint32)
copy_attr(quantized_mat_mul_node, "transpose_a",
original_node.attr["transpose_a"])
copy_attr(quantized_mat_mul_node, "transpose_b",
original_node.attr["transpose_b"])
self.add_output_graph_node(quantized_mat_mul_node)
quantize_down_name = self.add_quantize_down_nodes(original_node,
quantized_mat_mul_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_conv_node(self, original_node):
    """Replaces a Conv2D node with the eight bit equivalent sub-graph."""
    prologue_inputs = self.add_eightbit_prologue_nodes(original_node)
    conv_name = original_node.name + "_eightbit_quantized_conv"
    conv_node = create_node("QuantizedConv2D", conv_name, prologue_inputs)
    # Convolution geometry is unchanged; only the dtypes become quantized.
    for copied_attr in ("strides", "padding"):
        copy_attr(conv_node, copied_attr, original_node.attr[copied_attr])
    set_attr_dtype(conv_node, "Tinput", tf.quint8)
    set_attr_dtype(conv_node, "Tfilter", tf.quint8)
    set_attr_dtype(conv_node, "out_type", tf.qint32)
    self.add_output_graph_node(conv_node)
    requantize_name = self.add_quantize_down_nodes(original_node, conv_name)
    self.add_dequantize_result_node(requantize_name, original_node.name)
def eightbitize_bias_add_node(self, original_node):
    """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
    bias_add_name = (original_node.name +
                     "_eightbit_quantized_bias_add")
    prologue_inputs = self.add_eightbit_prologue_nodes(original_node)
    bias_add_node = create_node("QuantizedBiasAdd", bias_add_name,
                                prologue_inputs)
    # quint8 in, qint32 out -- the requantize stage brings it back down.
    for input_dtype_attr in ("T1", "T2"):
        set_attr_dtype(bias_add_node, input_dtype_attr, tf.quint8)
    set_attr_dtype(bias_add_node, "out_type", tf.qint32)
    self.add_output_graph_node(bias_add_node)
    requantize_name = self.add_quantize_down_nodes(original_node,
                                                   bias_add_name)
    self.add_dequantize_result_node(requantize_name, original_node.name)
def eightbitize_single_input_tensor_node(self, original_node,
                                         add_op_function):
    """Replaces a one-input float op with its quantized equivalent sub-graph.

    The single input tensor is quantized via the eight-bit prologue nodes,
    fed through the "Quantized" variant of the original op, and the result is
    dequantized back to float under the original node's name, so downstream
    float consumers are unaffected.

    Args:
      original_node: Float node to be converted.
      add_op_function: Callback that sets/copies the op-specific attributes
        on the newly created quantized node.
    """
    quantized_name = original_node.name + "_eightbit_quantized"
    quantized_op = "Quantized" + original_node.op
    prologue_inputs = self.add_eightbit_prologue_nodes(original_node)
    quantized_node = create_node(quantized_op, quantized_name,
                                 prologue_inputs)
    add_op_function(original_node, quantized_node)
    self.add_output_graph_node(quantized_node)
    self.add_dequantize_result_node(quantized_name, original_node.name)
def add_pool_function(self, original_node, quantized_op_node):
    """Copies pooling attributes onto a quantized pooling op."""
    set_attr_dtype(quantized_op_node, "T", tf.quint8)
    for pool_attr in ("ksize", "strides", "padding"):
        copy_attr(quantized_op_node, pool_attr,
                  original_node.attr[pool_attr])
def add_relu_function(self, unused_arg_node, quantized_op_node):
    """Sets the quint8 input dtype on a quantized Relu-style op."""
    set_attr_dtype(quantized_op_node, "Tinput", tf.quint8)
def eightbitize_concat_node(self, original_node):
    """Replaces a Concat node with the eight bit equivalent sub-graph.

    Every value input is run through its own quantize sub-graph, and the
    quantized tensors plus their min/max ranges feed a single QuantizedConcat
    node, whose output is dequantized back to float under the original node's
    name.

    Args:
      original_node: Float Concat node to be converted.
    """
    prefix = original_node.name + "_eightbit"
    concat_name = prefix + "_quantized_concat"
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        prefix)
    # Input 0 of Concat is the concat dimension; the rest are the values.
    dim_input_name = original_node.input[0]
    value_inputs = original_node.input[1:]
    # Quantize each value input in order (this appends the quantize sub-graph
    # nodes to the output graph as a side effect).
    quantized_triples = [
        self.eightbitize_input_to_node(prefix, value_input,
                                       reshape_dims_name,
                                       reduction_dims_name)
        for value_input in value_inputs]
    # QuantizedConcat expects: dim, all tensors, all mins, then all maxes.
    quantized_names = [triple[0] for triple in quantized_triples]
    min_names = [triple[1] for triple in quantized_triples]
    max_names = [triple[2] for triple in quantized_triples]
    all_input_names = ([dim_input_name] + quantized_names + min_names +
                       max_names)
    quantized_concat_node = create_node("QuantizedConcat", concat_name,
                                        all_input_names)
    set_attr_int(quantized_concat_node, "N", len(value_inputs))
    set_attr_dtype(quantized_concat_node, "T", tf.quint8)
    self.add_output_graph_node(quantized_concat_node)
    self.add_dequantize_result_node(concat_name, original_node.name)
def eightbitize_placeholder_node(self, current_node):
    """Replaces a placeholder node with a quint8 placeholder node+dequantize."""
    name = current_node.name
    # Convert the placeholder into a quantized type.
    output_node = tf.NodeDef()
    output_node.CopyFrom(current_node)
    set_attr_dtype(output_node, "dtype", tf.quint8)
    output_node.name += "_original_input"
    self.add_output_graph_node(output_node)
    # Add a dequantize to convert back to float.
    # NOTE(review): the min/max input names below reference nodes that are
    # presumably created by the quantized-input-range handling elsewhere --
    # confirm they exist before this runs.
    dequantize_node = create_node(
        "Dequantize", name,
        [output_node.name, "quantized_input_min_value",
         "quantized_input_max_value"])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)
    # For the descent over the graph to work, the dequantize node must be named
    # current_node.name. However, for the feeding of the graph to work, the
    # placeholder must have the name current_node.name; so record a final set
    # of renames to apply after all processing has been done.
    self.final_node_renames[output_node.name] = name
    self.final_node_renames[dequantize_node.name] = name + "_dequantize"
def eightbitize_reshape_node(self, original_node):
    """Replaces a Reshape node with the eight bit equivalent sub-graph.

    Args:
      original_node: Float Reshape node to be converted.
    """
    prefix = original_node.name + "_eightbit"
    reshape_name = prefix + "_quantized_reshape"
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        prefix)
    # Input 0 of Reshape is the data tensor; input 1 is the target shape.
    target_shape_name = original_node.input[1]
    quantized_data_name, min_name, max_name = (
        self.eightbitize_input_to_node(prefix, original_node.input[0],
                                       reshape_dims_name,
                                       reduction_dims_name))
    quantized_reshape_node = create_node(
        "QuantizedReshape", reshape_name,
        [quantized_data_name, target_shape_name, min_name, max_name])
    set_attr_dtype(quantized_reshape_node, "T", tf.quint8)
    self.add_output_graph_node(quantized_reshape_node)
    self.add_dequantize_result_node(reshape_name, original_node.name)
def eightbitize_batch_norm_node(self, original_node):
    """Replaces a BatchNormWithGlobalNormalization node with its 8-bit form."""
    prefix = original_node.name + "_eightbit"
    batch_norm_name = prefix + "_quantized_batch_norm"
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        prefix)
    # The first five inputs are, in order: value, mean, variance, beta,
    # gamma. Each is quantized independently and contributes a
    # (tensor, min, max) triple, in that order, to the new node's inputs.
    all_input_names = []
    for float_input_name in original_node.input[:5]:
        quantized_triple = self.eightbitize_input_to_node(
            prefix, float_input_name, reshape_dims_name,
            reduction_dims_name)
        all_input_names.extend(quantized_triple)
    quantized_batch_norm_node = create_node(
        "QuantizedBatchNormWithGlobalNormalization", batch_norm_name,
        all_input_names)
    set_attr_dtype(quantized_batch_norm_node, "Tinput", tf.quint8)
    set_attr_dtype(quantized_batch_norm_node, "out_type", tf.qint32)
    for copied_attr in ("scale_after_normalization", "variance_epsilon"):
        copy_attr(quantized_batch_norm_node, copied_attr,
                  original_node.attr[copied_attr])
    self.add_output_graph_node(quantized_batch_norm_node)
    requantize_name = self.add_quantize_down_nodes(original_node,
                                                   batch_norm_name)
    self.add_dequantize_result_node(requantize_name, original_node.name)
def add_output_graph_node(self, output_node):
    """Inserts one node into the new graph."""
    # extend() copies the NodeDef message into the output GraphDef.
    self.output_graph.node.extend([output_node])
def remove_redundant_quantization(self, old_graph):
    """Removes unneeded pairs of quantize/dequantize ops from the graph.

    This is a bit of a tricky function, because it's attempting to spot the
    pattern of dequantizing from eight-bit up to float, and then immediately
    quantizing back down to eight bits again, that's introduced by previous
    passes that do 'key-hole' conversions of individual nodes but have to
    convert back to float to match the previous output interface, since they
    don't know that the next op can handle quantized tensors.

    It works by:
    - Looking for Quantize nodes.
    - Checking to see if their first input is a Dequantize node.
    - Seeing if their min/max inputs come from Min/Max nodes.
    - Making sure those Min/Max nodes are being fed from the same Dequantize.
    - Or that the Min is indirectly being fed from the same Dequantize as Max.
    - Making sure the Dequantize is going through a Reshape (which we add
      during the previous pass when we create the quantize sub-graph).
    - Looking for the dims Const op for the Min/Max dims.

    If all of these conditions are met, then it's a sub-graph pattern that
    we know how to optimize out (and is likely the common one we've introduced).
    We then rewire the graph to skip it entirely, and then rely on the dead node
    removal pass to get rid of any nodes that are no longer needed.

    Args:
      old_graph: The model we'll be stripping redundant nodes from.

    Returns:
      A graph with the unnecessary nodes removed.

    Raises:
      ValueError: Two nodes with the same name were found in the graph.
    """
    old_nodes_map = self.create_nodes_map(old_graph)
    self.output_graph = tf.GraphDef()
    # Maps "tensor:port" names to the names they should be rewired to.
    inputs_to_rename = {}
    # We go through all the nodes, looking for any that match the patterns we
    # know how to optimize away.
    for node in old_graph.node:
        # We always start with a Quantize node, and examine its inputs to see if
        # they are in a form that can be removed.
        if node.op not in ["Quantize", "QuantizeV2"]:
            continue
        dequantize_node_name = node_name_from_input(node.input[0])
        if dequantize_node_name not in old_nodes_map:
            raise ValueError("Input node name '" + dequantize_node_name +
                             "' not found in node '" + node.name + "'")
        dequantize_node = old_nodes_map[dequantize_node_name]
        # Do we have a Dequantize feeding in, with the same type as the Quantize?
        if dequantize_node.op != "Dequantize":
            continue
        if node.attr["T"] != dequantize_node.attr["T"]:
            continue
        # Now look at the other inputs, and ensure they're Min/Max nodes.
        min_node_name = node_name_from_input(node.input[1])
        max_node_name = node_name_from_input(node.input[2])
        min_node = old_nodes_map[min_node_name]
        max_node = old_nodes_map[max_node_name]
        is_min_right_type = (min_node.op in ["Min", "Dequantize"])
        is_max_right_type = (max_node.op in ["Max", "Dequantize"])
        if not is_min_right_type or not is_max_right_type:
            print("Didn't find expected types on inputs : %s, %s." % (
                min_node.op, max_node.op))
            continue
        min_node_input_name = node_name_from_input(min_node.input[0])
        max_node_input_name = node_name_from_input(max_node.input[0])
        # There are two different patterns for Min nodes we can recognize, one
        # where the input comes directly from the same one as the Max, and
        # another where we run it through another Min first, so check for both.
        is_same_input = False
        if min_node_input_name == max_node_input_name:
            is_same_input = True
        else:
            first_min_node_input = old_nodes_map[min_node_input_name]
            if first_min_node_input.op == "Concat":
                second_min_node_name = node_name_from_input(
                    first_min_node_input.input[1])
                second_min_node = old_nodes_map[second_min_node_name]
                if second_min_node.op == "Min":
                    second_min_node_input_name = node_name_from_input(
                        second_min_node.input[0])
                    is_same_input = (second_min_node_input_name == max_node_input_name)
        if not is_same_input:
            print("Different min/max inputs: " + min_node_input_name)
            continue
        # We recognize this pattern, so mark the graph edges to be rewired to
        # route around it entirely, since we know it's a no-op.
        dequantize_source_name = node_name_from_input(dequantize_node.input[0])
        node_tensor_name = ensure_tensor_name_has_port(node.name)
        min_tensor_name = node.name + ":1"
        max_tensor_name = node.name + ":2"
        inputs_to_rename[node_tensor_name] = dequantize_source_name
        inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
        inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
    # Finally we apply all the rewiring we've marked to the graph.
    for node in old_graph.node:
        for index, input_full_name in enumerate(node.input):
            input_name = ensure_tensor_name_has_port(input_full_name)
            if input_name in inputs_to_rename:
                node.input[index] = inputs_to_rename[input_name]
        self.add_output_graph_node(node)
    return self.output_graph
def apply_final_node_renames(self):
    """Applies node renames in self.final_node_renames to self.output_graph."""
    old_graph = self.output_graph
    self.output_graph = tf.GraphDef()
    for node in old_graph.node:
        node.name = self.final_node_renames.get(node.name, node.name)
        for index, input_name in enumerate(node.input):
            node_name = node_name_from_input(input_name)
            input_full_name = ensure_tensor_name_has_port(input_name)
            if node_name in self.final_node_renames:
                # Replace only the node-name prefix so any ":port" suffix on
                # the input string is preserved.
                node.input[index] = "%s%s" % (self.final_node_renames[node_name],
                                              input_full_name[len(node_name):])
        self.add_output_graph_node(node)
    return self.output_graph
def remove_dead_nodes(self, output_names):
    """Removes nodes that are no longer needed for inference from the graph."""
    unpruned_graph = self.output_graph
    self.output_graph = graph_util.extract_sub_graph(unpruned_graph,
                                                     output_names)
def quantize_weights(self, input_graph, quantization_mode):
    """Quantize float Const ops.

    There are two modes of operations, both replace float Const ops with
    quantized values.
    1. If quantization_mode is "weights_rounded", this function replaces float
    Const ops with quantized float Const ops - same as the original op, but
    float values being mapped to the center of one of 1<<FLAGS.bitdepth buckets.
    This does not change the raw model size, but compression algorithms such as
    zip (as used for compressing apks) or bzip2 will achieve a very good
    compression ratio.
    2. For other quantization modes ("MIN_COMBINED" or "MIN_FIRST"), float
    Const ops are quantized and replaced by a tuple of four ops to perform
    the dequantization at runtime:
    * eight-bit Const (bucket indices, same shape as original float Const op
    * two float Const ops (min and max value of original float Const op)
    * Dequantize op to convert the eight-bit consts to float tensors.
    The quantization mode is important because we see accuracy problems when
    quantizing weights for different situations depending on the algorithm
    used. We haven't figured out exactly what the underlying cause is yet,
    unfortunately.

    Args:
      input_graph: A GraphDef of the model containing float Const ops.
      quantization_mode: How to quantize and dequantize the values.

    Returns:
      A GraphDef of the converted graph.

    Raises:
      ValueError: If quantization_mode is unsupported.
    """
    output_graph = tf.GraphDef()
    for input_node in input_graph.node:
        # Only float32 Const ops get quantized; everything else is copied
        # through unchanged.
        should_quantize = False
        if input_node.op == "Const":
            dtype = tf.as_dtype(input_node.attr["dtype"].type)
            if dtype == tf.float32:
                should_quantize = True
        if should_quantize:
            if quantization_mode == "weights_rounded":
                output_graph.node.extend(quantize_weight_rounded(input_node))
            # NOTE(review): these are bytes literals; a str-valued mode would
            # not match under Python 3 -- confirm callers pass bytes.
            elif quantization_mode in (b"MIN_COMBINED", b"MIN_FIRST"):
                output_graph.node.extend(quantize_weight_eightbit(input_node,
                                                                  quantization_mode))
            else:
                raise ValueError("Unsupported quantization mode %s." %
                                 quantization_mode)
        else:
            output_node = tf.NodeDef()
            output_node.CopyFrom(input_node)
            output_graph.node.extend([output_node])
    return output_graph
def set_input_graph(self, new_input_graph):
    # Replace the graph being rewritten and rebuild the name->node lookup map.
    self.input_graph = new_input_graph
    self.nodes_map = self.create_nodes_map(self.input_graph)
def main(unused_args):
    """Loads FLAGS.input, rewrites it according to FLAGS.mode and writes the
    resulting GraphDef to FLAGS.output. Returns 0 on success, -1 on bad args."""
    if not tf.gfile.Exists(FLAGS.input):
        print("Input graph file '" + FLAGS.input + "' does not exist!")
        return -1

    known_modes = ["round", "quantize", "eightbit", "weights", "test",
                   "weights_rounded"]
    # Exact membership test. The previous check,
    # any(FLAGS.mode in s for s in known_modes), was a substring test that
    # accepted partial mode names such as "eight" or even the empty string.
    if FLAGS.mode not in known_modes:
        print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
              ".")
        return -1

    tf_graph = tf.GraphDef()
    with tf.gfile.Open(FLAGS.input, "rb") as f:
        data = f.read()
    tf_graph.ParseFromString(data)

    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(tf_graph, input_map={}, name="")

    quantized_input_range = None
    if FLAGS.quantized_input:
        quantized_input_range = [FLAGS.quantized_input_min,
                                 FLAGS.quantized_input_max]

    fallback_quantization_range = None
    if (FLAGS.quantized_fallback_min is not None or
            FLAGS.quantized_fallback_max is not None):
        # Both ends of the fallback range must be supplied together.
        assert FLAGS.quantized_fallback_min is not None
        assert FLAGS.quantized_fallback_max is not None
        fallback_quantization_range = [FLAGS.quantized_fallback_min,
                                       FLAGS.quantized_fallback_max]

    rewriter = GraphRewriter(tf_graph, FLAGS.mode, quantized_input_range,
                             fallback_quantization_range)

    output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))

    # Close the output file deterministically instead of leaking the handle.
    with tf.gfile.FastGFile(FLAGS.output, "wb") as f:
        f.write(output_graph.SerializeToString())

    return 0
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
akaariai/django | tests/admin_docs/models.py | 82 | 1592 | """
Models for testing various aspects of the django.contrib.admindocs app
"""
from django.db import models
class Company(models.Model):
    # Minimal model used as the target of Person.company.
    name = models.CharField(max_length=200)
class Group(models.Model):
    # Minimal model used as the target of Person.groups.
    name = models.CharField(max_length=200)
class Family(models.Model):
    # Minimal model used as the target of Person.family.
    last_name = models.CharField(max_length=200)
class Person(models.Model):
    """
    Stores information about a person, related to :model:`myapp.Company`.

    **Notes**

    Use ``save_changes()`` when saving this object.

    ``company``
        Field storing :model:`myapp.Company` where the person works.

    (DESCRIPTION)

    .. raw:: html
        :file: admin_docs/evilfile.txt

    .. include:: admin_docs/evilfile.txt
    """
    # NOTE: the docstring above is fixture data for the admindocs tests
    # (model reference rendering, raw/include directive handling) -- do not
    # reformat it.
    first_name = models.CharField(max_length=200, help_text="The person's first name")
    last_name = models.CharField(max_length=200, help_text="The person's last name")
    company = models.ForeignKey(Company, help_text="place of work")
    family = models.ForeignKey(Family, related_name='+', null=True)
    groups = models.ManyToManyField(Group, help_text="has membership")

    def _get_full_name(self):
        # Non-public helper (leading underscore).
        return "%s %s" % (self.first_name, self.last_name)

    # The stub methods below are introspection targets for the admindocs
    # tests (presumably for accessor/mutator name filtering -- see the tests
    # that use this model).
    def add_image(self):
        pass

    def delete_image(self):
        pass

    def save_changes(self):
        pass

    def set_status(self):
        pass

    def get_full_name(self):
        """
        Get the full name of the person
        """
        return self._get_full_name()

    def get_status_count(self):
        return 0

    def get_groups_list(self):
        return []
| bsd-3-clause |
krzk/tizen-tv-rpi-linux | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches a function header line: "<name>: [0xstart-0xend]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches a region line carrying its length: "... rlen=N".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Report a mismatch between a function's slot count and the sum of its
    # unwind region lengths. `start` and `end` are module-level globals set
    # by the parsing loop below, used when the function has no symbolic name.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Scan readelf's unwind dump: each function header starts a new accumulation,
# and every rlen= line adds to the running region-length total.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: validate the previous function first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # Three instruction slots per 16-byte IA-64 bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the final function in the dump.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
glensc/libcomps | libcomps/src/python/tests/test_merge_comps.py | 1 | 1916 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import utest
try:
import _libpycomps as libcomps
except ImportError:
import libcomps
class TestMergeComps(unittest.TestCase):
    """Checks that merging comps documents is idempotent (merging a document
    with itself is a no-op) and that merge order decides which side wins."""

    def setUp(self):
        self.maxDiff = None
        self.comps = libcomps.Comps()

    def _load(self, path):
        # Parse a comps XML file into a fresh Comps object.
        comps = libcomps.Comps()
        comps.fromxml_f(path)
        return comps

    def _check_merge(self, first, second, expected):
        # Merge `first` then `second` into self.comps and compare against the
        # pre-merged reference document `expected` (later merge overrides).
        self.comps += self._load(first)
        self.comps += self._load(second)
        merged_comps = self._load(expected)
        self.assertEqual(self.comps.xml_str(), merged_comps.xml_str())
        self.assertTrue(self.comps == merged_comps)

    def test_merge_1_1(self):
        # first identity test
        c1 = self._load("comps/comps_part1.xml")
        self.comps += c1
        self.comps += c1
        self.assertEqual(self.comps, c1)
        self.assertEqual(self.comps.xml_str(), c1.xml_str())

    def test_merge_2_2(self):
        # second identity test
        c2 = self._load("comps/comps_part2.xml")
        self.comps += c2
        self.comps += c2
        self.assertEqual(self.comps, c2)
        self.assertEqual(self.comps.xml_str(), c2.xml_str())

    def test_merge_1_2(self):
        # c2 overrides c1
        self._check_merge("comps/comps_part1.xml", "comps/comps_part2.xml",
                          "comps/merged_comps_1_2.xml")

    def test_merge_2_1(self):
        # c1 overrides c2
        self._check_merge("comps/comps_part2.xml", "comps/comps_part1.xml",
                          "comps/merged_comps_2_1.xml")
unittest.main(testRunner = utest.MyRunner)
| gpl-2.0 |
starrify/scrapy | scrapy/downloadermiddlewares/redirect.py | 6 | 4501 | import logging
from urllib.parse import urljoin, urlparse
from w3lib.url import safe_url_string
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware:
    """Shared redirect bookkeeping: hop counting/TTL and GET-downgrade cloning.

    Subclasses set ``enabled_setting`` and decide *when* to redirect; this
    base class decides *whether* another hop is still allowed.
    """

    enabled_setting = 'REDIRECT_ENABLED'

    def __init__(self, settings):
        if not settings.getbool(self.enabled_setting):
            raise NotConfigured
        self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
        self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def _redirect(self, redirected, request, spider, reason):
        """Return `redirected` with redirect history carried over, or raise
        IgnoreRequest once the chain exceeds its limits."""
        # Remaining hops for this chain; initialised on the first redirect.
        ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
        redirects = request.meta.get('redirect_times', 0) + 1
        if ttl and redirects <= self.max_redirect_times:
            # Carry the redirect history forward onto the new request.
            redirected.meta['redirect_times'] = redirects
            redirected.meta['redirect_ttl'] = ttl - 1
            redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + [request.url]
            redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + [reason]
            redirected.dont_filter = request.dont_filter
            redirected.priority = request.priority + self.priority_adjust
            logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
                         {'reason': reason, 'redirected': redirected, 'request': request},
                         extra={'spider': spider})
            return redirected
        else:
            logger.debug("Discarding %(request)s: max redirections reached",
                         {'request': request}, extra={'spider': spider})
            raise IgnoreRequest("max redirections reached")

    def _redirect_request_using_get(self, request, redirect_url):
        """Clone `request` at `redirect_url` as a body-less GET (the entity
        headers no longer apply once the body is dropped)."""
        redirected = request.replace(url=redirect_url, method='GET', body='')
        redirected.headers.pop('Content-Type', None)
        redirected.headers.pop('Content-Length', None)
        return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
    """
    Handle redirection of requests based on response status
    and meta-refresh html tag.
    """

    def process_response(self, request, response, spider):
        # Respect explicit opt-outs: dont_redirect, or statuses that the
        # spider or this specific request has asked to handle itself.
        if (
            request.meta.get('dont_redirect', False)
            or response.status in getattr(spider, 'handle_httpstatus_list', [])
            or response.status in request.meta.get('handle_httpstatus_list', [])
            or request.meta.get('handle_httpstatus_all', False)
        ):
            return response

        allowed_status = (301, 302, 303, 307, 308)
        if 'Location' not in response.headers or response.status not in allowed_status:
            return response

        location = safe_url_string(response.headers['Location'])
        # A scheme-relative '//host/path' Location keeps the request's scheme.
        if response.headers['Location'].startswith(b'//'):
            request_scheme = urlparse(request.url).scheme
            location = request_scheme + '://' + location.lstrip('/')
        redirected_url = urljoin(request.url, location)

        # 301/307/308 (and any HEAD request) preserve method and body;
        # 302/303 on non-HEAD requests are reissued as GET.
        if response.status in (301, 307, 308) or request.method == 'HEAD':
            redirected = request.replace(url=redirected_url)
            return self._redirect(redirected, request, spider, response.status)

        redirected = self._redirect_request_using_get(request, redirected_url)
        return self._redirect(redirected, request, spider, response.status)
class MetaRefreshMiddleware(BaseRedirectMiddleware):
    """Follow redirects declared via HTML meta-refresh tags."""

    enabled_setting = 'METAREFRESH_ENABLED'

    def __init__(self, settings):
        super().__init__(settings)
        self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')
        self._maxdelay = settings.getint('METAREFRESH_MAXDELAY')

    def process_response(self, request, response, spider):
        # Only HTML responses can carry a meta refresh; HEAD requests and
        # explicit opt-outs pass through untouched.
        if (
            request.meta.get('dont_redirect', False)
            or request.method == 'HEAD'
            or not isinstance(response, HtmlResponse)
        ):
            return response

        interval, url = get_meta_refresh(response,
                                         ignore_tags=self._ignore_tags)
        # Only follow refreshes scheduled sooner than METAREFRESH_MAXDELAY.
        if url and interval < self._maxdelay:
            redirected = self._redirect_request_using_get(request, url)
            return self._redirect(redirected, request, spider, 'meta refresh')
        return response
| bsd-3-clause |
jemmyw/ansible-modules-extras | system/at.py | 146 | 6419 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: at
short_description: Schedule the execution of a command or script file via the at command.
description:
- Use this module to schedule a command or script file to run once in the future.
- All jobs are executed in the 'a' queue.
version_added: "1.5"
options:
command:
description:
- A command to be executed in the future.
required: false
default: null
script_file:
description:
- An existing script file to be executed in the future.
required: false
default: null
count:
description:
- The count of units in the future to execute the command or script file.
required: true
units:
description:
- The type of units in the future to execute the command or script file.
required: true
choices: ["minutes", "hours", "days", "weeks"]
state:
description:
- The state dictates if the command or script file should be evaluated as present(added) or absent(deleted).
required: false
choices: ["present", "absent"]
default: "present"
unique:
description:
- If a matching job is present a new job will not be added.
required: false
default: false
requirements:
- at
author: "Richard Isaacson (@risaacson)"
'''
EXAMPLES = '''
# Schedule a command to execute in 20 minutes as root.
- at: command="ls -d / > /dev/null" count=20 units="minutes"
# Match a command to an existing job and delete the job.
- at: command="ls -d / > /dev/null" state="absent"
# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
'''
import os
import tempfile
def add_job(module, result, at_cmd, count, units, command, script_file):
    # Schedule script_file to run once, `count` `units` from now.
    at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
    rc, out, err = module.run_command(at_command, check_rc=True)
    if command:
        # script_file was a temp file generated from `command`; clean it up.
        os.unlink(script_file)
    result['changed'] = True
def delete_job(module, result, at_cmd, command, script_file):
    # Remove every queued at job whose script matches script_file's contents.
    for matching_job in get_matching_jobs(module, at_cmd, script_file):
        at_command = "%s -d %s" % (at_cmd, matching_job)
        rc, out, err = module.run_command(at_command, check_rc=True)
        result['changed'] = True
    if command:
        os.unlink(script_file)
    # NOTE: exit_json() terminates the module here; this function never returns.
    module.exit_json(**result)
def get_matching_jobs(module, at_cmd, script_file):
    """Return the at job numbers whose job script contains script_file's text.

    Args:
        module: AnsibleModule used to locate binaries and run commands.
        at_cmd: Path to the `at` binary.
        script_file: Path of the script whose contents identify the job.
    """
    matching_jobs = []

    atq_cmd = module.get_bin_path('atq', True)

    # Get list of job numbers for the user.
    atq_command = "%s" % atq_cmd
    rc, out, err = module.run_command(atq_command, check_rc=True)
    current_jobs = out.splitlines()
    if len(current_jobs) == 0:
        return matching_jobs

    # Read script_file into a string, closing the handle deterministically
    # (the previous code leaked the open file object).
    with open(script_file) as script_fileh:
        script_file_string = script_fileh.read().strip()

    # Loop through the jobs.
    # If the script text is contained in a job add job number to list.
    for current_job in current_jobs:
        split_current_job = current_job.split()
        at_command = "%s -c %s" % (at_cmd, split_current_job[0])
        rc, out, err = module.run_command(at_command, check_rc=True)
        if script_file_string in out:
            matching_jobs.append(split_current_job[0])

    # Return the list.
    return matching_jobs
def create_tempfile(command):
    """Write `command` into a new temporary file and return its path.

    The caller is responsible for deleting the file when done.
    """
    descriptor, path = tempfile.mkstemp(prefix='at')
    with os.fdopen(descriptor, 'w') as handle:
        handle.write(command)
    return path
def main():
    # Module entry point: parse arguments, then add or delete an at job.
    module = AnsibleModule(
        argument_spec = dict(
            command=dict(required=False,
                         type='str'),
            script_file=dict(required=False,
                             type='str'),
            count=dict(required=False,
                       type='int'),
            units=dict(required=False,
                       default=None,
                       choices=['minutes', 'hours', 'days', 'weeks'],
                       type='str'),
            state=dict(required=False,
                       default='present',
                       choices=['present', 'absent'],
                       type='str'),
            unique=dict(required=False,
                        default=False,
                        type='bool')
        ),
        mutually_exclusive=[['command', 'script_file']],
        required_one_of=[['command', 'script_file']],
        supports_check_mode=False
    )

    at_cmd = module.get_bin_path('at', True)

    command = module.params['command']
    script_file = module.params['script_file']
    count = module.params['count']
    units = module.params['units']
    state = module.params['state']
    unique = module.params['unique']

    if (state == 'present') and (not count or not units):
        module.fail_json(msg="present state requires count and units")

    result = {'state': state, 'changed': False}

    # If command transform it into a script_file
    if command:
        script_file = create_tempfile(command)

    # if absent remove existing and return
    # (delete_job calls module.exit_json() and does not return)
    if state == 'absent':
        delete_job(module, result, at_cmd, command, script_file)

    # if unique if existing return unchanged
    if unique:
        if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
            if command:
                os.unlink(script_file)
            module.exit_json(**result)

    result['script_file'] = script_file
    result['count'] = count
    result['units'] = units

    add_job(module, result, at_cmd, count, units, command, script_file)

    module.exit_json(**result)
# import module snippets -- AnsibleModule and its helpers are injected here.
from ansible.module_utils.basic import *  # noqa: F401,F403

# Guard the entry point so importing this file (e.g. for tests or docs
# generation) does not execute the module; Ansible runs it as __main__.
if __name__ == '__main__':
    main()
| gpl-3.0 |
RunningLight/machinekit | share/gscreen/skins/industrial/industrial_handler.py | 20 | 17237 | import hal
import gtk
import gladevcp.makepins # needed for the dialog's calulator widget
import pango
# Mode indexes used with gscreen's data.mode_order/mode_labels tables,
# plus the system-page lock flag (1 = locked, 0 = unlocked).
_MAN = 0;_MDI = 1;_AUTO = 2;_LOCKTOGGLE = 1
# This is a handler file for using Gscreen's infrastructure
# to load a completely custom glade screen
# The only things that really matters is that it's saved as a GTK builder project,
# the toplevel window is caller window1 (The default name) and you connect a destroy
# window signal else you can't close down linuxcnc
class HandlerClass:
    """Signal-handler object for this custom Gscreen glade screen.

    Gscreen builds one instance (via get_handlers) and routes the GTK
    signal callbacks named in the glade file to the methods below.
    """

    # This will be pretty standard to gain access to everything
    # emc is for control and status of linuxcnc
    # data is important data from gscreen and linuxcnc
    # widgets is all the widgets from the glade files
    # gscreen is for access to gscreens methods
    def __init__(self, halcomp, builder, useropts, gscreen):
        self.emc = gscreen.emc
        self.data = gscreen.data
        self.widgets = gscreen.widgets
        self.gscreen = gscreen

    # This is a new method that calls a gscreen method to toggle the DRO units
    # Gscreen's regular unit button saves the state
    # for startup, This one just changes it for the session
    def on_metric_select_clicked(self, widget):
        data = (self.data.dro_units - 1) * -1  # toggle 0 <-> 1
        self.gscreen.set_dro_units(data, False)
        for i in ("1", "2", "3"):
            for letter in self.data.axis_list:
                axis = "dro_%s%s" % (letter, i)
                try:
                    self.widgets[axis].set_property("display_units_mm", data)
                except:
                    # not every axis has all three readout widgets
                    pass

    def on_diameter_mode_pressed(self, widget):
        """Switch the X-axis DRO widgets between diameter and radius display."""
        data = widget.get_active()
        print "switch diam mode", data
        self.gscreen.set_diameter_mode(data)
        for i in ("1", "2", "3"):
            axis = "dro_x%s" % (i)
            if data:
                self.widgets[axis].set_to_diameter()
            else:
                self.widgets[axis].set_to_radius()

    # This is a new method for our button
    # we selected this method name in the glade file as a signal callback
    def on_estop_clicked(self, *args):
        """Toggle estop: reset it when estopped, otherwise estop and power off."""
        print "estop"
        if self.data.estopped:
            self.emc.estop_reset(1)
            self.gscreen.add_alarm_entry("Machine Estop Reset")
        else:
            self.emc.machine_off(1)
            self.emc.estop(1)
            self.widgets.on_label.set_text("Machine Off")
            self.gscreen.add_alarm_entry("Machine Estopped and Powered Off")
        return True

    # This is a new method for our new button
    # we selected this method name in the glade file as a signal callback
    def on_machine_state_clicked(self, widget):
        """Toggle machine power; ignored while the machine is estopped."""
        if self.data.estopped:
            return
        elif not self.data.machine_on:
            self.emc.machine_on(1)
            self.widgets.on_label.set_text("Machine On")
            self.gscreen.add_alarm_entry("Machine powered on")
        else:
            self.emc.machine_off(1)
            self.widgets.on_label.set_text("Machine Off")
            self.gscreen.add_alarm_entry("Machine powered off")

    # display the main tab and set the mode to setup
    def on_setup_button_clicked(self, widget):
        self.widgets.notebook_main.set_current_page(0)
        self.data.mode_order = _MAN, _MDI, _AUTO
        label = self.data.mode_labels
        self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.toggle_modes(widget)

    # display the main tab and set the mode to run
    def on_run_button_clicked(self, widget):
        self.widgets.notebook_main.set_current_page(0)
        self.data.mode_order = _AUTO, _MAN, _MDI
        label = self.data.mode_labels
        self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.toggle_modes(widget)

    # display the main tab and set the mode to MDI
    def on_mdi_button_clicked(self, widget):
        self.widgets.notebook_main.set_current_page(0)
        self.data.mode_order = _MDI, _MAN, _AUTO
        label = self.data.mode_labels
        self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.toggle_modes(widget)

    # This is called when the system button is toggled
    # If the page is not showing it displays a unlock code dialog
    # unless you have already unlocked the page.
    # if that returns true then the page is shown
    # otherwise the button is untoggled and the page is not shown
    # if you press the system buttonn when the system page is already showing
    # it will relock the page
    def on_system_button_clicked(self, widget):
        if self.widgets.notebook_main.get_current_page() == 4:
            # Page already showing: re-lock it and bounce back to setup.
            # block/unblock stops our own "toggled" handler from recursing.
            self.gscreen.block("system_button")
            widget.set_active(True)
            self.gscreen.unblock("system_button")
            global _LOCKTOGGLE
            _LOCKTOGGLE = 1
            self.widgets.system_button.set_label(" System\n(Locked)")
            self.gscreen.add_alarm_entry("System page re-locked")
            self.on_setup_button_clicked(self.widgets.setup_button)
            return
        if not self.system_dialog():
            # Unlock cancelled or wrong code: untoggle without recursing.
            self.gscreen.block("system_button")
            widget.set_active(False)
            self.gscreen.unblock("system_button")
            return
        self.widgets.notebook_main.set_current_page(4)
        self.toggle_modes(widget)
        self.widgets.system_button.set_label(" System\n ")

    # Display the tooledit tab
    def on_tooledit_button_clicked(self, widget):
        self.widgets.notebook_main.set_current_page(3)
        self.toggle_modes(widget)

    # Display the offsetpage tab
    def on_offsetpage_button_clicked(self, widget):
        self.widgets.notebook_main.set_current_page(2)
        self.toggle_modes(widget)

    # This toggles the buttons so only one is presses at any one time
    def toggle_modes(self, widget):
        temp = "setup_button", "mdi_button", "run_button", "tooledit_button", "system_button", "offsetpage_button"
        for i in temp:
            state = False
            if self.widgets[i] == widget: state = True
            # block the signal so set_active() doesn't re-enter the callbacks
            self.gscreen.block(i)
            self.widgets[i].set_active(state)
            self.gscreen.unblock(i)

    def on_button_edit_clicked(self, widget):
        """Enter/leave gcode edit mode and swap the side boxes accordingly."""
        state = widget.get_active()
        if not state:
            # leaving edit mode: ask gscreen to check for unsaved changes
            self.gscreen.edited_gcode_check()
        self.widgets.notebook_main.set_current_page(0)
        self.widgets.notebook_main.set_show_tabs(not (state))
        self.gscreen.edit_mode(state)
        if not state and self.widgets.button_full_view.get_active():
            self.gscreen.set_full_graphics_view(True)
        if self.data.edit_mode:
            self.widgets.mode_select_box.hide()
            self.widgets.search_box.show()
            self.widgets.button_edit.set_label("Exit\nEdit")
        else:
            self.widgets.mode_select_box.show()
            self.widgets.search_box.hide()
            self.widgets.button_edit.set_label("Edit")
            self.widgets.notebook_main.set_show_tabs(False)

    # This dialog is for unlocking the system tab
    # The unlock code number is defined at the top of the page
    def system_dialog(self):
        """Show the unlock-code dialog; return True when the page is unlocked."""
        global _LOCKTOGGLE
        if _LOCKTOGGLE == 0: return True  # already unlocked this session
        dialog = gtk.Dialog("Enter System Unlock Code",
                            self.widgets.window1,
                            gtk.DIALOG_DESTROY_WITH_PARENT,
                            (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                             gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
        label = gtk.Label("Enter System Unlock Code")
        label.modify_font(pango.FontDescription("sans 20"))
        calc = gladevcp.Calculator()
        dialog.vbox.pack_start(label)
        dialog.vbox.add(calc)
        calc.set_value("")
        calc.set_property("font", "sans 20")
        calc.set_editable(True)
        # pressing Enter in the entry accepts the dialog
        calc.entry.connect("activate", lambda w: dialog.emit('response', gtk.RESPONSE_ACCEPT))
        dialog.parse_geometry("400x400")
        dialog.set_decorated(False)
        dialog.show_all()
        calc.num_pad_only(True)
        calc.integer_entry_only(True)
        response = dialog.run()
        code = calc.get_value()
        dialog.destroy()
        if response == gtk.RESPONSE_ACCEPT:
            if code == int(self.data.unlock_code):
                self.gscreen.add_alarm_entry("System page unlocked")
                _LOCKTOGGLE = 0
                return True
        _LOCKTOGGLE = 1
        return False

    def on_abs_colorbutton_color_set(self, widget):
        """Recolor the absolute-position DRO labels from the chosen color."""
        self.gscreen.set_abs_color()
        color = self.data.abs_color
        # 0..11 is the pango attribute's index range -- presumably wide
        # enough to cover the whole readout text; confirm against the labels.
        fg_color = pango.AttrForeground(color[0], color[1], color[2], 0, 11)
        for i in self.data.axis_list:
            axis = "dro_%s1" % i
            attr = self.widgets[axis].get_attributes()
            attr.insert(fg_color)
            self.widgets[axis].set_attributes(attr)

    def on_rel_colorbutton_color_set(self, widget):
        """Recolor the relative-position DRO labels from the chosen color."""
        self.gscreen.set_rel_color()
        color = self.data.rel_color
        fg_color = pango.AttrForeground(color[0], color[1], color[2], 0, 11)
        for i in self.data.axis_list:
            axis = "dro_%s2" % i
            attr = self.widgets[axis].get_attributes()
            attr.insert(fg_color)
            self.widgets[axis].set_attributes(attr)

    def on_dtg_colorbutton_color_set(self, widget):
        """Recolor the distance-to-go DRO labels from the chosen color."""
        self.gscreen.set_dtg_color()
        color = self.data.dtg_color
        fg_color = pango.AttrForeground(color[0], color[1], color[2], 0, 11)
        for i in self.data.axis_list:
            axis = "dro_%s3" % i
            attr = self.widgets[axis].get_attributes()
            attr.insert(fg_color)
            self.widgets[axis].set_attributes(attr)

    def on_hal_status_not_all_homed(self, widget, data):
        """hal_status callback: blank the home marks of unhomed axes and log them."""
        temp = []
        for letter in self.data.axis_list:
            axnum = "xyzabcuvws".index(letter)
            if str(axnum) in data:
                self.widgets["home_%s" % letter].set_text(" ")
                temp.append(" %s" % letter.upper())
        self.gscreen.add_alarm_entry(_("There are unhomed axes: %s" % temp))

    def on_hal_status_axis_homed(self, widget, data):
        """hal_status callback: mark homed axes with '*', others with a blank."""
        for letter in self.data.axis_list:
            axnum = "xyzabcuvws".index(letter)
            if str(axnum) in data:
                self.widgets["home_%s" % letter].set_text("*")
            else:
                self.widgets["home_%s" % letter].set_text(" ")

    def on_show_dtg_pressed(self, widget):
        """Show or hide the distance-to-go readout column."""
        data = widget.get_active()
        self.widgets.dtg_vbox.set_visible(data)
        self.gscreen.set_show_dtg(data)

    # Connect to gscreens regular signals and add a couple more
    def connect_signals(self, handlers):
        self.gscreen.connect_signals(handlers)
        # connect to handler file callbacks:
        self.gscreen.widgets.metric_select.connect("clicked", self.on_metric_select_clicked)
        self.gscreen.widgets.diameter_mode.connect("clicked", self.on_diameter_mode_pressed)
        temp = "setup_button", "mdi_button", "run_button", "tooledit_button", "system_button", "offsetpage_button"
        for cb in temp:
            i = "_sighandler_%s" % (cb)
            # store the handler ids so gscreen.block()/unblock() can find them
            self.data[i] = int(self.widgets[cb].connect("toggled", self["on_%s_clicked" % cb]))
        self.widgets.hal_status.connect("not-all-homed", self.on_hal_status_not_all_homed)
        self.widgets.hal_status.connect("homed", self.on_hal_status_axis_homed)
        self.widgets.abs_colorbutton.connect("color-set", self.on_abs_colorbutton_color_set)
        self.widgets.rel_colorbutton.connect("color-set", self.on_rel_colorbutton_color_set)
        self.widgets.dtg_colorbutton.connect("color-set", self.on_dtg_colorbutton_color_set)
        self.widgets.unlock_number.connect("value-changed", self.gscreen.on_unlock_number_value_changed)
        self.widgets.show_dtg.connect("clicked", self.on_show_dtg_pressed)

    # We don't want Gscreen to initialize ALL it's regular widgets because this custom
    # screen doesn't have them all -just most of them. So we call the ones we want
    def initialize_widgets(self):
        self.gscreen.init_show_windows()
        self.gscreen.init_dynamic_tabs()
        #self.gscreen.init_axis_frames()
        #self.gscreen.init_dro_colors()
        self.gscreen.init_screen2()
        self.gscreen.init_fullscreen1()
        self.gscreen.init_gremlin()
        self.gscreen.init_manual_spindle_controls()
        self.gscreen.init_dro_colors()
        self.init_dro()  # local function
        self.gscreen.init_audio()
        self.gscreen.init_desktop_notify()
        self.gscreen.init_statusbar()
        self.gscreen.init_entry()
        self.gscreen.init_tooleditor()
        self.gscreen.init_offsetpage()
        self.gscreen.init_embeded_terminal()
        self.gscreen.init_themes()
        self.gscreen.init_screen1_geometry()
        self.gscreen.init_running_options()
        self.gscreen.init_hide_cursor()
        #self.gscreen.init_mode()
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.gscreen.init_sensitive_on_off()
        self.gscreen.init_sensitive_run_idle()
        self.gscreen.init_sensitive_all_homed()
        self.gscreen.init_sensitive_override_mode()
        self.gscreen.init_sensitive_graphics_mode()
        self.gscreen.init_sensitive_origin_mode()
        self.init_sensitive_edit_mode()  # local function
        # these buttons should also be desensitized in the listed modes
        for i in ("setup_button", "mdi_button", "run_button", "tooledit_button", "offsetpage_button", "button_index_tool"):
            self.data.sensitive_override_mode.append(i)
            self.data.sensitive_graphics_mode.append(i)
            self.data.sensitive_origin_mode.append(i)
        self.widgets["spindle-at-speed"].set_property("on_color", "black")
        self.gscreen.init_unlock_code()
        self.gscreen.init_state()
        # show only the readouts for axes this machine actually has
        for i in self.data.axis_list:
            self.widgets["dro_%s1" % i].show()
            self.widgets["dro_%s2" % i].show()
            self.widgets["dro_%s3" % i].show()
            self.widgets["axis_%s" % i].show()
            self.widgets["home_%s" % i].show()
        #self.widgets.offsetpage1.set_highlight_color("lightblue")
        self.widgets.offsetpage1.set_font("sans 18")
        self.widgets.offsetpage1.set_row_visible("1", False)
        self.widgets.tooledit1.set_font("sans 18")
        if self.data.embedded_keyboard:
            self.gscreen.launch_keyboard()

    def init_sensitive_edit_mode(self):
        """List the widgets to desensitize while the gcode editor is active."""
        self.data.sensitive_edit_mode = ["button_graphics", "button_override", "button_restart", "button_cycle_start", "button_single_step",
                                         "run_button", "setup_button", "mdi_button", "system_button", "tooledit_button", "ignore_limits",
                                         "offsetpage_button"]

    def init_dro(self):
        """Apply colors, DTG visibility and display units to the DRO widgets."""
        self.on_abs_colorbutton_color_set(None)
        self.on_rel_colorbutton_color_set(None)
        self.on_dtg_colorbutton_color_set(None)
        self.widgets.show_dtg.set_active(self.data.show_dtg)
        self.on_show_dtg_pressed(self.widgets.show_dtg)
        self.gscreen.init_dro()
        data = self.data.dro_units
        for i in ("1", "2", "3"):
            for letter in self.data.axis_list:
                axis = "dro_%s%s" % (letter, i)
                try:
                    self.widgets[axis].set_property("display_units_mm", data)
                except:
                    # not every axis has all three readout widgets
                    pass

    # every 100 milli seconds this gets called
    # we add calls to the regular functions for the widgets we are using.
    # and add any extra calls/code
    def periodic(self):
        self.update_mdi_spindle_button()  # local method
        self.gscreen.update_spindle_bar()
        #self.gscreen.update_dro()
        self.gscreen.update_active_gcodes()
        self.gscreen.update_active_mcodes()
        self.gscreen.update_aux_coolant_pins()
        self.gscreen.update_feed_speed_label()
        self.gscreen.update_tool_label()
        self.gscreen.update_coolant_leds()
        self.gscreen.update_estop_led()
        self.gscreen.update_machine_on_led()
        self.gscreen.update_limit_override()
        self.gscreen.update_override_label()
        self.gscreen.update_jog_rate_label()
        self.gscreen.update_mode_label()
        self.gscreen.update_units_button_label()

    # spindle controls
    def update_mdi_spindle_button(self):
        """Keep the spindle Start/Stop button label and at-speed LED in sync."""
        self.widgets.at_speed_label.set_label(_("%d RPM" % abs(self.data.spindle_speed)))
        label = self.widgets.spindle_control.get_label()
        speed = self.data.spindle_speed
        if speed == 0 and not label == _("Start"):
            temp = _("Start")
            self.widgets["spindle-at-speed"].set_property("on_color", "black")
        elif speed and not label == _("Stop"):
            temp = _("Stop")
            self.widgets["spindle-at-speed"].set_property("on_color", "green")
        else: return  # label already matches the spindle state
        self.widgets.spindle_control.set_label(temp)

    # Allow attribute access by subscript, e.g. self["on_run_button_clicked"],
    # which connect_signals() uses to look callbacks up by name.
    def __getitem__(self, item):
        return getattr(self, item)
    def __setitem__(self, item, value):
        return setattr(self, item, value)
# standard handler call
def get_handlers(halcomp, builder, useropts, gscreen):
    """Gscreen entry point: build and return the list of handler objects."""
    handler = HandlerClass(halcomp, builder, useropts, gscreen)
    return [handler]
| lgpl-2.1 |
J861449197/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py | 35 | 20486 | """
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
import datetime
import cPickle as pickle
import math
import zlib
import pymongo
import pytz
import re
from contextlib import contextmanager
from time import time
# Import this just to export it
from pymongo.errors import DuplicateKeyError # pylint: disable=unused-import
from django.core.cache import get_cache, InvalidCacheBackendError
import dogstats_wrapper as dog_stats_api
from contracts import check, new_contract
from mongodb_proxy import autoretry_read, MongoProxy
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore import BlockData
from xmodule.modulestore.split_mongo import BlockKey
# Register BlockData with PyContracts so check() clauses below can
# reference it by name (e.g. 'dict(BlockKey: BlockData)').
new_contract('BlockData', BlockData)
def round_power_2(value):
    """
    Return value rounded up to the nearest power of 2 (0 maps to 0).
    """
    if value == 0:
        return 0

    exponent = math.ceil(math.log(value, 2))
    return math.pow(2, exponent)
class Tagger(object):
    """
    Collects tags and size measurements for a timed block of code;
    handed to the block by :class:`QueryTimer`.
    """
    def __init__(self, default_sample_rate):
        self.added_tags = []
        self.measures = []
        self.sample_rate = default_sample_rate

    def measure(self, name, size):
        """
        Record a measurement of the timed data. This would be something to
        indicate the size of the value being timed.

        Arguments:
            name: The name of the measurement.
            size (float): The size of the measurement.
        """
        self.measures.append((name, size))

    def tag(self, **kwargs):
        """
        Add tags to the timer.

        Arguments:
            **kwargs: Each keyword is treated as a tag name, and the
                value of the argument is the tag value.
        """
        self.added_tags.extend(kwargs.items())

    @property
    def tags(self):
        """
        Return all tags: explicit tags added via :meth:`tag`, plus every
        measurement bucketed into the nearest power of 2.
        """
        measurement_tags = [
            '{}:{}'.format(name, round_power_2(size))
            for name, size in self.measures
        ]
        explicit_tags = [
            '{}:{}'.format(name, value)
            for name, value in self.added_tags
        ]
        return measurement_tags + explicit_tags
class QueryTimer(object):
    """
    An object that allows timing a block of code while also recording measurements
    about that code.
    """
    def __init__(self, metric_base, sample_rate=1):
        """
        Arguments:
            metric_base: The prefix to be used for all queries captured
                with this :class:`QueryTimer`.
            sample_rate: Default sample rate handed to each Tagger.
        """
        self._metric_base = metric_base
        self._sample_rate = sample_rate

    @contextmanager
    def timer(self, metric_name, course_context):
        """
        Contextmanager which acts as a timer for the metric ``metric_name``,
        but which also yields a :class:`Tagger` object that allows the timed block
        of code to add tags and quantity measurements. Tags are added verbatim to the
        timer output. Measurements are recorded as histogram measurements in their own,
        and also as bucketed tags on the timer measurement.

        Arguments:
            metric_name: The name used to aggregate all of these metrics.
            course_context: The course which the query is being made for.
        """
        tagger = Tagger(self._sample_rate)
        metric_name = "{}.{}".format(self._metric_base, metric_name)

        start = time()
        try:
            yield tagger
        finally:
            # Emit metrics even when the timed block raised.
            end = time()
            tags = tagger.tags
            tags.append('course:{}'.format(course_context))

            for name, size in tagger.measures:
                dog_stats_api.histogram(
                    '{}.{}'.format(metric_name, name),
                    size,
                    timestamp=end,
                    # exclude this metric's own bucketed tag from its histogram
                    tags=[tag for tag in tags if not tag.startswith('{}:'.format(metric_name))],
                    sample_rate=tagger.sample_rate,
                )
            dog_stats_api.histogram(
                '{}.duration'.format(metric_name),
                end - start,
                timestamp=end,
                tags=tags,
                sample_rate=tagger.sample_rate,
            )
            dog_stats_api.increment(
                metric_name,
                timestamp=end,
                tags=tags,
                sample_rate=tagger.sample_rate,
            )
# Module-wide timer; samples only 1% of queries by default to limit metric volume.
TIMER = QueryTimer(__name__, 0.01)
def structure_from_mongo(structure, course_context=None):
    """
    Converts the 'blocks' key from a list [block_data] to a map
    {BlockKey: block_data}.
    Converts 'root' from [block_type, block_id] to BlockKey.
    Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
    N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).

    Arguments:
        structure: The document structure to convert
        course_context (CourseKey): For metrics gathering, the CourseKey
            for the course that this data is being processed for.
    """
    with TIMER.timer('structure_from_mongo', course_context) as tagger:
        tagger.measure('blocks', len(structure['blocks']))

        # PyContracts sanity checks on the raw mongo document shape.
        check('seq[2]', structure['root'])
        check('list(dict)', structure['blocks'])
        for block in structure['blocks']:
            if 'children' in block['fields']:
                check('list(list[2])', block['fields']['children'])

        structure['root'] = BlockKey(*structure['root'])
        new_blocks = {}
        for block in structure['blocks']:
            if 'children' in block['fields']:
                block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
            # NOTE: mutates each block dict (pops 'block_id') while re-keying.
            new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = BlockData(**block)
        structure['blocks'] = new_blocks

        return structure
def structure_to_mongo(structure, course_context=None):
    """
    Converts the 'blocks' key from a map {BlockKey: block_data} to
    a list [block_data], inserting BlockKey.type as 'block_type'
    and BlockKey.id as 'block_id'.
    Doesn't convert 'root', since namedtuple's can be inserted
    directly into mongo.
    """
    with TIMER.timer('structure_to_mongo', course_context) as tagger:
        tagger.measure('blocks', len(structure['blocks']))

        check('BlockKey', structure['root'])
        check('dict(BlockKey: BlockData)', structure['blocks'])
        for block in structure['blocks'].itervalues():
            if 'children' in block.fields:
                check('list(BlockKey)', block.fields['children'])

        # Shallow copy so the caller's structure dict is left untouched.
        new_structure = dict(structure)
        new_structure['blocks'] = []

        for block_key, block in structure['blocks'].iteritems():
            new_block = dict(block.to_storable())
            # setdefault: don't clobber an explicit block_type already stored.
            new_block.setdefault('block_type', block_key.type)
            new_block['block_id'] = block_key.id
            new_structure['blocks'].append(new_block)

        return new_structure
class CourseStructureCache(object):
    """
    Wrapper around django cache object to cache course structure objects.
    The course structures are pickled and compressed when cached.

    If the 'course_structure_cache' doesn't exist, then don't do anything for
    for set and get.
    """
    def __init__(self):
        # True when the named cache backend is not configured; get/set become no-ops.
        self.no_cache_found = False
        try:
            self.cache = get_cache('course_structure_cache')
        except InvalidCacheBackendError:
            self.no_cache_found = True

    def get(self, key, course_context=None):
        """Pull the compressed, pickled struct data from cache and deserialize."""
        if self.no_cache_found:
            return None

        with TIMER.timer("CourseStructureCache.get", course_context) as tagger:
            compressed_pickled_data = self.cache.get(key)
            tagger.tag(from_cache=str(compressed_pickled_data is not None).lower())

            if compressed_pickled_data is None:
                # Always log cache misses, because they are unexpected
                tagger.sample_rate = 1
                return None

            tagger.measure('compressed_size', len(compressed_pickled_data))

            pickled_data = zlib.decompress(compressed_pickled_data)
            tagger.measure('uncompressed_size', len(pickled_data))

            return pickle.loads(pickled_data)

    def set(self, key, structure, course_context=None):
        """Given a structure, will pickle, compress, and write to cache."""
        if self.no_cache_found:
            return None

        with TIMER.timer("CourseStructureCache.set", course_context) as tagger:
            pickled_data = pickle.dumps(structure, pickle.HIGHEST_PROTOCOL)
            tagger.measure('uncompressed_size', len(pickled_data))

            # 1 = Fastest (slightly larger results)
            compressed_pickled_data = zlib.compress(pickled_data, 1)
            tagger.measure('compressed_size', len(compressed_pickled_data))

            # Stuctures are immutable, so we set a timeout of "never"
            self.cache.set(key, compressed_pickled_data, None)
class MongoConnection(object):
    """
    Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
    """
    def __init__(
        self, db, collection, host, port=27017, tz_aware=True, user=None, password=None,
        asset_collection=None, retry_wait_time=0.1, **kwargs
    ):
        """
        Create & open the connection, authenticate, and provide pointers to the collections
        """
        # Choose the client class based on whether a replica set was requested.
        if kwargs.get('replicaSet') is None:
            kwargs.pop('replicaSet', None)
            mongo_class = pymongo.MongoClient
        else:
            mongo_class = pymongo.MongoReplicaSetClient
        _client = mongo_class(
            host=host,
            port=port,
            tz_aware=tz_aware,
            **kwargs
        )
        # MongoProxy retries operations that fail on transient connection errors.
        self.database = MongoProxy(
            pymongo.database.Database(_client, db),
            wait_time=retry_wait_time
        )

        if user is not None and password is not None:
            self.database.authenticate(user, password)

        # The three split-modulestore collections share a common name prefix.
        self.course_index = self.database[collection + '.active_versions']
        self.structures = self.database[collection + '.structures']
        self.definitions = self.database[collection + '.definitions']

        # every app has write access to the db (v having a flag to indicate r/o v write)
        # Force mongo to report errors, at the expense of performance
        # pymongo docs suck but explanation:
        # http://api.mongodb.org/java/2.10.1/com/mongodb/WriteConcern.html
        self.course_index.write_concern = {'w': 1}
        self.structures.write_concern = {'w': 1}
        self.definitions.write_concern = {'w': 1}

    def heartbeat(self):
        """
        Check that the db is reachable.
        """
        if self.database.connection.alive():
            return True
        else:
            raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo')

    def get_structure(self, key, course_context=None):
        """
        Get the structure from the persistence mechanism whose id is the given key.

        This method will use a cached version of the structure if it is availble.
        """
        with TIMER.timer("get_structure", course_context) as tagger_get_structure:
            cache = CourseStructureCache()

            structure = cache.get(key, course_context)
            tagger_get_structure.tag(from_cache=str(bool(structure)).lower())
            if not structure:
                # Always log cache misses, because they are unexpected
                tagger_get_structure.sample_rate = 1

                with TIMER.timer("get_structure.find_one", course_context) as tagger_find_one:
                    doc = self.structures.find_one({'_id': key})
                    tagger_find_one.measure("blocks", len(doc['blocks']))
                    structure = structure_from_mongo(doc, course_context)
                    tagger_find_one.sample_rate = 1

                cache.set(key, structure, course_context)

            return structure

    @autoretry_read()
    def find_structures_by_id(self, ids, course_context=None):
        """
        Return all structures that specified in ``ids``.

        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_structures_by_id", course_context) as tagger:
            tagger.measure("requested_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({'_id': {'$in': ids}})
            ]
            tagger.measure("structures", len(docs))
            return docs

    @autoretry_read()
    def find_structures_derived_from(self, ids, course_context=None):
        """
        Return all structures that were immediately derived from a structure listed in ``ids``.

        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_structures_derived_from", course_context) as tagger:
            tagger.measure("base_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({'previous_version': {'$in': ids}})
            ]
            tagger.measure("structures", len(docs))
            return docs

    @autoretry_read()
    def find_ancestor_structures(self, original_version, block_key, course_context=None):
        """
        Find all structures that originated from ``original_version`` that contain ``block_key``.

        Arguments:
            original_version (str or ObjectID): The id of a structure
            block_key (BlockKey): The id of the block in question
        """
        with TIMER.timer("find_ancestor_structures", course_context) as tagger:
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({
                    'original_version': original_version,
                    # only match structures where this block has edit info
                    'blocks': {
                        '$elemMatch': {
                            'block_id': block_key.id,
                            'block_type': block_key.type,
                            'edit_info.update_version': {
                                '$exists': True,
                            },
                        },
                    },
                })
            ]
            tagger.measure("structures", len(docs))
            return docs

    def insert_structure(self, structure, course_context=None):
        """
        Insert a new structure into the database.
        """
        with TIMER.timer("insert_structure", course_context) as tagger:
            tagger.measure("blocks", len(structure["blocks"]))
            self.structures.insert(structure_to_mongo(structure, course_context))

    def get_course_index(self, key, ignore_case=False):
        """
        Get the course_index from the persistence mechanism whose id is the given key
        """
        with TIMER.timer("get_course_index", key):
            if ignore_case:
                # Anchored, case-insensitive exact match on each key part.
                query = {
                    key_attr: re.compile(u'^{}$'.format(re.escape(getattr(key, key_attr))), re.IGNORECASE)
                    for key_attr in ('org', 'course', 'run')
                }
            else:
                query = {
                    key_attr: getattr(key, key_attr)
                    for key_attr in ('org', 'course', 'run')
                }
            return self.course_index.find_one(query)

    def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None, course_context=None):
        """
        Find the course_index matching particular conditions.

        Arguments:
            branch: If specified, this branch must exist in the returned courses
            search_targets: If specified, this must be a dictionary specifying field values
                that must exist in the search_targets of the returned courses
            org_target: If specified, this is an ORG filter so that only course_indexs are
                returned for the specified ORG
        """
        with TIMER.timer("find_matching_course_indexes", course_context):
            query = {}
            if branch is not None:
                query['versions.{}'.format(branch)] = {'$exists': True}

            if search_targets:
                for key, value in search_targets.iteritems():
                    query['search_targets.{}'.format(key)] = value

            if org_target:
                query['org'] = org_target

            return self.course_index.find(query)

    def insert_course_index(self, course_index, course_context=None):
        """
        Create the course_index in the db
        """
        with TIMER.timer("insert_course_index", course_context):
            course_index['last_update'] = datetime.datetime.now(pytz.utc)
            self.course_index.insert(course_index)

    def update_course_index(self, course_index, from_index=None, course_context=None):
        """
        Update the db record for course_index.

        Arguments:
            from_index: If set, only update an index if it matches the one specified in `from_index`.
        """
        with TIMER.timer("update_course_index", course_context):
            if from_index:
                query = {"_id": from_index["_id"]}
                # last_update not only tells us when this course was last updated but also helps
                # prevent collisions
                if 'last_update' in from_index:
                    query['last_update'] = from_index['last_update']
            else:
                query = {
                    'org': course_index['org'],
                    'course': course_index['course'],
                    'run': course_index['run'],
                }
            course_index['last_update'] = datetime.datetime.now(pytz.utc)
            self.course_index.update(query, course_index, upsert=False,)

    def delete_course_index(self, course_key):
        """
        Delete the course_index from the persistence mechanism whose id is the given course_index
        """
        with TIMER.timer("delete_course_index", course_key):
            query = {
                key_attr: getattr(course_key, key_attr)
                for key_attr in ('org', 'course', 'run')
            }
            return self.course_index.remove(query)

    def get_definition(self, key, course_context=None):
        """
        Get the definition from the persistence mechanism whose id is the given key
        """
        with TIMER.timer("get_definition", course_context) as tagger:
            definition = self.definitions.find_one({'_id': key})
            tagger.measure("fields", len(definition['fields']))
            tagger.tag(block_type=definition['block_type'])
            return definition

    def get_definitions(self, definitions, course_context=None):
        """
        Retrieve all definitions listed in `definitions`.
        """
        with TIMER.timer("get_definitions", course_context) as tagger:
            tagger.measure('definitions', len(definitions))
            definitions = self.definitions.find({'_id': {'$in': definitions}})
            return definitions

    def insert_definition(self, definition, course_context=None):
        """
        Create the definition in the db
        """
        with TIMER.timer("insert_definition", course_context) as tagger:
            tagger.measure('fields', len(definition['fields']))
            tagger.tag(block_type=definition['block_type'])
            self.definitions.insert(definition)

    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.

        This method is intended for use by tests and administrative commands, and not
        to be run during server startup.
        """
        # (org, course, run) uniquely identifies a course index.
        self.course_index.create_index(
            [
                ('org', pymongo.ASCENDING),
                ('course', pymongo.ASCENDING),
                ('run', pymongo.ASCENDING)
            ],
            unique=True,
            background=True
        )
| agpl-3.0 |
tarikkdiry/Flock | sqlalchemy-workspace/lib/python2.7/site-packages/wheel/signatures/ed25519py.py | 565 | 1695 | # -*- coding: utf-8 -*-
import warnings
import os
from collections import namedtuple
from . import djbec
__all__ = ['crypto_sign', 'crypto_sign_open', 'crypto_sign_keypair', 'Keypair',
'PUBLICKEYBYTES', 'SECRETKEYBYTES', 'SIGNATUREBYTES']
# Byte lengths of the raw Ed25519 key and signature material.
PUBLICKEYBYTES = 32
SECRETKEYBYTES = 64  # secret key = 32-byte seed + 32-byte verifying key
SIGNATUREBYTES = 64

Keypair = namedtuple('Keypair', ('vk', 'sk'))  # verifying key, secret key
def crypto_sign_keypair(seed=None):
    """Return (verifying, secret) key from a given seed, or os.urandom(32).

    A caller-supplied seed triggers a RuntimeWarning, since the library
    should normally choose its own randomness.

    Raises:
        ValueError: if a supplied seed is not exactly PUBLICKEYBYTES long.
    """
    if seed is None:
        seed = os.urandom(PUBLICKEYBYTES)
    else:
        warnings.warn("ed25519ll should choose random seed.",
                      RuntimeWarning)
    # Use the named constant instead of a magic 32 so the length check
    # stays in sync with PUBLICKEYBYTES (they were duplicated before).
    if len(seed) != PUBLICKEYBYTES:
        raise ValueError("seed must be 32 random bytes or None.")
    skbytes = seed
    vkbytes = djbec.publickey(skbytes)
    # Secret key is conventionally seed || verifying key.
    return Keypair(vkbytes, skbytes + vkbytes)
def crypto_sign(msg, sk):
    """Return signature+message given message and secret key.

    The signature is the first SIGNATUREBYTES bytes of the return value.
    A copy of msg is in the remainder."""
    if len(sk) != SECRETKEYBYTES:
        raise ValueError("Bad signing key length %d" % len(sk))
    # secret key layout: seed (first 32 bytes) || verifying key (last 32)
    seed_part, verify_part = sk[:PUBLICKEYBYTES], sk[PUBLICKEYBYTES:]
    signature = djbec.signature(msg, seed_part, verify_part)
    return signature + msg
def crypto_sign_open(signed, vk):
    """Return message given signature+message and the verifying key."""
    if len(vk) != PUBLICKEYBYTES:
        raise ValueError("Bad verifying key length %d" % len(vk))
    signature = signed[:SIGNATUREBYTES]
    message = signed[SIGNATUREBYTES:]
    rc = djbec.checkvalid(signature, message, vk)
    if not rc:
        raise ValueError("rc != True", rc)
    return message
| bsd-3-clause |
jiajiax/crosswalk-test-suite | wrt/wrt-packertool2-android-tests/packertool2/projectdirtest.py | 4 | 4789 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
import unittest
import os
import sys
import commands
import shutil
import stat
import comm
class TestPackertoolsFunctions(unittest.TestCase):
    """Exercise make_apk.py's --project-dir handling for pathological paths.

    Each test shells out to make_apk.py (via the legacy Python 2 ``commands``
    module) with a project dir that is read-only, an existing file, or an
    existing nested directory, and checks the packer's exit status/output.
    Relies on helpers and constants from the sibling ``comm`` module.
    """
    def test_projectdir_readOnly(self):
        """A read-only --project-dir must make the packer fail with EACCES."""
        comm.setUp()
        os.chdir(comm.Pck_Tools)
        manifestPath = comm.ConstPath + "/../testapp/example/manifest.json"
        if not os.path.exists(comm.Pck_Tools + "readOnly"):
            os.mkdir("readOnly")
            # Strip write permission so the packer cannot create files inside.
            os.chmod("readOnly", stat.S_IREAD)
        cmd = "python make_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=readOnly" % \
              (comm.ARCH, comm.MODE, manifestPath)
        packstatus = commands.getstatusoutput(cmd)
        errormsg = "OSError: [Errno 13] Permission denied"
        self.assertNotEqual(packstatus[0], 0)
        self.assertIn(errormsg, packstatus[1])
        self.assertIn(comm.AppName, os.listdir(comm.Pck_Tools))
        try:
            shutil.rmtree(comm.Pck_Tools + "readOnly")
            os.remove(comm.Pck_Tools + comm.AppName)
        except Exception as e:
            # Fall back to shell removal if Python-level cleanup fails
            # (e.g. because of the read-only bit set above).
            os.system("rm -rf " + comm.Pck_Tools + "readOnly &>/dev/null")
            os.system("rm -rf " + comm.Pck_Tools + "*apk &>/dev/null")
    def test_projectdir_existFile(self):
        """--project-dir naming an existing *file* should warn but still build."""
        comm.setUp()
        os.chdir(comm.Pck_Tools)
        manifestPath = comm.ConstPath + "/../testapp/example/manifest.json"
        if "existFile.txt" not in os.listdir(comm.Pck_Tools):
            os.mknod("existFile.txt")
        cmd = "python make_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=existFile.txt" % \
              (comm.ARCH, comm.MODE, manifestPath)
        packstatus = commands.getstatusoutput(cmd)
        errormsg = "Unable to create a project directory during the build"
        self.assertEqual(packstatus[0], 0)
        self.assertIn(errormsg, packstatus[1])
        self.assertIn(comm.AppName, os.listdir(comm.Pck_Tools))
        os.remove(comm.Pck_Tools + "/existFile.txt")
        os.remove(comm.Pck_Tools + comm.AppName)
    def test_projectdir_existDir(self):
        """--project-dir pointing at an existing nested directory must succeed."""
        comm.setUp()
        os.chdir(comm.Pck_Tools)
        manifestPath = comm.ConstPath + "/../testapp/example/manifest.json"
        if not os.path.exists(comm.Pck_Tools + "testapp"):
            os.makedirs("testapp/testapp")
        cmd = "python make_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=testapp/testapp" % \
              (comm.ARCH, comm.MODE, manifestPath)
        packstatus = commands.getstatusoutput(cmd)
        self.assertEqual(packstatus[0], 0)
        # The build should have materialised an Android project skeleton.
        buildDir = comm.Pck_Tools + "testapp/testapp/Example"
        buildList = os.listdir(buildDir)
        self.assertTrue(os.path.isdir(buildDir + "/res"))
        self.assertIn("res", buildList)
        self.assertTrue(os.path.isfile(buildDir + "/build.xml"))
        self.assertIn("build.xml", buildList)
        try:
            shutil.rmtree(comm.Pck_Tools + "testapp")
            os.remove(comm.Pck_Tools + comm.AppName)
        except Exception as e:
            # Best-effort shell cleanup mirrors the read-only test above.
            os.system("rm -rf " + comm.Pck_Tools + "testapp &>/dev/null")
            os.system("rm -rf " + comm.Pck_Tools + "*apk &>/dev/null")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
bastik/youtube-dl | youtube_dl/extractor/cinchcast.py | 177 | 1678 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unified_strdate,
xpath_text,
)
class CinchcastIE(InfoExtractor):
    """Extractor for player.cinchcast.com embeds.

    Metadata is fetched from blogtalkradio's MRSS endpoint rather than the
    player page itself.
    """
    _VALID_URL = r'https?://player\.cinchcast\.com/.*?assetId=(?P<id>[0-9]+)'
    _TEST = {
        # Actual test is run in generic, look for undergroundwellness
        'url': 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703',
        'only_matching': True,
    }
    def _real_extract(self, url):
        """Return an info dict for the asset id embedded in *url*."""
        video_id = self._match_id(url)
        doc = self._download_xml(
            'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
            video_id)
        item = doc.find('.//item')
        title = xpath_text(item, './title', fatal=True)
        date_str = xpath_text(
            item, './{http://developer.longtailvideo.com/trac/}date')
        upload_date = unified_strdate(date_str, day_first=False)
        # duration is present but wrong
        formats = [{
            'format_id': 'main',
            'url': item.find('./{http://search.yahoo.com/mrss/}content').attrib['url'],
        }]
        # Optional mirror URL in the longtailvideo namespace.
        backup_url = xpath_text(
            item, './{http://developer.longtailvideo.com/trac/}backupContent')
        if backup_url:
            formats.append({
                'preference': 2,  # seems to be more reliable
                'format_id': 'backup',
                'url': backup_url,
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'formats': formats,
        }
| unlicense |
microcom/odoo | addons/gamification/models/res_users.py | 47 | 3150 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
from challenge import MAX_VISIBILITY_RANKING
class res_users_gamification_group(osv.Model):
    """ Update of res.users class
        - if adding groups to an user, check gamification.challenge linked to
        this group, and the user. This is done by overriding the write method.
    """
    _name = 'res.users'
    _inherit = ['res.users']
    def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
        # Convenience wrapper: summarise the calling user's own goals.
        return self._serialised_goals_summary(cr, uid, user_id=uid, excluded_categories=excluded_categories, context=context)
    def _serialised_goals_summary(self, cr, uid, user_id, excluded_categories=None, context=None):
        """Return a serialised list of goals assigned to the user, grouped by challenge
        :excluded_categories: list of challenge categories to exclude in search
        [
            {
                'id': <gamification.challenge id>,
                'name': <gamification.challenge name>,
                'visibility_mode': <visibility {ranking,personal}>,
                'currency': <res.currency id>,
                'lines': [(see gamification_challenge._get_serialized_challenge_lines() format)]
            },
        ]
        """
        all_goals_info = []
        challenge_obj = self.pool.get('gamification.challenge')
        # NOTE(review): the domain filters on ``uid`` (the caller) while the
        # serialisation below uses ``user_id`` -- confirm this asymmetry is
        # intentional before relying on user_id != uid.
        domain = [('user_ids', 'in', uid), ('state', '=', 'inprogress')]
        if excluded_categories and isinstance(excluded_categories, list):
            domain.append(('category', 'not in', excluded_categories))
        user = self.browse(cr, uid, uid, context=context)
        challenge_ids = challenge_obj.search(cr, uid, domain, context=context)
        for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
            # serialize goals info to be able to use it in javascript
            lines = challenge_obj._get_serialized_challenge_lines(cr, uid, challenge, user_id, restrict_top=MAX_VISIBILITY_RANKING, context=context)
            if lines:
                all_goals_info.append({
                    'id': challenge.id,
                    'name': challenge.name,
                    'visibility_mode': challenge.visibility_mode,
                    'currency': user.company_id.currency_id.id,
                    'lines': lines,
                })
        return all_goals_info
    def get_challenge_suggestions(self, cr, uid, context=None):
        """Return the list of challenges suggested to the user"""
        challenge_info = []
        challenge_obj = self.pool.get('gamification.challenge')
        # A challenge is "suggested" when the user is invited but not enrolled.
        challenge_ids = challenge_obj.search(cr, uid, [('invited_user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)
        for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
            values = {
                'id': challenge.id,
                'name': challenge.name,
                'description': challenge.description,
            }
            challenge_info.append(values)
        return challenge_info
| agpl-3.0 |
Kabele/bootstrap | test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
    """Context manager that prints how long its body took, in whole seconds."""
    began = datetime.utcnow()
    yield
    seconds = int((datetime.utcnow() - began).total_seconds())
    print("\tDone. Took", seconds, "second(s).")
@contextmanager
def todo_file(writeback=True):
    """Yield the upload to-do dict persisted in UPLOAD_TODO_FILE.

    A missing or malformed file yields an empty dict. When *writeback* is
    true, the (possibly mutated) dict is saved back on exit; save failures
    are reported but not raised.
    """
    try:
        with open(UPLOAD_TODO_FILE, 'rt') as json_file:
            todo = load(json_file)
    except (IOError, OSError, ValueError):
        # Absent or corrupt to-do file means "nothing pending".
        todo = {}
    yield todo
    if writeback:
        try:
            with open(UPLOAD_TODO_FILE, 'wt') as json_file:
                save(todo, json_file)
        except (OSError, IOError) as save_err:
            print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
    """Delete *filename*, silently ignoring a missing file or permission error."""
    try:
        _delete_file(filename)
    except (OSError, IOError):
        pass
def mark_needs_uploading(cache_name):
    """Record in the to-do file that *cache_name* must be (re)uploaded."""
    with todo_file() as todo:
        todo[cache_name] = True
def mark_uploaded(cache_name):
    """Clear *cache_name* from the upload to-do file (no-op if absent)."""
    with todo_file() as todo:
        todo.pop(cache_name, None)
def need_to_upload(cache_name):
    """Return True if *cache_name* is flagged for upload in the to-do file."""
    with todo_file(writeback=False) as todo:
        return todo.get(cache_name, False)
def _tarball_size(directory):
    """Return the size of *directory*'s tarball as a human-readable string."""
    # st_size is in bytes; dividing by BYTES_PER_MB (1024*1024) yields
    # mebibytes, so name the local accordingly (it was misnamed 'kib').
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Pack *directory* into <basename>.tar.gz in the current directory."""
    print("Creating tarball of {}...".format(directory))
    with timer():
        # -C makes paths inside the archive relative to the parent directory.
        run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
    """Unpack *directory*'s tarball back into its parent directory."""
    print("Extracting tarball of {}...".format(directory))
    with timer():
        run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
    """Fetch the cached tarball from S3 and unpack it into *directory*.

    NOTE(review): depends on the module-level globals ``cache_name`` and
    ``key`` that are bound in the __main__ block below.
    """
    mark_uploaded(cache_name)  # reset
    try:
        print("Downloading {} tarball from S3...".format(cache_name))
        with timer():
            key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        # Flag the cache for re-upload before aborting so the next run
        # regenerates and pushes it.
        mark_needs_uploading(cache_name)
        raise SystemExit("Cached {} download failed!".format(cache_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
    """Tar *directory* and push it to S3, then clear its to-do flag.

    NOTE(review): depends on the module-level globals ``cache_name`` and
    ``key`` that are bound in the __main__ block below.
    """
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
    with timer():
        key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(cache_name))
    mark_uploaded(cache_name)
if __name__ == '__main__':
    # Uses environment variables:
    # AWS_ACCESS_KEY_ID -- AWS Access Key ID
    # AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 2:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
    mode, cache_name = argv
    # Run relative to this script so CONFIG_FILE / to-do file paths resolve.
    script_dir = dirname(realpath(__file__))
    chdir(script_dir)
    try:
        with open(CONFIG_FILE, 'rt') as config_file:
            config = load(config_file)
    except (IOError, OSError, ValueError) as config_err:
        print(config_err)
        raise SystemExit("Error when trying to load config from JSON file!")
    try:
        # Per-cache config: the key file whose hash names the S3 object, the
        # shell command that regenerates the cache, and the cached directory.
        cache_info = config[cache_name]
        key_file = expandvars(cache_info["key"])
        fallback_cmd = cache_info["generate"]
        directory = expandvars(cache_info["cache"])
    except (TypeError, KeyError) as load_err:
        print(load_err)
        raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
    try:
        try:
            BUCKET_NAME = environ['TWBS_S3_BUCKET']
        except KeyError:
            raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
        conn = S3Connection()
        bucket = conn.lookup(BUCKET_NAME)
        if bucket is None:
            raise SystemExit("Could not access bucket!")
        # The S3 object name is the hash of the key file, so a changed key
        # file automatically misses the old cache.
        key_file_hash = _sha256_of_file(key_file)
        key = Key(bucket, key_file_hash)
        key.storage_class = 'REDUCED_REDUNDANCY'
        if mode == 'download':
            download(directory)
        elif mode == 'upload':
            if need_to_upload(cache_name):
                upload(directory)
            else:
                print("No need to upload anything.")
        else:
            raise SystemExit("Unrecognized mode {!r}".format(mode))
    except BaseException as exc:
        # Upload failures are fatal; download failures fall back to
        # regenerating the cache locally with the configured command.
        if mode != 'download':
            raise
        print("Error!:", exc)
        print("Unable to download from cache.")
        print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
        with timer():
            run(fallback_cmd, shell=True)
| mit |
saumishr/django | tests/regressiontests/admin_custom_urls/models.py | 42 | 1467 | from functools import update_wrapper
from django.contrib import admin
from django.db import models
class Action(models.Model):
    """Model whose primary key is a CharField (exercised by ActionAdmin below)."""
    name = models.CharField(max_length=50, primary_key=True)
    description = models.CharField(max_length=70)
    def __unicode__(self):
        # Python 2 display representation (used e.g. in the admin).
        return self.name
class ActionAdmin(admin.ModelAdmin):
    """
    A ModelAdmin for the Action model that changes the URL of the add_view
    to '<app name>/<model name>/!add/'
    The Action model has a CharField PK.
    """
    list_display = ('name', 'description')
    def remove_url(self, name):
        """
        Remove all entries named 'name' from the ModelAdmin instance URL
        patterns list
        """
        # Python 2 filter() returns a list here, matching the patterns() concat
        # in get_urls below.
        return filter(lambda e: e.name != name, super(ActionAdmin, self).get_urls())
    def get_urls(self):
        # Add the URL of our custom 'add_view' view to the front of the URLs
        # list. Remove the existing one(s) first
        from django.conf.urls import patterns, url
        def wrap(view):
            # Wrap the view in admin_site.admin_view (permission checks) while
            # preserving the original view's metadata.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        info = self.model._meta.app_label, self.model._meta.module_name
        view_name = '%s_%s_add' % info
        return patterns('',
            url(r'^!add/$', wrap(self.add_view), name=view_name),
        ) + self.remove_url(view_name)
# Hook the customised admin class up to the default admin site.
admin.site.register(Action, ActionAdmin)
| bsd-3-clause |
rerobins/django_auth_addon | django_auth_addon/backend/google_authentication.py | 1 | 3221 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend
from apiclient.discovery import build
import httplib2
import json
from uuid import uuid4
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from django.conf import settings
from django.template.defaultfilters import slugify
from django_auth_addon.models import GooglePlusCredentialsModel
SERVICE = build('plus', 'v1')
class GooglePlusBackend(ModelBackend):
    """Authentication backend that exchanges a Google+ one-time code for a user.

    Creates a Django User (with a random username) on first sign-in and links
    it to the Google account via GooglePlusCredentialsModel.
    """
    def authenticate(self, access_code=None):
        """Return the User for *access_code*, or None if the exchange fails."""
        if access_code is None:
            return None
        try:
            # Exchange the one-time authorization code for credentials.
            # 'postmessage' is the redirect URI used for JS-origin sign-in.
            oauth_flow = flow_from_clientsecrets(settings.CLIENT_SECRETS, scope='')
            oauth_flow.redirect_uri = 'postmessage'
            self.credentials = oauth_flow.step2_exchange(access_code)
        except FlowExchangeError:
            return None
        # Check that the access token is valid.
        access_token = self.credentials.access_token
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
               % access_token)
        h = httplib2.Http()
        result = json.loads(h.request(url, 'GET')[1])
        # If there was an error in the access token info, abort.
        if result.get('error') is not None:
            return None
        # NOTE(review): this second tokeninfo request looks redundant with the
        # one above -- it fetches the same endpoint again just to read
        # user_id/email. Confirm whether one call could serve both purposes.
        access_token = self.credentials.access_token
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
               % access_token)
        h = httplib2.Http()
        token_info = json.loads(h.request(url, 'GET')[1])
        # http = httplib2.Http()
        # http = self.credentials.authorize(http)
        # # Get a list of people that this user has shared with this app.
        # google_request = SERVICE.people().get(userId='me')
        # people_document = google_request.execute(http=http)
        # context['given_name'] = self.people_document['name']['givenName']
        # context['family_name'] = self.people_document['name']['familyName']
        # Check to see if there is a google plus credential object with the provided user id from google
        google_plus_credentials = GooglePlusCredentialsModel.objects.filter(gplus_id=token_info['user_id'])
        if len(google_plus_credentials) == 0:
            credentials = GooglePlusCredentialsModel()
            credentials.gplus_id = token_info['user_id']
            # Need to create a whole new user object and move on.
            user = User.objects.create_user(get_username(), token_info['email'])
            credentials.user = user
            user.save()
            credentials.save()
        else:
            # Check to see if the credentials object has a user and then return it.
            user = google_plus_credentials[0].user
        return user
def get_username():
    """Generate a random, currently-unused username of at most 30 characters."""
    max_length = 30
    # NOTE(review): uuid4().get_hex() is Python 2 only; on Python 3 this
    # would be uuid4().hex.
    username = slugify(uuid4().get_hex()[:max_length])
    # Re-roll on collision (astronomically unlikely with 30 hex chars).
    while not is_valid_username(username):
        username = slugify(uuid4().get_hex()[:max_length])
    return username
def is_valid_username(username):
    """Return True if *username* is non-None and not already taken."""
    if username is None:
        return False
    # exists() issues a cheap EXISTS query instead of fetching and counting
    # every matching row as len(queryset) did.
    return not User.objects.filter(username=username).exists()
| bsd-3-clause |
openpolis/rst2pdf-patched-docutils-0.8 | rst2pdf/tests/input/sphinx-issue162/conf.py | 9 | 7189 | # -*- coding: utf-8 -*-
#
# issue162 documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 18 22:54:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'issue162'
copyright = u'2009, RA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'test'
# The full version, including alpha/beta/rc tags.
release = 'test'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'issue162doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'issue162.tex', u'issue162 Documentation',
u'RA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for PDF output --------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
pdf_documents = [
('index', u'MyProject', u'My Project', u'Author Name'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['borland']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
#pdf_compressed=False
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path=['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
pdf_language="en_US"
# If false, no index is generated.
pdf_use_index = False
# If false, no modindex is generated.
pdf_use_modindex = False
# If false, no coverpage is generated.
pdf_use_coverpage = False
# rst2pdf logging verbosity (0 = quiet).
pdf_verbosity=0
# Produce identical output across runs -- presumably so the rst2pdf
# regression tests this input belongs to can diff PDFs; confirm.
pdf_invariant = True
| mit |
jinankjain/zamboni | apps/amo/tests/test_storage_utils.py | 7 | 4706 | from functools import partial
import os
import tempfile
import unittest
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from nose.tools import eq_
from amo.storage_utils import (walk_storage, copy_stored_file,
move_stored_file, rm_stored_dir)
from amo.utils import rm_local_tmp_dir
def test_storage_walk():
    """walk_storage() should mirror os.walk semantics over the storage backend.

    Nose-style generator test: each yield is an (assert_func, actual,
    expected) triple checked by the runner.
    """
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)
    try:
        # Build a small tree, including a non-ASCII directory/file name.
        storage.save(jn('file1.txt'), ContentFile(''))
        storage.save(jn('one/file1.txt'), ContentFile(''))
        storage.save(jn('one/file2.txt'), ContentFile(''))
        storage.save(jn('one/two/file1.txt'), ContentFile(''))
        storage.save(jn('one/three/file1.txt'), ContentFile(''))
        storage.save(jn('four/five/file1.txt'), ContentFile(''))
        storage.save(jn(u'four/kristi\u2603/kristi\u2603.txt'),
                     ContentFile(''))
        # Sets make subdir/file ordering irrelevant; sorted() fixes the
        # order of the walked directories themselves.
        results = [(dir, set(subdirs), set(files))
                   for dir, subdirs, files in sorted(walk_storage(tmp))]
        yield (eq_, results.pop(0), (tmp, set(['four', 'one']), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('four'),
                                     set(['five', 'kristi\xe2\x98\x83']), set([])))
        yield (eq_, results.pop(0), (jn('four/five'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('four/kristi\xe2\x98\x83'), set([]),
                                     set(['kristi\xe2\x98\x83.txt'])))
        yield (eq_, results.pop(0), (jn('one'), set(['three', 'two']),
                                     set(['file1.txt', 'file2.txt'])))
        yield (eq_, results.pop(0), (jn('one/three'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('one/two'), set([]), set(['file1.txt'])))
        yield (eq_, len(results), 0)
    finally:
        rm_local_tmp_dir(tmp)
def test_rm_stored_dir():
    """rm_stored_dir() should remove a tree recursively but leave siblings."""
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)
    try:
        storage.save(jn('file1.txt'), ContentFile('<stuff>'))
        storage.save(jn('one/file1.txt'), ContentFile(''))
        storage.save(jn('one/two/file1.txt'), ContentFile('moar stuff'))
        # Include a non-ASCII name to exercise unicode path handling.
        storage.save(jn(u'one/kristi\u0107/kristi\u0107.txt'),
                     ContentFile(''))
        rm_stored_dir(jn('one'))
        yield (eq_, storage.exists(jn('one')), False)
        yield (eq_, storage.exists(jn('one/file1.txt')), False)
        yield (eq_, storage.exists(jn('one/two')), False)
        yield (eq_, storage.exists(jn('one/two/file1.txt')), False)
        yield (eq_, storage.exists(jn(u'one/kristi\u0107/kristi\u0107.txt')),
               False)
        # The sibling outside the removed tree must survive.
        yield (eq_, storage.exists(jn('file1.txt')), True)
    finally:
        rm_local_tmp_dir(tmp)
class TestFileOps(unittest.TestCase):
    """Tests for copy_stored_file / move_stored_file over the storage backend."""
    def setUp(self):
        self.tmp = tempfile.mkdtemp()
    def tearDown(self):
        rm_local_tmp_dir(self.tmp)
    def path(self, path):
        """Absolute path inside this test's temp directory."""
        return os.path.join(self.tmp, path)
    def contents(self, path):
        """Read *path* back through the storage API as bytes."""
        with storage.open(path, 'rb') as fp:
            return fp.read()
    def newfile(self, name, contents):
        """Create a storage file with *contents* and return its path."""
        src = self.path(name)
        storage.save(src, ContentFile(contents))
        return src
    def test_copy(self):
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        copy_stored_file(src, dest)
        eq_(self.contents(dest), '<contents>')
    def test_self_copy(self):
        # Copying a file onto itself must not truncate it.
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('src.txt')
        copy_stored_file(src, dest)
        eq_(self.contents(dest), '<contents>')
    def test_move(self):
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        move_stored_file(src, dest)
        eq_(self.contents(dest), '<contents>')
        eq_(storage.exists(src), False)
    def test_non_ascii(self):
        src = self.newfile(u'kristi\u0107.txt',
                           u'ivan kristi\u0107'.encode('utf8'))
        dest = self.path(u'somedir/kristi\u0107.txt')
        copy_stored_file(src, dest)
        eq_(self.contents(dest), 'ivan kristi\xc4\x87')
    def test_copy_chunking(self):
        # chunk_size=1 forces the chunked code path byte-by-byte.
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        copy_stored_file(src, dest, chunk_size=1)
        eq_(self.contents(dest), '<contents>')
    def test_move_chunking(self):
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        move_stored_file(src, dest, chunk_size=1)
        eq_(self.contents(dest), '<contents>')
        eq_(storage.exists(src), False)
| bsd-3-clause |
ianj-als/pcl | src/pclc/parser/component.py | 1 | 3739 | #
# Copyright Capita Translation and Interpreting 2013
#
# This file is part of Pipeline Creation Language (PCL).
#
# Pipeline Creation Language (PCL) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pipeline Creation Language (PCL) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pipeline Creation Language (PCL). If not, see <http://www.gnu.org/licenses/>.
#
from entity import Entity
class Component(Entity):
    """AST node for a PCL component definition.

    A component is either a *leaf* (a list of do-commands) or a *node*
    (declarations plus a composed definition); use the two static factory
    methods below rather than the constructor directly.
    """
    def __init__(self,
                 filename,
                 lineno,
                 identifier,
                 inputs,
                 outputs,
                 configuration,
                 declarations,
                 definition,
                 is_leaf):
        Entity.__init__(self, filename, lineno)
        self.identifier = identifier
        self.inputs = inputs
        self.outputs = outputs
        self.configuration = configuration
        self.declarations = declarations
        self.definition = definition
        self.is_leaf = is_leaf
    @staticmethod
    def getLeafComponent(filename,
                         lineno,
                         identifier,
                         inputs,
                         outputs,
                         configuration,
                         do_commands):
        """Build a leaf component whose definition wraps *do_commands*."""
        class LeafDefinition(Entity):
            # Adapter giving a list of do-commands the same accept/position
            # interface as a composed definition.
            def __init__(self):
                self.filename = filename
                self.lineno = do_commands[0].lineno
                self.do_commands = do_commands
            def accept(self, visitor):
                for cmd in self.do_commands:
                    cmd.accept(visitor)
        return Component(filename,
                         lineno,
                         identifier,
                         inputs,
                         outputs,
                         configuration,
                         list(),
                         LeafDefinition(),
                         True)
    @staticmethod
    def getNodeComponent(filename,
                         lineno,
                         identifier,
                         inputs,
                         outputs,
                         configuration,
                         declarations,
                         definition):
        """Build a non-leaf component with *declarations* and *definition*."""
        return Component(filename,
                         lineno,
                         identifier,
                         inputs,
                         outputs,
                         configuration,
                         declarations,
                         definition,
                         False)
    def accept(self, visitor):
        """Visitor entry point: self first, then declarations, then definition."""
        visitor.visit(self)
        for decl in self.declarations:
            decl.accept(visitor)
        self.definition.accept(visitor)
    def __str__(self):
        return str(self.identifier)
    def __repr__(self):
        return "<Component:\n\tname = %s,\n\tinputs = %s,\n\toutputs = %s," \
               "\n\tconfiguration = %s,\n\tdeclarations = %s,\n\tdefinition = %s," \
               "\n\tentity = %s>" % \
               (self.identifier.__repr__(), self.inputs.__repr__(),
                self.outputs.__repr__(), self.configuration.__repr__(),
                self.declarations.__repr__(), self.definition.__repr__(),
                super(Component, self).__repr__())
| gpl-3.0 |
ambitioninc/django-activatable-model | activatable_model/validation.py | 1 | 2222 | from itertools import chain
from activatable_model.models import BaseActivatableModel
from django.apps import apps
from django.core.exceptions import ValidationError
from django.db import models
def get_activatable_models():
    """Return every installed model that subclasses BaseActivatableModel."""
    every_model = chain.from_iterable(
        app_config.get_models() for app_config in apps.get_app_configs()
    )
    return [m for m in every_model if issubclass(m, BaseActivatableModel)]
def validate_activatable_models():
    """
    Raises a ValidationError for any ActivatableModel that has ForeignKeys or OneToOneFields that will
    cause cascading deletions to occur. This function also raises a ValidationError if the activatable
    model has not defined a Boolean field with the field name defined by the ACTIVATABLE_FIELD_NAME variable
    on the model.

    :raises ValidationError: if a model is missing its activatable BooleanField, or if a
        ForeignKey/OneToOneField on a non-cascade-deletable model uses on_delete=CASCADE.
    """
    for model in get_activatable_models():
        # Verify the activatable model has an activatable boolean field
        activatable_field = next((
            f for f in model._meta.fields
            if f.__class__ == models.BooleanField and f.name == model.ACTIVATABLE_FIELD_NAME
        ), None)
        if activatable_field is None:
            raise ValidationError((
                'Model {0} is an activatable model. It must define an activatable BooleanField that '
                'has a field name of model.ACTIVATABLE_FIELD_NAME (which defaults to is_active)'.format(model)
            ))

        # Ensure all foreign keys and onetoone fields will not result in cascade deletions if not cascade deletable
        if not model.ALLOW_CASCADE_DELETE:
            for field in model._meta.fields:
                # Exact class check (not isinstance) is deliberate: OneToOneField
                # subclasses ForeignKey and must not be matched twice.
                if field.__class__ in (models.ForeignKey, models.OneToOneField):
                    if field.remote_field.on_delete == models.CASCADE:
                        # Fixed typo in user-facing message: "explicitely" -> "explicitly".
                        raise ValidationError((
                            'Model {0} is an activatable model. All ForeignKey and OneToOneFields '
                            'must set on_delete methods to something other than CASCADE (the default). '
                            'If you want to explicitly allow cascade deletes, then you must set the '
                            'ALLOW_CASCADE_DELETE=True class variable on your model.'
                        ).format(model))
| mit |
SamYaple/ansible-modules-extras | windows/win_iis_website.py | 69 | 3682 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: win_iis_website
version_added: "2.0"
short_description: Configures an IIS Web site.
description:
  - Creates, removes and configures an IIS Web site
options:
name:
description:
- Names of web site
required: true
default: null
aliases: []
site_id:
description:
- Explicitly set the IIS numeric ID for a site. Note that this value cannot be changed after the website has been created.
required: false
version_added: "2.1"
default: null
state:
description:
- State of the web site
choices:
- started
- restarted
- stopped
- absent
required: false
default: null
aliases: []
physical_path:
description:
- The physical path on the remote host to use for the new site. The specified folder must already exist.
required: false
default: null
aliases: []
application_pool:
description:
- The application pool in which the new site executes.
required: false
default: null
aliases: []
port:
description:
- The port to bind to / use for the new site.
required: false
default: null
aliases: []
ip:
description:
- The IP address to bind to / use for the new site.
required: false
default: null
aliases: []
hostname:
description:
- The host header to bind to / use for the new site.
required: false
default: null
aliases: []
ssl:
description:
      - Enables HTTPS binding on the site.
required: false
default: null
aliases: []
parameters:
description:
      - Custom site parameters from a string where properties are separated by a pipe and property name/values by a colon, e.g. "foo:1|bar:2"
required: false
default: null
aliases: []
author: Henrik Wallström
'''
EXAMPLES = '''
# This return information about an existing host
$ ansible -i vagrant-inventory -m win_iis_website -a "name='Default Web Site'" window
host | success >> {
"changed": false,
"site": {
"ApplicationPool": "DefaultAppPool",
"Bindings": [
"*:80:"
],
"ID": 1,
"Name": "Default Web Site",
"PhysicalPath": "%SystemDrive%\\inetpub\\wwwroot",
"State": "Stopped"
}
}
# This stops an existing site.
$ ansible -i hosts -m win_iis_website -a "name='Default Web Site' state=stopped" host
# This creates a new site.
$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host
# Change logfile .
$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host
# Playbook example
---
- name: Acme IIS site
win_iis_website:
name: "Acme"
state: started
port: 80
ip: 127.0.0.1
hostname: acme.local
application_pool: "acme"
physical_path: 'c:\\sites\\acme'
parameters: 'logfile.directory:c:\\sites\\logs'
register: website
'''
| gpl-3.0 |
valtech-mooc/edx-platform | lms/djangoapps/mobile_api/users/views.py | 8 | 9282 | """
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and
        access other resources the user has permissions for.

        Users are redirected to this endpoint after logging in.

        You can use the **course_enrollments** value in
        the response to get a list of courses the user is enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        * id: The ID of the user.
        * username: The username of the currently logged in user.
        * email: The email address of the currently logged in user.
        * name: The full name of the currently logged in user.
        * course_enrollments: The URI to list the courses the currently logged
          in user is enrolled in.
    """
    # NOTE(review): select_related on 'course_enrollments' targets a reverse
    # relation, which looks suspect for select_related — confirm Django
    # accepts this here and it isn't a silent no-op.
    queryset = (
        User.objects.all()
        .select_related('profile', 'course_enrollments')
    )
    serializer_class = UserSerializer
    # Users are addressed by username in the URL, not by primary key.
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Case**

        Get or update the ID of the module that the specified user last visited in the specified course.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
            body:
                last_visited_module_id={module_id}
                modification_date={date}

        The modification_date is optional. If it is present, the update will only take effect
        if the modification_date is later than the modification_date saved on the server.

    **Response Values**

        * last_visited_module_id: The ID of the last module visited by the user in the course.
        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """

    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        # depth=2 loads course -> chapter -> section state in one cache fill.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)

        course_module = get_module_for_descriptor(request.user, request, course, field_data_cache, course.id)

        # Walk down: course -> current chapter -> current section (stop early
        # if a level has no current child).
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)

        # Deepest module first, course module last.
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [unicode(module.location) for module in path]
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(request.user, request, module_descriptor, field_data_cache, course.id)

        if modification_date:
            # NOTE(review): field_name=None with the course location appears to
            # look up the user's aggregate state row for the course — confirm.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name=None
            )
            student_module = field_data_cache.find(key)
            if student_module:
                original_store_date = student_module.modified
                if modification_date < original_store_date:
                    # old modification date so skip update
                    return self._get_course_info(request, course)

        save_positions_recursively_up(request.user, request, field_data_cache, module)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.DATA.get("last_visited_module_id")
        modification_date_string = request.DATA.get("modification_date")

        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # Reject unparseable or naive (timezone-less) datetimes.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)

        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)

            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses the currently logged in user is
        enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        * created: The date the course was created.
        * mode: The type of certificate registration for this course: honor or
          certified.
        * is_active: Whether the course is currently active; true or false.
        * certificate: Information about the user's earned certificate in the course.
          * url: URL to the downloadable version of the certificate, if exists.
        * course: A collection of data about the course:
          * course_about: The URI to get the data for the course About page.
          * course_updates: The URI to get data for course updates.
          * number: The course number.
          * org: The organization that created the course.
          * video_outline: The URI to get the list of all vides the user can
            access in the course.
          * id: The unique ID of the course.
          * latest_updates: Reserved for future use.
          * end: The end date of the course.
          * name: The name of the course.
          * course_handouts: The URI to get data for course handouts.
          * start: The data and time the course starts.
          * course_image: The path to the course image.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        # Active enrollments for the URL's user, most recent first.
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        # Drop enrollments whose course was deleted or is not visible on
        # mobile for the requesting user.
        return [
            enrollment for enrollment in enrollments
            if enrollment.course and is_mobile_available_for_user(self.request.user, enrollment.course)
        ]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """Redirect the caller to their own user-detail page."""
    username = request.user.username
    return redirect("user-detail", username=username)
| agpl-3.0 |
sankhesh/VTK | Rendering/Core/Testing/Python/TestCameraInterpolator.py | 20 | 3316 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Elevation color map: HSV ramp from hue 0.6 (blue, low scalars) to 0 (red,
# high scalars), darker at the low end.
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)

# Read the data: a height field results
demReader = vtk.vtkDEMReader()
demReader.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demReader.Update()

lo = demReader.GetOutput().GetScalarRange()[0]
hi = demReader.GetOutput().GetScalarRange()[1]

# Turn the image into polygons and warp by elevation to get terrain geometry.
surface = vtk.vtkImageDataGeometryFilter()
surface.SetInputConnection(demReader.GetOutputPort())

warp = vtk.vtkWarpScalar()
warp.SetInputConnection(surface.GetOutputPort())
warp.SetScaleFactor(1)
warp.UseNormalOn()
warp.SetNormal(0, 0, 1)
warp.Update()

normals = vtk.vtkPolyDataNormals()
normals.SetInputData(warp.GetPolyDataOutput())
normals.SetFeatureAngle(60)
normals.SplittingOff()

demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(lo, hi)
demMapper.SetLookupTable(lut)

demActor = vtk.vtkLODActor()
demActor.SetMapper(demMapper)

# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
#
ren1.AddActor(demActor)
# NOTE(review): background is set twice; the second call below wins.
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
ren1.SetBackground(0.1, 0.2, 0.4)

# render the image
#
renWin.Render()

# Five hand-picked key-frame cameras along a fly-through path.
view1 = vtk.vtkCamera()
view1.SetClippingRange(30972.2, 35983.7)
view1.SetFocalPoint(562835, 5.11498e+006, 2294.5)
view1.SetPosition(562835, 5.11498e+006, 35449.9)
view1.SetViewAngle(30)
view1.SetViewUp(0, 1, 0)

view2 = vtk.vtkCamera()
view2.SetClippingRange(9013.43, 13470.4)
view2.SetFocalPoint(562835, 5.11498e+006, 2294.5)
view2.SetPosition(562835, 5.11498e+006, 13269.4)
view2.SetViewAngle(30)
view2.SetViewUp(0, 1, 0)

view3 = vtk.vtkCamera()
view3.SetClippingRange(4081.2, 13866.4)
view3.SetFocalPoint(562853, 5.11586e+006, 2450.05)
view3.SetPosition(562853, 5.1144e+006, 10726.6)
view3.SetViewAngle(30)
view3.SetViewUp(0, 0.984808, 0.173648)

view4 = vtk.vtkCamera()
view4.SetClippingRange(14.0481, 14048.1)
view4.SetFocalPoint(562880, 5.11652e+006, 2733.15)
view4.SetPosition(562974, 5.11462e+006, 6419.98)
view4.SetViewAngle(30)
view4.SetViewUp(0.0047047, 0.888364, 0.459116)

view5 = vtk.vtkCamera()
view5.SetClippingRange(14.411, 14411)
view5.SetFocalPoint(562910, 5.11674e+006, 3027.15)
view5.SetPosition(562414, 5.11568e+006, 3419.87)
view5.SetViewAngle(30)
view5.SetViewUp(-0.0301976, 0.359864, 0.932516)

# Spline-interpolate between the key frames; parameter t spans 0..11.
interpolator = vtk.vtkCameraInterpolator()
interpolator.SetInterpolationTypeToSpline()
interpolator.AddCamera(0, view1)
interpolator.AddCamera(5, view2)
interpolator.AddCamera(7.5, view3)
interpolator.AddCamera(9.0, view4)
interpolator.AddCamera(11.0, view5)

camera = vtk.vtkCamera()
ren1.SetActiveCamera(camera)
def animate():
    """Sweep the active camera along the interpolated key-frame path.

    Evaluates the interpolator at num_steps + 1 evenly spaced parameter
    values spanning [GetMinimumT(), GetMaximumT()], rendering each frame.
    """
    num_steps = 500
    # Renamed from min/max to avoid shadowing the builtins.
    t_min = interpolator.GetMinimumT()
    t_max = interpolator.GetMaximumT()
    for i in range(num_steps + 1):
        # Offset by t_min so the sweep is correct even when the first
        # keyframe is not at t == 0 (the original dropped this term).
        t = t_min + float(i) * (t_max - t_min) / float(num_steps)
        interpolator.InterpolateCamera(t, camera)
        renWin.Render()
# Render a single frame at a fixed parameter value; the full sweep and the
# interactive loop are left disabled so the test runs non-interactively.
interpolator.InterpolateCamera(8.2, camera)
# animate()
#iren.Start()
| bsd-3-clause |
k0001/mscrap | mscrap/mscrap/constants.py | 1 | 16105 | # -*- coding: utf-8 -*-
DISTRITOS_NORM = {
u'rio negro' : u'Río Negro',
u'catamarca' : u'Catamarca',
u'santiago del estero' : u'Santiago del Estero',
u'la rioja' : u'La Rioja',
u'buenos aires' : u'Buenos Aires',
u'cordoba' : u'Córdoba',
u'santa fe' : u'Santa Fé',
u'entre rios' : u'Entre Ríos',
u'santa cruz' : u'Santa Cruz',
u'tucuman' : u'Tucumán',
u'ciudad de buenos aires' : u'Ciudad Autónoma de Buenos Aires',
u'ciudad autonoma de buenos aires' : u'Ciudad Autónoma de Buenos Aires',
u'corrientes' : u'Corrientes',
u'mendoza' : u'Mendoza',
u'tierra del fuego' : u'Tierra del Fuego',
u'tierra del fuego, antartida e islas del atlantico sur'
: u'Tierra del Fuego',
u'jujuy' : u'Jujuy',
u'san luis' : u'San Luis',
u'neuquen' : u'Neuquen',
u'formosa' : u'Formosa',
u'chubut' : u'Chubut',
u'san juan' : u'San Juan',
u'salta' : u'Salta',
u'chaco' : u'Chaco',
u'la pampa' : u'La Pampa',
u'misiones' : u'Misiones' }
FIRMANTES_SPECIAL_NORM = {
u'parlamentaria mixta revisora de cuentas de la administracion' :
u'Comisión Parlamentaria Mixta Revisora de Cuentas de la Administración',
u'h camara de diputados':
u'Cámara de Diputados',
u'bicameral permanente de tramite legislativo - ley 26122':
u'Comisión Bicamiral Permanente de Trámite Legislativo' }
BLOQUES_NORM = {
u'accion republica' : u'Acción Republicana',
u'alianza coalicion civica' : u'Alianza Coalición Cívica',
u'ari autonomo 8 +' : u'ARI Autónomo 8 +',
u'ari' : u'ARI',
u'autodeterminacion y libertad' : u'Autodeterminación y Libertad',
u'bloque buenos aires' : u'Bloque Buenos Aires',
u'bloque tucuman' : u'Bloque Tucumán',
u'buenos aires para todos en proyecto sur' : u'Buenos Aires para Todos en Proyecto Sur',
u'buenos aires por el cambio' : u'Buenos Aires por el Cambio',
u'coalicion civica - ari - gen - upt' : u'Coalición Cívica - ARI - GEN - UPT',
u'coalicion civica - ari' : u'Coalición Cívica - ARI',
u'coalicion civica' : u'Alianza Coalición Cívica',
u'compromiso para el cambio' : u'Compromiso para el Cambio',
u'concertacion entrerriana' : u'Concertación Entrerriana',
u'concertacion plural' : u'Concertación Plural',
u'consenso federal' : u'Consenso Federal',
u'convergencia federal' : u'Convergencia Federal',
u'convergencia' : u'Convergencia',
u'cordoba federal' : u'Córdoba Federal',
u'corriente de pensamiento federal' : u'Corriente de Pensamiento Federal',
u'corriente peronista federal' : u'Corriente Peronista Federal',
u'cruzada renov' : u'Cruzada Renovadora',
u'de la concertacion' : u'De la Concertación',
u'democracia igualitaria y participativa (d.i.p.)' : u'Democracia Igualitaria y Participativa (D.I.P.)',
u'democrata de mendoza' : u'Demócrata de Mendoza',
u'democrata progresista' : u'Demócrata prograsista',
u'democrata' : u'Demócrata',
u'dialogo por buenos aires' : u'Dialogo por Buenos Aires',
u'dignidad peronista' : u'Dignidad Peronista',
u'dignidad y justicia' : u'Dignidad y Justicia',
u'emancipacion y justicia' : u'Emancipacion y Justicia',
u'encuentro de los rionegrinos' : u'Encuentro de los Rionegrinos',
u'encuentro popular' : u'Encuentro Popular',
u'encuentro popular y social' : u'Encuentro Popular y Social',
u'encuentro' : u'Encuentro',
u'esperanza federal' : u'Esperanza Federal',
u'eva peron' : u'Eva Perón',
u'federalismo santafesino' : u'Federalismo Santafesino',
u'federalismo y liberacion' : u'Federalismo y Liberacion',
u'federal santafesino' : u'Federal Santafesino',
u'fiscal' : u'Fiscal',
u'frente civico - cordoba' : u'Frente Cívico - Córdoba',
u'frente civico de la provincia de cordoba' : u'Frente Civico de la Provincia de Cordoba',
u'frente civico jujeño' : u'Frente Cívico Jujeño',
u'frente civico por santiago' : u'Frente Cívico por Santiago',
u'frente civico tucuman' : u'Frente Cívico Tucumán',
u'frente civico y social de catamarca' : u'Frente Cívico y Social de Catamarca',
u'frente civico y social' : u'Frente Cívico y Social',
u'frente del movimiento popular' : u'Frente del Movimiento Popular',
u'frente de todos' : u'Frente De Todos',
u'frente de unidad provincial' : u'Frente de Unidad Provincial',
u'frente grande' : u'Frente Grande',
u'frente justicia union y libertad - frejuli' : u'Frente Justicia Unión y Libertad - FREJULI',
u'frente movimiento popular' : u'Frente Movimiento Popular',
u'frente para el cambio' : u'Frente Para el Cambio',
u'frente para la victoria - partido bloquista' : u'Frente para la Victoria - Partido Bloquista',
u'frente para la victoria - pj' : u'Frente para la Victoria - PJ',
u'frente peronista federal' : u'Frente Peronista Federal',
u'frente peronista' : u'Frente Peronista',
u'frente popular bonaerense' : u'Frente Popular Bonaerense',
u'frente por los derechos ciudadanos' : u'Frente por los Derechos Ciudadanos',
u'frente produccion y trabajo' : u'Frente Producción y Trabajo',
u'frepaso' : u'FREPASO',
u'fza republicana' : u'Fuerza Republicana',
u'gen' : u'GEN',
u'guardia peronista' : u'Guardia Peronista',
u'izquierda unida' : u'Izquierda Unida',
u'justicialismo republicano' : u'Justicialismo Republicano',
u'justicialista 17 de octubre' : u'Justicialista 17 de Octubre',
u'justicialista 8 de octubre' : u'Justicialista 8 de Octubre',
u'justicialista dialog ar' : u'Justicialista DIALOG AR',
u'justicialista nacional' : u'Justicialista Nacional',
u'justicialista para el dialogo de los argentinos' : u'Justicialista para el Dialogo de los Argentinos',
u'justicialista san luis' : u'Justicialista San Luis',
u'justicialista' : u'Justicialista',
u'la argentina de los valores' : u'La Argentina de los Valores',
u'lealtad y dignidad justicialista' : u'Lealtad y Dignidad Justicialista',
u'libres del sur' : u'Libres del Sur',
u'memoria y democracia' : u'Memoria y Democracia',
u'mmta' : u'MMTA',
u'movimiento independiente' : u'Movimiento Independiente',
u'movimiento pampa sur' : u'Movimiento Pampa Sur',
u'movimiento popular neuquino' : u'Movimiento Popular Neuquino',
u'movimiento proyecto sur' : u'Movimiento Proyecto Sur',
u'mov pop fueguino' : u'Movimiento Popular Fueguino',
u'mov pop neuquino' : u'Movimiento Popular Neuquino',
u'nacional sindical' : u'Nacional Sindical',
u'no integra bloque' : None,
u'-' : None,
u'nucleo unidad peronista' : u'Nucleo Unidad Peronista',
u'nuevo encuentro popular y solidario' : u'Nuevo Encuentro Popular y Solidario',
u'nuevo encuentro' : u'Nuevo Encuentro',
u'nuevo espacio entrerriano' : u'Nuevo Espacio Entrerriano',
u'partido de la concertacion - forja' : u'Partido de la Concertación - Forja',
u'partido de la victoria' : u'Partido de la Victoria',
u'partido federal fueguino' : u'Partido Federal Fueguino',
u'partido intransigente' : u'Partido Intransigente',
u'partido justicialista la pampa' : u'Partido Justicialista La Pampa',
u'partido liberal de corrientes' : u'Partido Liberal de Corrientes',
u'partido nuevo contra corrup. por honest. y transp.' : u'Partido Nuevo contra Corrupción por la Honestidad y la Transparencia',
u'partido nuevo' : u'Partido Nuevo',
u'partido renovador de salta' : u'Partido Renovador de Salta',
u'partido socialista' : u'Partido Socialista',
u'peronismo federal' : u'Peronismo Federal',
u'peronismo jujeno' : u'Peronismo Jujeño',
u'peronismo peronista' : u'Peronismo Peronista',
u'peronismo popular' : u'Peronismo Popular',
u'peronista cordoba ciudadana' : u'Peronista Córdoba Ciudadana',
u'peronista federal' : u'Peronista Federal',
u'peronista salteno' : u'Peronista Salteño',
u'peronista' : u'Peronista',
u'pj frente para la victoria' : u'PJ - Frente para la Victoria',
u'polo social' : u'Polo Social',
u'por la verdad' : u'Por la Verdad',
u'por tierra del fuego' : u'Por Tierra del Fuego',
u'produccion y trabajo' : u'Produccion y Trabajo',
u'propuesta peronista' : u'Propuesta Peronista',
u'propuesta republicana' : u'Propuesta Republicana',
u'pro' : u'PRO',
u'proyecto buenos aires federal' : u'Proyecto Buenos Aires Federal',
u'proyecto corrientes' : u'Proyecto Corrientes',
u'proyecto progresista' : u'Proyecto Progresista',
u'proyecto social alternativo' : u'Proyecto Social Alternativo',
u'puntano independiente' : u'Puntano Independiente',
u'radical independiente' : u'Radical Independiente',
u'radical rionegrino' : u'Radical Rionegrino',
u'recrear para el crecimiento' : u'Recrear para el Crecimiento',
u'redes' : u'Renovación y Desarrollo Social (REDES)',
u'renovador de salta' : u'Renovador de Salta',
u'renovador' : u'Renovador',
u'salta somos todos' : u'Salta Somos Todos',
u'santa fe en movimiento' : u'Santa Fé en Movimiento',
u'santa fe federal' : u'Santa Fé Federal',
u'si por la unidad popular' : u'SI por la Unidad Popular',
u'soberania popular' : u'Soberania Popular',
u'socialista del mijd' : u'Socialista del MIJD',
u'social patagonico' : u'Social Patagonico',
u'solidaridad e igualdad (si) - ari (t.d.f.)' : u'Solaridad e Igualdad (SI) - ARI (TDF)',
u'solidaridad e igualdad (si) - ari (tdf) ' : u'Solaridad e Igualdad (SI) - ARI (TDF)',
u'solidaridad e igualdad (si) - proyecto progresista' : u'Solaridad e Igualdad (SI) - Proyecto Progresista',
u'solidaridad e igualdad (si)' : u'Solaridad e Igualdad (SI)',
u'trabajo y dignidad' : u'Trabajo y Dignidad',
u'ucede' : u'UCEDE',
u'ucr' : u'Unión Civica Radical',
u'udeso salta' : u'UDESO Salta',
u'unidad federalista' : u'Unidad Federalista',
u'unidad para el desarrollo social y la equidad' : u'Unidad para el Desarrollo Social y la Equidad',
u'unidad popular' : u'Unidad Popular',
u'union celeste y blanca' : u'Unión Celeste y Blanca',
u'union celeste y blanco' : u'Unión Celeste y Blanco',
u'union civica radical' : u'Unión Civica Radical',
u'union peronista' : u'Unión Peronista',
u'union por argentina' : u'Unión por Argentina',
u'union por san juan' : u'Unión por San Juan',
u'union por todos' : u'Union por Todos',
u"unipersonal 'ricardo balbin'" : u'Unipersonal Ricardo Balbin',
u'unipersonal' : u'Unipersonal',
u'valores para mi pais' : u'Valores para mi País',
u'vecinalista - partido nuevo' : u'Vecinalista - Partido Nuevo',
u'vida y compromiso' : u'Vida y Compromiso'
}
| bsd-3-clause |
lahosken/pants | src/python/pants/stats/statsdb.py | 16 | 5058 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sqlite3
from contextlib import contextmanager
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir_for
class StatsDBError(Exception):
  """Raised when stats cannot be written to or read from the stats database."""
class StatsDBFactory(Subsystem):
  """Subsystem that constructs StatsDB instances from pants options."""

  options_scope = 'statsdb'

  @classmethod
  def register_options(cls, register):
    super(StatsDBFactory, cls).register_options(register)
    # Default lives under the bootstrap dir so stats persist across workdirs.
    register('--path',
             default=os.path.join(register.bootstrap.pants_bootstrapdir, 'stats', 'statsdb.sqlite'),
             help='Location of statsdb file.')

  def get_db(self):
    """Returns a StatsDB instance configured by this factory."""
    ret = StatsDB(self.get_options().path)
    # Ensure the schema exists before handing the DB to callers.
    ret.ensure_tables()
    return ret
class StatsDB(object):
  """Stores and queries pants run statistics in a local SQLite database."""

  def __init__(self, path):
    # path: filesystem location of the sqlite file; created lazily on first use.
    super(StatsDB, self).__init__()
    self._path = path

  def ensure_tables(self):
    """Create the schema (tables and indexes) if it doesn't already exist.

    Safe to call repeatedly: all DDL uses IF NOT EXISTS.
    """
    with self._cursor() as c:
      def create_index(tab, col):
        # One index per (table, column), named deterministically so repeated
        # calls are idempotent.
        c.execute("""CREATE INDEX IF NOT EXISTS {tab}_{col}_idx ON {tab}({col})""".format(
          tab=tab, col=col))

      c.execute("""
      CREATE TABLE IF NOT EXISTS run_info (
        id TEXT PRIMARY KEY,
        timestamp INTEGER, -- Seconds since the epoch.
        machine TEXT,
        user TEXT,
        version TEXT,
        buildroot TEXT,
        outcome TEXT,
        cmd_line TEXT
      )
      """)
      create_index('run_info', 'cmd_line')

      def create_timings_table(tab):
        # Both timing tables share the same shape; rows reference run_info.
        c.execute("""
        CREATE TABLE IF NOT EXISTS {tab} (
          run_info_id TEXT,
          label TEXT,
          timing INTEGER, -- Milliseconds
          FOREIGN KEY (run_info_id) REFERENCES run_info(id)
        )
        """.format(tab=tab))
        create_index(tab, 'label')

      create_timings_table('cumulative_timings')
      create_timings_table('self_timings')

  def insert_stats(self, stats):
    """Insert one run's stats dict (run_info plus timing tables).

    :raises StatsDBError: if any expected key is missing from the stats object.
    """
    try:
      with self._cursor() as c:
        ri = stats['run_info']
        try:
          c.execute("""INSERT INTO run_info VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
                    [ri['id'], int(float(ri['timestamp'])), ri['machine'], ri['user'],
                     ri['version'], ri['buildroot'], ri['outcome'], ri['cmd_line']])
        except KeyError as e:
          raise StatsDBError('Failed to insert stats. Key {} not found in RunInfo: {}'.format(
            e.args[0], str(ri)))
        rid = ri['id']
        for table in ['cumulative_timings', 'self_timings']:
          timings = stats[table]
          for timing in timings:
            try:
              c.execute("""INSERT INTO {} VALUES (?, ?, ?)""".format(table),
                        [rid, timing['label'], self._to_ms(timing['timing'])])
            except KeyError as e:
              raise StatsDBError('Failed to insert stats. Key {} not found in timing: {}'.format(
                e.args[0], str(timing)))
    except KeyError as e:
      raise StatsDBError('Failed to insert stats. Key {} not found in stats object.'.format(
        e.args[0]))

  def get_stats_for_cmd_line(self, timing_table, cmd_line_like):
    """Returns a generator over all (label, timing) pairs for a given cmd line.

    :param timing_table: One of 'cumulative_timings' or 'self_timings'.
    :param cmd_line_like: Look at all cmd lines that are LIKE this string, in the sql sense.
    """
    with self._cursor() as c:
      for row in c.execute("""
        SELECT t.label, t.timing
        FROM {} AS t INNER JOIN run_info AS ri ON (t.run_info_id=ri.id)
        WHERE ri.cmd_line LIKE ?
      """.format(timing_table), [cmd_line_like]):
        yield row

  def get_aggregated_stats_for_cmd_line(self, timing_table, cmd_line_like):
    """Returns a generator over aggregated stats for a given cmd line.

    :param timing_table: One of 'cumulative_timings' or 'self_timings'.
    :param cmd_line_like: Look at all cmd lines that are LIKE this string, in the sql sense.
    """
    with self._cursor() as c:
      for row in c.execute("""
        SELECT date(ri.timestamp, 'unixepoch') as dt, t.label as label, count(*), sum(t.timing)
        FROM {} AS t INNER JOIN run_info AS ri ON (t.run_info_id=ri.id)
        WHERE ri.cmd_line LIKE ?
        GROUP BY dt, label
        ORDER BY dt, label
      """.format(timing_table), [cmd_line_like]):
        yield row

  @staticmethod
  def _to_ms(timing_secs):
    """Convert a string representing a float of seconds to an int representing milliseconds."""
    # + 0.5 rounds (half-up) for the non-negative timings stored here.
    return int(float(timing_secs) * 1000 + 0.5)

  @contextmanager
  def _connection(self):
    # Create the containing directory on first use; commit on clean exit.
    safe_mkdir_for(self._path)
    conn = sqlite3.connect(self._path)
    yield conn
    conn.commit()
    conn.close()

  @contextmanager
  def _cursor(self):
    with self._connection() as conn:
      yield conn.cursor()
| apache-2.0 |
bhupennewalkar1337/erpnext | erpnext/accounts/doctype/sales_invoice/pos.py | 2 | 10968 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import nowdate
from erpnext.setup.utils import get_exchange_rate
from erpnext.stock.get_item_details import get_pos_profile
from erpnext.accounts.party import get_party_account_currency
from erpnext.controllers.accounts_controller import get_taxes_and_charges
@frappe.whitelist()
def get_pos_data():
    """Assemble everything the POS front-end needs to boot, in one payload.

    Builds an unsaved Sales Invoice pre-configured from the active POS
    Profile, and bundles master data (items, customers, serial/batch data,
    item taxes, prices, stock levels, pricing rules), the print template,
    and doctype meta so the POS can operate offline.
    """
    doc = frappe.new_doc('Sales Invoice')
    doc.is_pos = 1  # removed stray trailing semicolon
    pos_profile = get_pos_profile(doc.company) or {}
    if not doc.company:
        doc.company = pos_profile.get('company')

    doc.update_stock = pos_profile.get('update_stock')

    # Re-fetch the full POS Profile document when one matched.
    if pos_profile.get('name'):
        pos_profile = frappe.get_doc('POS Profile', pos_profile.get('name'))

    company_data = get_company_data(doc.company)
    update_pos_profile_data(doc, pos_profile, company_data)
    update_multi_mode_option(doc, pos_profile)
    default_print_format = pos_profile.get('print_format') or "Point of Sale"
    print_template = frappe.db.get_value('Print Format', default_print_format, 'html')

    return {
        'doc': doc,
        'default_customer': pos_profile.get('customer'),
        'items': get_items_list(pos_profile),
        'customers': get_customers_list(pos_profile),
        'serial_no_data': get_serial_no_data(pos_profile, doc.company),
        'batch_no_data': get_batch_no_data(),
        'tax_data': get_item_tax_data(),
        'price_list_data': get_price_list_data(doc.selling_price_list),
        'bin_data': get_bin_data(pos_profile),
        'pricing_rules': get_pricing_rule_data(doc),
        'print_template': print_template,
        'pos_profile': pos_profile,
        'meta': {
            'invoice': frappe.get_meta('Sales Invoice'),
            'items': frappe.get_meta('Sales Invoice Item'),
            'taxes': frappe.get_meta('Sales Taxes and Charges')
        }
    }
def get_company_data(company):
    """Return every field of the given Company as a single record."""
    rows = frappe.get_all('Company', fields=["*"], filters={'name': company})
    return rows[0]
def update_pos_profile_data(doc, pos_profile, company_data):
    """Copy defaults from the POS Profile onto the draft invoice, falling
    back to company-level defaults where the profile leaves a field blank.

    Mutates doc in place; statement order matters because later fields
    (taxes, conversion rate) depend on earlier ones.
    """
    doc.campaign = pos_profile.get('campaign')
    doc.write_off_account = pos_profile.get('write_off_account') or \
        company_data.write_off_account
    doc.change_amount_account = pos_profile.get('change_amount_account') or \
        company_data.default_cash_account
    doc.taxes_and_charges = pos_profile.get('taxes_and_charges')
    if doc.taxes_and_charges:
        update_tax_table(doc)
    doc.currency = pos_profile.get('currency') or company_data.default_currency
    doc.conversion_rate = 1.0
    # Only fetch an exchange rate when the invoice currency differs from
    # the company's default currency.
    if doc.currency != company_data.default_currency:
        doc.conversion_rate = get_exchange_rate(doc.currency, company_data.default_currency)
    doc.selling_price_list = pos_profile.get('selling_price_list') or \
        frappe.db.get_value('Selling Settings', None, 'selling_price_list')
    doc.naming_series = pos_profile.get('naming_series') or 'SINV-'
    doc.letter_head = pos_profile.get('letter_head') or company_data.default_letter_head
    doc.ignore_pricing_rule = pos_profile.get('ignore_pricing_rule') or 0
    doc.apply_discount_on = pos_profile.get('apply_discount_on') if pos_profile.get('apply_discount') else ''
    doc.customer_group = pos_profile.get('customer_group') or get_root('Customer Group')
    doc.territory = pos_profile.get('territory') or get_root('Territory')
def get_root(table):
    """Return the name of the root node of a nested-set DocType tree.

    NOTE(review): the table name is interpolated directly into the SQL
    string; this is only safe because callers pass fixed literals
    ('Customer Group', 'Territory'). 'having min(lft)' without GROUP BY
    relies on non-standard MySQL behavior to yield the row with the
    smallest lft (the tree root) -- confirm against the deployed DB.
    """
    root = frappe.db.sql(""" select name from `tab%(table)s` having
        min(lft)"""%{'table': table}, as_dict=1)
    return root[0].name
def update_multi_mode_option(doc, pos_profile):
    """Populate doc.payments, either from the POS Profile's configured
    payment modes or from every Mode of Payment defined for the company."""
    from frappe.model import default_fields

    if not pos_profile or not pos_profile.get('payments'):
        # No profile-level payment modes: fall back to all modes of
        # payment that have an account for this company.
        for payment in get_mode_of_payment(doc):
            row = doc.append('payments', {})
            row.mode_of_payment = payment.parent
            row.account = payment.default_account
            row.type = payment.type
        return

    for payment_mode in pos_profile.payments:
        mode = payment_mode.as_dict()
        # Strip framework bookkeeping fields so the row is appended as a
        # brand-new child record.
        for fieldname in default_fields:
            mode.pop(fieldname, None)
        doc.append('payments', mode)
def get_mode_of_payment(doc):
    """Return (default_account, parent, type) rows for every Mode of
    Payment that has an account configured for the invoice's company."""
    return frappe.db.sql(""" select mpa.default_account, mpa.parent, mp.type as type from `tabMode of Payment Account` mpa,
        `tabMode of Payment` mp where mpa.parent = mp.name and mpa.company = %(company)s""", {'company': doc.company}, as_dict=1)
def update_tax_table(doc):
    """Copy the rows of the selected Sales Taxes and Charges Template
    into the invoice's taxes child table."""
    template_rows = get_taxes_and_charges('Sales Taxes and Charges Template', doc.taxes_and_charges)
    for row in template_rows:
        doc.append('taxes', row)
def get_items_list(pos_profile):
    """Return all enabled, saleable, non-variant items; restricted to the
    item groups configured on the POS Profile when any are set.

    The IN-clause placeholders are built dynamically and the values are
    passed as SQL parameters, so group names are properly escaped.
    NOTE(review): expense_account appears twice in the column list.
    """
    cond = "1=1"
    item_groups = []
    if pos_profile.get('item_groups'):
        # Get items based on the item groups defined in the POS profile
        cond = "item_group in (%s)"%(', '.join(['%s']*len(pos_profile.get('item_groups'))))
        item_groups = [d.item_group for d in pos_profile.get('item_groups')]
    return frappe.db.sql("""
        select
            name, item_code, item_name, description, item_group, expense_account, has_batch_no,
            has_serial_no, expense_account, selling_cost_center, stock_uom, image,
            default_warehouse, is_stock_item
        from
            tabItem
        where
            disabled = 0 and has_variants = 0 and is_sales_item = 1 and {cond}
        """.format(cond=cond), tuple(item_groups), as_dict=1)
def get_customers_list(pos_profile):
    """Return all enabled customers, restricted to the customer groups
    configured on the POS Profile when any are set.

    The IN-clause placeholders are built dynamically and the group names
    are passed as SQL parameters.
    """
    cond = "1=1"
    customer_groups = []
    if pos_profile.get('customer_groups'):
        # Get customers based on the customer groups defined in the POS profile
        cond = "customer_group in (%s)"%(', '.join(['%s']*len(pos_profile.get('customer_groups'))))
        customer_groups = [d.customer_group for d in pos_profile.get('customer_groups')]
    return frappe.db.sql(""" select name, customer_name, customer_group,
        territory from tabCustomer where disabled = 0
        and {cond}""".format(cond=cond), tuple(customer_groups), as_dict=1) or {}
def get_serial_no_data(pos_profile, company):
    """Return serial numbers grouped by item code.

    Example: {'Nokia Lumia 1020': {'SN0001': 'Pune'}} -- the inner mapping
    is serial no -> warehouse. When the profile updates stock and names a
    warehouse, only serial numbers in that warehouse are returned.
    """
    cond = "1=1"
    values = {'company': company}
    if pos_profile.get('update_stock') and pos_profile.get('warehouse'):
        # Pass the warehouse as a query parameter instead of interpolating
        # it into the SQL string (avoids SQL injection / quoting bugs for
        # warehouse names containing quotes).
        cond = "warehouse = %(warehouse)s"
        values['warehouse'] = pos_profile.get('warehouse')
    serial_nos = frappe.db.sql("""select name, warehouse, item_code from `tabSerial No` where {0}
        and company = %(company)s """.format(cond), values, as_dict=1)
    itemwise_serial_no = {}
    for sn in serial_nos:
        # setdefault returns the per-item dict, new or existing.
        itemwise_serial_no.setdefault(sn.item_code, {})[sn.name] = sn.warehouse
    return itemwise_serial_no
def get_batch_no_data():
    """Return unexpired batch names keyed by item code.

    Example: {'LED-GRE': ['Batch001', 'Batch002']}. Batches with no
    expiry date are treated as far-future ('4000-10-10') and included.
    """
    batches = frappe.db.sql("""select name, item from `tabBatch`
        where ifnull(expiry_date, '4000-10-10') >= curdate()""", as_dict=1)
    itemwise_batch = {}
    for row in batches:
        itemwise_batch.setdefault(row.item, []).append(row.name)
    return itemwise_batch
def get_item_tax_data():
    """Return the default tax rates per item.

    Example: {'Consulting Services': {'Excise 12 - TS': '12.000'}} --
    the inner mapping is tax_type account -> tax_rate.
    """
    tax_rows = frappe.db.sql(""" select parent, tax_type, tax_rate from `tabItem Tax`""", as_dict=1)
    itemwise_tax = {}
    for row in tax_rows:
        itemwise_tax.setdefault(row.parent, {})[row.tax_type] = row.tax_rate
    return itemwise_tax
def get_price_list_data(selling_price_list):
    """Return {item_code: price_list_rate} for the given selling price
    list; missing rates come back as 0."""
    rows = frappe.db.sql("""Select ifnull(price_list_rate, 0) as price_list_rate,
        item_code from `tabItem Price` ip where price_list = %(price_list)s""",
        {'price_list': selling_price_list}, as_dict=1)
    return dict((row.item_code, row.price_list_rate) for row in rows)
def get_bin_data(pos_profile):
    """Return positive stock levels per item: {item_code: {warehouse: qty}}.

    When the POS Profile names a warehouse, only that warehouse's bins
    are returned.
    """
    itemwise_bin_data = {}
    cond = "1=1"
    values = {}
    if pos_profile.get('warehouse'):
        # Pass the warehouse as a query parameter instead of interpolating
        # it into the SQL string (avoids SQL injection / quoting bugs).
        cond = "warehouse = %(warehouse)s"
        values['warehouse'] = pos_profile.get('warehouse')
    bin_data = frappe.db.sql(""" select item_code, warehouse, actual_qty from `tabBin`
        where actual_qty > 0 and {cond}""".format(cond=cond), values, as_dict=1)
    for row in bin_data:
        itemwise_bin_data.setdefault(row.item_code, {})[row.warehouse] = row.actual_qty
    return itemwise_bin_data
def get_pricing_rule_data(doc):
    """Return active selling Pricing Rules valid today for the invoice's
    company and price list.

    Returns an empty string (not a list) when the invoice is set to
    ignore pricing rules -- callers must handle both shapes.
    """
    pricing_rules = ""
    if doc.ignore_pricing_rule == 0:
        pricing_rules = frappe.db.sql(""" Select * from `tabPricing Rule` where docstatus < 2
            and ifnull(for_price_list, '') in (%(price_list)s, '') and selling = 1
            and ifnull(company, '') in (%(company)s, '') and disable = 0 and %(date)s
            between ifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31')
            order by priority desc, name desc""",
            {'company': doc.company, 'price_list': doc.selling_price_list, 'date': nowdate()}, as_dict=1)
    return pricing_rules
@frappe.whitelist()
def make_invoice(doc_list):
    """Create submitted Sales Invoices from a batch of offline POS docs.

    doc_list is a list of {offline_pos_name: invoice_dict} mappings (or
    its JSON encoding when called over HTTP). Invoices already synced
    (docstatus < 2) are skipped but still reported as synced. Returns the
    list of offline names now present on the server.
    """
    if isinstance(doc_list, basestring):
        doc_list = json.loads(doc_list)
    name_list = []
    for docs in doc_list:
        for name, doc in docs.items():
            if not frappe.db.exists('Sales Invoice',
                {'offline_pos_name': name, 'docstatus': ("<", "2")}):
                # Create missing customer/items first, then insert+submit.
                validate_records(doc)
                si_doc = frappe.new_doc('Sales Invoice')
                si_doc.offline_pos_name = name
                si_doc.update(doc)
                submit_invoice(si_doc, name)
                name_list.append(name)
            else:
                # Already synced earlier; acknowledge so the client can
                # drop it from its offline queue.
                name_list.append(name)
    return name_list
def validate_records(doc):
    """Ensure the customer and every item referenced by an offline
    invoice exist, creating them on the fly when missing."""
    validate_customer(doc)
    validate_item(doc)
def validate_customer(doc):
    """Create the Customer referenced by an offline invoice when it does
    not exist yet, and rewrite doc['customer'] to the persisted name."""
    if frappe.db.exists('Customer', doc.get('customer')):
        return
    customer_doc = frappe.new_doc('Customer')
    customer_doc.customer_name = doc.get('customer')
    customer_doc.customer_type = 'Company'
    customer_doc.customer_group = doc.get('customer_group')
    customer_doc.territory = doc.get('territory')
    customer_doc.save(ignore_permissions=True)
    frappe.db.commit()
    # The saved document may have been renamed by naming rules.
    doc['customer'] = customer_doc.name
def validate_item(doc):
    """Create any Item referenced by an offline invoice that is missing
    on the server, copying the fields captured at the POS."""
    for row in doc.get('items'):
        if frappe.db.exists('Item', row.get('item_code')):
            continue
        item_doc = frappe.new_doc('Item')
        item_doc.name = row.get('item_code')
        item_doc.item_code = row.get('item_code')
        item_doc.item_name = row.get('item_name')
        item_doc.description = row.get('description')
        item_doc.default_warehouse = row.get('warehouse')
        item_doc.stock_uom = row.get('stock_uom')
        item_doc.item_group = row.get('item_group')
        item_doc.save(ignore_permissions=True)
        frappe.db.commit()
def submit_invoice(si_doc, name):
    """Insert and submit an offline Sales Invoice.

    On any failure the transaction is rolled back and the invoice is kept
    as a draft via save_invoice() so the offline sale is not lost.
    """
    try:
        si_doc.insert()
        si_doc.submit()
    except Exception as e:
        # 'except Exception, e' (the original) is Python-2-only syntax;
        # 'as e' is valid on Python 2.6+ and 3.x alike.
        if frappe.message_log:
            # Drop the framework's queued error message so the client
            # does not see a misleading popup for a handled failure.
            frappe.message_log.pop()
        frappe.db.rollback()
        save_invoice(e, si_doc, name)
def save_invoice(e, si_doc, name):
    """Fallback used by submit_invoice(): keep the failed invoice as a
    draft (docstatus 0) so the sale can be fixed manually, and log the
    traceback of the failure.

    NOTE(review): the 'e' parameter is unused; the traceback is taken
    from frappe.get_traceback() instead -- confirm that is intentional.
    """
    if not frappe.db.exists('Sales Invoice', {'offline_pos_name': name}):
        si_doc.docstatus = 0
        # Mandatory-field validation is skipped: the doc already failed
        # once and we only want to preserve it.
        si_doc.flags.ignore_mandatory = True
        si_doc.insert()
        frappe.log_error(frappe.get_traceback())
| gpl-3.0 |
piqueserver/piqueserver | piqueserver/scripts/protect.py | 2 | 2409 | """
Protects areas against block destroying/building.
Commands
^^^^^^^^
* ``/protect <area coordinates>`` puts an area under protected status *admin only*
* ``/protect`` clears all protected areas *admin only*
.. codeauthor:: hompy
"""
from piqueserver.commands import command, admin
from pyspades.common import coordinates
@command(admin_only=True)
def protect(connection, value=None):
    """Toggle protected status for one 64x64 map sector, or clear all.

    Called with no argument, every protected area is removed. Called with
    sector coordinates (e.g. "B4"), that sector's protection is flipped.
    """
    protocol = connection.protocol
    if value is None:
        protocol.protected = None
        protocol.send_chat('All areas unprotected', irc=True)
        return
    if protocol.protected is None:
        protocol.protected = set()
    pos = coordinates(value)
    # symmetric difference with a one-element set == toggle membership
    protocol.protected.symmetric_difference_update([pos])
    state = 'protected' if pos in protocol.protected else 'unprotected'
    message = 'The area at %s is now %s' % (value.upper(), state)
    protocol.send_chat(message, irc=True)
def apply_script(protocol, connection, config):
    """Entry point called by piqueserver's script loader; returns
    protection-aware subclasses of the given protocol/connection."""
    class ProtectConnection(connection):
        def _block_available(self, x, y, z):
            # Non-god players cannot affect protected blocks.
            # NOTE(review): falls through (returns None) when the block
            # is available -- confirm callers treat None as "allowed".
            if not self.god and self.protocol.is_protected(x, y, z):
                return False

        def on_block_build_attempt(self, x, y, z):
            # Veto single-block builds inside protected sectors.
            if not self.god and self.protocol.is_protected(x, y, z):
                return False
            return connection.on_block_build_attempt(self, x, y, z)

        def on_line_build_attempt(self, points):
            # Veto a line build if ANY of its points is protected.
            if not self.god:
                for point in points:
                    if self.protocol.is_protected(*point):
                        return False
            return connection.on_line_build_attempt(self, points)

    class ProtectProtocol(protocol):
        # Set of (x, y) sector corners currently protected, or None.
        protected = None

        def on_map_change(self, map):
            # Seed protection from the map metadata's 'protected' list.
            self.protected = set(coordinates(s) for s in
                getattr(self.map_info.info, 'protected', []))
            protocol.on_map_change(self, map)

        def is_indestructable(self, x, y, z):
            if self.is_protected(x, y, z):
                return True
            return protocol.is_indestructable(self, x, y, z)

        def is_protected(self, x, y, z):
            # Each protected entry covers a 64x64 sector anchored at
            # its (sx, sy) corner; z is ignored.
            if self.protected:
                for sx, sy in self.protected:
                    if x >= sx and y >= sy and x < sx + 64 and y < sy + 64:
                        return True
            return False

    return ProtectProtocol, ProtectConnection
| gpl-3.0 |
gratefulfrog/lib | python/pymol/colorramping.py | 1 | 13994 | import math
class ColorPoint:
    """A single way-point on a color ramp: an index (X position on the
    palette) plus a color value and the format that color arrived in."""

    def __init__(self, idx, col, colType):
        # X-coordinate of this way-point on the palette
        self.idx = idx
        # the color itself, normally an RGBA 4-tuple
        self.color = col
        # one of the ColorTypes constants
        self.colorType = colType

    def __str__(self):
        c = self.color
        return "(Index=%d; Color=(%0.3f,%0.3f,%0.3f,%0.3f); ColorType=%d)" % (
            self.idx, c[0], c[1], c[2], c[3], self.colorType)
class ColorTypes:
    """
    Simple enumerated type for internal color formats
    """
    # RGBA given as integers on a 0..colScale scale (typically 255)
    RGBAi = 0
    # RGBA given as floats in [0, 1] (the internal storage format)
    RGBAf = 1
    # "#rrggbb" hex string (output only)
    HEX6 = 2
    # "#rrggbbaa" hex string (output only)
    HEX8 = 3
class ColorRamp:
    """
    Model for a simple color ramp
    See __main__ below for usage.
    """
    # we assume linear ramps for now
    # (GAUSSIAN/EXPONENTIAL are declared but not implemented)
    LINEAR = 0
    GAUSSIAN = 1
    EXPONENTIAL = 2
    # radius of the way-point markers drawn by toCanvas()
    CIRCLE_RADIUS = 3

    def __init__(self, nColors, *args, **kwargs):
        # size of this ramp
        self.nColors = nColors
        # the list of RGBA float values
        self.ramp = []
        # ordered array of color indices
        self.keys = {}
        # ready to use; boolean; we need at least two
        # color points to define a ramp
        self.ready = False
        #
        if 'handle' in kwargs:
            self.handle = kwargs['handle']
        if 'name' in kwargs:
            self.name = kwargs['name']
        # list of unique ids for objects on the map canvas
        self.canvas_ids = {}

    def __str__(self):
        """
        instances created with ColorRamp(nColors=XYZ,name="foo") will return "foo"
        otherwise a long-debug-friendly description is returned.
        """
        if getattr(self,'name',None)!=None:
            return self.name
        else:
            s = "Object Name: Nameless\n"
            s+= "Ready to use: " + str(self.ready) + "\n"
            s+= "Keys: " + str(self.keys.keys()) + "\n"
            for k in self.keys:
                s += "Color[%d] = %s\n" % (k,self.keys[k])
            s += "ColorRamp with %d colors follows...\n" % self.nColors
            if self.ready:
                s += str(self.getRamp()) + "\n"
            else:
                s += "[]\n"
            return s

    def addColor(self, idx, col, colType=ColorTypes.RGBAf, colScale=1.0):
        """
        adds color, 'col', to ramp at index 'idx'. If 'idx' exists, this
        function overwrites the value
        """
        # check user input: color location
        # if beyond ends of ramp, make end of ramp
        if idx<0:
            idx=0
        elif idx>self.nColors-1:
            idx=self.nColors-1
        # check user input: color format
        if type(col) != ().__class__ or len(col)!=4:
            print "Error: Colors must be spefied as a RGBA tuple with four values."
            print "Error: %s was given instead." % str(col)
            return
        # check user input: color type format
        if colType not in (ColorTypes.RGBAi, ColorTypes.RGBAf):
            print "Error: Color type specification must be either, "
            print "Error: ColorRamp.RGBAi or ColorRamp.RGBAf"
            return
        userCol = None
        # convert color type if needed
        # (integer colors are normalized to floats in [0, 1])
        if colType==ColorTypes.RGBAf:
            userCol = col
        elif colType==ColorTypes.RGBAi:
            userCol = map(lambda c: float(c)/float(colScale), col)
        # create a ColorPoint and insert it
        self.keys[idx] = ColorPoint(idx, userCol, colType)
        # is this ramp yet good to use?
        self.updateReady()
        # what else do we need to do to modify the model?

    def checkPoint(self, pt, startX, X):
        """
        Checks if there is a point between startX and X.

        Walks from startX toward X and returns the last free index before
        an existing way-point (or startX itself when none is free).
        NOTE(review): the 'pt' parameter is unused.
        """
        ret_x = startX
        if startX < X:
            for x in range(int(startX)+1, int(X)+1):
                if x in self.keys:
                    break
                ret_x = x
        elif startX > X:
            for x in range(int(startX)-1, int(X)-1, -1):
                if x in self.keys:
                    break
                ret_x = x
        return ret_x

    def getPoint(self, pt):
        """
        Returns the true index (horizontal position) of a given canvas
        point id, or None when the id is unknown.
        """
        if pt in self.canvas_ids:
            return self.canvas_ids[pt]
        return None

    def getRampList(self):
        """
        Returns a flat list of floats [idx, r, g, b, a, idx, r, ...] for
        the way-points in the fixed 0..359 index range.
        """
        ramp_list = []
        for x in range(0,360):
            if x in self.keys:
                col = list(self.keys[x].color)
                ramp_list.append(float(x))
                ramp_list.append(float(col[0]))
                ramp_list.append(float(col[1]))
                ramp_list.append(float(col[2]))
                ramp_list.append(float(col[3]))
        return ramp_list

    def movePoint(self,pt,X,alpha):
        # Move the way-point behind canvas id 'pt' to column X with the
        # given alpha; the two extreme points (0 and 359) are pinned.
        # NOTE(review): the 0/359 bounds assume a 360-color ramp -- they
        # do not use self.nColors; confirm for other ramp sizes.
        if pt not in self.canvas_ids:
            # print "Error: Could not move pt(%d)." % pt
            return
        idx = self.canvas_ids[pt]
        if idx in self.keys:
            col = list(self.keys[idx].color)
        else:
            # print "Logic error no such index in self.keys"
            return
        col[3] = alpha
        # prevent extreme points from being replaced
        if X <= 0:
            return
        if X >= 359:
            return
        self.removeColor(idx)
        # prevent extreme points from moving horizontally
        if idx == 0:
            X = 0
        if idx == 359:
            X = 359
        self.addColor(X, tuple(col))

    def removePoint(self,pt):
        # Remove the way-point behind canvas id 'pt'; the two extreme
        # points are protected.
        if pt not in self.canvas_ids:
            # print "Error: Could not remove pt(%d)." % pt
            return
        idx = self.canvas_ids[pt]
        if idx <= 0 or idx >= 359:
            return
        self.removeColor(idx)

    def removeColor(self, idx):
        # check user input
        if idx not in self.keys: return
        if idx<0 or idx>self.nColors-1: return
        # remove the point
        del self.keys[idx]
        # is this ramp still good to use?
        self.updateReady()

    def updateReady(self):
        # are we ready to use?
        # (a ramp needs way-points at both extreme indices)
        self.ready = (0 in self.keys and self.nColors-1 in self.keys)

    def updateRamp(self):
        # Rebuild self.ramp by linear interpolation between consecutive
        # way-points, producing exactly nColors RGBA tuples.
        # if idx is specified then it was either added or removed
        # so adjust the ramp about that point
        if not self.ready:
            # no use in updating a ramp w/o proper colors
            print "Msg: This color ramp is not yet ready to use. Please add"
            print "Msg: at least two colors at the ramp's extreme points 0 and %d" % (self.nColors-1)
            return
        # OPTIMIZATION TODO:
        # if idx!=None and idx no in self.keys, then the point
        # was removed, just update around those pts
        # if idx!=None and does exists in self.keys, then they
        # just added this point, so update the pts around it
        self.ramp = []
        keyList = self.keys.keys()
        keyList.sort()
        keyList.reverse()
        # pop() from the reversed list yields indices in ascending order
        lowerId = keyList.pop()
        while len(keyList)>0:
            upperId = keyList.pop()
            # number of colors in between
            span = int(abs(upperId-lowerId))
            # get the actual colors
            lowerCol, upperCol = self.keys[lowerId].color, self.keys[upperId].color
            for x in range(span):
                # linear mixing components
                cUpper = float(x) / float(span)
                cLower = 1.0 - cUpper
                self.ramp.append((cLower * lowerCol[0] + cUpper * upperCol[0],
                                  cLower * lowerCol[1] + cUpper * upperCol[1],
                                  cLower * lowerCol[2] + cUpper * upperCol[2],
                                  cLower * lowerCol[3] + cUpper * upperCol[3]))
            lowerId = upperId
        # fix the off-by one error
        self.ramp.append(upperCol)
        assert len(self.ramp)==self.nColors, "ColorRamp Logic Error: This ramp supports %d colors ONLY, but %d were found in the ramp." % (self.nColors, len(self.ramp))

    def getRamp(self, colType=ColorTypes.RGBAf, colScale=1.0):
        # update the ramp and return it
        # NOTE(review): for colType RGBAf with colScale != 1.0 (and for
        # any unknown colType) this falls through and returns None --
        # confirm callers never hit those combinations.
        self.updateRamp()
        if colType==ColorTypes.RGBAf:
            if colScale==1.0:
                return self.ramp
        elif colType==ColorTypes.HEX6:
            colScale = 255
            return map(lambda col: "#%02x%02x%02x" % (colScale*col[0],colScale*col[1],colScale*col[2]), self.ramp)
        elif colType==ColorTypes.HEX8:
            colScale = 255
            return map(lambda col: "#%02x%02x%02x%02x" % (colScale*col[0],colScale*col[1],colScale*col[2],colScale*col[3]), self.ramp)

    def toPhotoImageString(self,nRows=1):
        # Tk PhotoImage.put() row format: "{#rrggbb #rrggbb ...}" with
        # one braced group per row.
        oneLine = "{" + " ".join(self.getRamp(ColorTypes.HEX6)) + "}"
        if nRows==1:
            return oneLine
        else:
            return " ".join([oneLine]*nRows)

    # this belongs in the view
    def toPhotoImage(self,nRows=1):
        try:
            from Tkinter import PhotoImage
        except ImportError, e:
            print "Error: could not import Tk. No image."
            print "Error: ", e
            return None
        img = PhotoImage(width=self.nColors, height=nRows)
        img.put(self.toPhotoImageString(nRows))
        return img

    def toCanvas(self,canvas,width,height,padX=0,padY=0):
        # Draw the way-points (ovals) and the connecting segments (lines)
        # onto a Tk canvas, recording the canvas item ids in
        # self.canvas_ids so pointer events can be mapped back.
        r = self.CIRCLE_RADIUS
        tmpKeys = self.keys.keys()
        tmpKeys.sort()
        # plottable window area; wH, wW
        wH = height-2*padY
        wW = width -2*padX
        for idx in range(len(tmpKeys)):
            pt1 = self.keys[tmpKeys[idx]]
            origX1, origY1 = pt1.idx, pt1.color[3]
            x1 = int(float(origX1)/float(self.nColors) * wW)
            y1 = self.alphaToY(origY1)
            y1 = int(wH * (1.0-float(y1)))
            x1 += padX
            y1 += padY
            # if not last loop, then draw the line
            if idx+1<len(tmpKeys):
                pt2 = self.keys[tmpKeys[idx+1]]
                origX2, origY2 = pt2.idx, pt2.color[3]
                x2 = int(float(origX2)/float(self.nColors) * wW)
                y2 = self.alphaToY(origY2)
                y2 = int(wH * (1.0-float(y2)))
                x2 += padX
                y2 += padY
                # plot the pt
                unique_id = canvas.create_line((x1,y1,x2,y2),fill="black",width=1.0,tags="colorPt")
                self.canvas_ids[unique_id] = idx
            origColor = pt1.color
            # convert the color from RGBA --> HEX6
            colHEX6 = "#%02x%02x%02x" % (origColor[0]*255., origColor[1]*255., origColor[2]*255.)
            # plot the pt
            unique_id = canvas.create_oval((x1-r,y1-r,x1+r,y1+r),fill=colHEX6,tags="colorPt")
            self.canvas_ids[unique_id] = tmpKeys[idx]

    def clearCanvas(self):
        # NOTE(review): self.canvas is never assigned anywhere in this
        # class (toCanvas() receives the canvas as an argument), so this
        # would raise AttributeError if called -- confirm intent.
        for x in self.canvas_ids:
            self.canvas.delete(x)

    def getHandle(self):
        # external handle attached via the 'handle' kwarg or setHandle()
        return self.handle

    def setHandle(self,handle):
        self.handle = handle

    def yToAlpha(self,y):
        # Map a normalized canvas Y in [0, 1] to an alpha value via the
        # inverse of the log curve used in alphaToY().
        if y<=0:
            return 0.0
        elif y>=1:
            return 1.0
        else:
            # return y
            return (10.**y-1.) / 9.

    def alphaToY(self,alpha):
        # Map an alpha in [0, 1] to a normalized canvas Y via a log10
        # curve (gives finer control near low alpha).
        if alpha<=0:
            return 0.
        elif alpha>=1:
            return 1.0
        else:
            # return alpha
            return math.log(1.0+9.*alpha,10.)
if __name__=="__main__":
    # Smoke-test / demo driver: exercises the ColorRamp API (bounds
    # clamping, color scaling, add/remove, output formats) and, when
    # Tkinter is available, renders the ramp in a small window.
    c = ColorRamp(256,name="C")
    # add some colors
    c.addColor(1,(0,0,0,0))
    print c
    c.addColor(2,(1,1,1,1))
    print c
    c.addColor(250,(0.5, 0.5, 0.5, 0.5))
    print c
    # range checking
    c.addColor(-1, (0,0,0,0))
    print c
    c.addColor(256, (1,2,3,4))
    print c
    # color scaling
    c.addColor(45, (128, 255, 64, 32), colType=ColorTypes.RGBAi, colScale=255)
    print c
    # remove a color
    c.removeColor(2)
    print c
    # range checking
    c.removeColor(-1)
    print c
    # range checking
    c.removeColor(2000)
    print c
    # check ready to use
    c.addColor(0, (0,0,0,0))
    print c
    c.addColor(8, (1.0, 0.4, 0.0, 0.0))
    print c
    c.addColor(255, (1,1,1,1))
    print c
    # check ramp types
    d = ColorRamp(32)
    d.addColor(0, (0,0,0,0), colType=ColorTypes.RGBAi, colScale=255)
    d.addColor(31, (255,255,255,255), colType=ColorTypes.RGBAi, colScale=255)
    d.addColor(15, (1.0, 0.0, 0.0, 1.0))
    print "Color Ramp as RGAf"
    print d.getRamp()
    print "Color Ramp as HEX6"
    print d.getRamp(ColorTypes.HEX6)
    print "Color Ramp as HEX8"
    print d.getRamp(ColorTypes.HEX8)
    print "Does adding/removing a pt screw up the model?"
    f = ColorRamp(360)
    # end pts
    f.addColor(0, (0,0,0,0))
    f.addColor(359, (1,1,1,1))
    print f
    print "Adding a pt"
    f.addColor(7, (1.0, 0.0, 0.5, 0.25))
    print f
    print "Removing a pt"
    f.removeColor(7)
    print f
    print "Add some more colors"
    f.addColor(90, (1.0, 0.0, 0.0, 1.0))
    f.addColor(270, (0.0, 0.0, 1.0, 1.0))
    f.addColor(180, (0.0, 1.0, 0.0, 1.0))
    print "Checking hex8 vlaues"
    print f.getRamp(ColorTypes.HEX8)
    print "To PhotoImage String: nRows=1"
    print f.toPhotoImageString()
    print "To PhotoImage String: nRows=32"
    print f.toPhotoImageString(16)
    try:
        from Tkinter import *
        root = Tk()
        padX, padY = 30, 30
        canvas = Canvas(root,height=10+padY,width=360+padX)
        print "Try to make a color ramp image"
        img = f.toPhotoImage(10)
        canvas.create_image((padX/2, padY/2),image=img,anchor=NW)
        canvas.pack()
        root.mainloop()
    except ImportError, e:
        print "WARNING: Tkinter not installed for this Python version."
        print "WARNING: Skipping the Tkinter test"
| gpl-2.0 |
mancoast/CPythonPyc_test | cpython/231_string_tests.py | 7 | 30084 | """
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys
from test import test_support
from UserList import UserList
class Sequence:
    """Minimal sequence wrapper used to feed arbitrary sequence arguments
    to the string methods under test."""

    def __init__(self, seq='wxyz'):
        self.seq = seq

    def __len__(self):
        return len(self.seq)

    def __getitem__(self, i):
        return self.seq[i]
class BadSeq1(Sequence):
    # Sequence whose elements are not all strings; used to check that
    # join() rejects non-string items. (123L is a Python 2 long literal.)
    def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
    """Sequence that lies about its length: __len__ reports 8 while only
    three items exist. Used to check that join() trusts iteration rather
    than len()."""

    def __init__(self):
        self.seq = ['a', 'b', 'c']

    def __len__(self):
        return 8
class CommonTest(unittest.TestCase):
    """Checks shared by test_str, test_unicode, test_userstring and
    test_string; subclasses set type2test to the string-like type."""
    # This testcase contains test that can be used in all
    # stringlike classes. Currently this is str, unicode
    # UserString and the string module.

    # The type to be tested
    # Change in subclasses to change the behaviour of fixtesttype()
    type2test = None

    # All tests pass their arguments to the testing methods
    # as str objects. fixtesttype() can be used to propagate
    # these arguments to the appropriate type
    def fixtype(self, obj):
        # Recursively convert str values (including those nested inside
        # lists, tuples and dicts) to type2test; other values pass through.
        if isinstance(obj, str):
            return self.__class__.type2test(obj)
        elif isinstance(obj, list):
            return [self.fixtype(x) for x in obj]
        elif isinstance(obj, tuple):
            return tuple([self.fixtype(x) for x in obj])
        elif isinstance(obj, dict):
            return dict([
               (self.fixtype(key), self.fixtype(value))
               for (key, value) in obj.iteritems()
            ])
        else:
            return obj

    # check that object.method(*args) returns result
    def checkequal(self, result, object, methodname, *args):
        result = self.fixtype(result)
        object = self.fixtype(object)
        args = self.fixtype(args)
        realresult = getattr(object, methodname)(*args)
        self.assertEqual(
            result,
            realresult
        )
        # if the original is returned make sure that
        # this doesn't happen with subclasses
        if object == realresult:
            class subtype(self.__class__.type2test):
                pass
            object = subtype(object)
            realresult = getattr(object, methodname)(*args)
            self.assert_(object is not realresult)

    # check that object.method(*args) raises exc
    def checkraises(self, exc, object, methodname, *args):
        object = self.fixtype(object)
        args = self.fixtype(args)
        self.assertRaises(
            exc,
            getattr(object, methodname),
            *args
        )

    # call object.method(*args) without any checks
    def checkcall(self, object, methodname, *args):
        object = self.fixtype(object)
        args = self.fixtype(args)
        getattr(object, methodname)(*args)

    def test_capitalize(self):
        self.checkequal(' hello ', ' hello ', 'capitalize')
        self.checkequal('Hello ', 'Hello ','capitalize')
        self.checkequal('Hello ', 'hello ','capitalize')
        self.checkequal('Aaaa', 'aaaa', 'capitalize')
        self.checkequal('Aaaa', 'AaAa', 'capitalize')

        self.checkraises(TypeError, 'hello', 'capitalize', 42)

    def test_count(self):
        self.checkequal(3, 'aaa', 'count', 'a')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(3, 'aaa', 'count', 'a')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(3, 'aaa', 'count', 'a')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(0, 'aaa', 'count', 'b')
        # negative and out-of-range start/end indices
        self.checkequal(1, 'aaa', 'count', 'a', -1)
        self.checkequal(3, 'aaa', 'count', 'a', -10)
        self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
        self.checkequal(0, 'aaa', 'count', 'a', 0, -10)

        self.checkraises(TypeError, 'hello', 'count')
        self.checkraises(TypeError, 'hello', 'count', 42)

    def test_find(self):
        self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
        self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
        self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)

        self.checkraises(TypeError, 'hello', 'find')
        self.checkraises(TypeError, 'hello', 'find', 42)

    def test_rfind(self):
        self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
        self.checkequal(12, 'abcdefghiabc', 'rfind', '')
        self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
        self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')

        self.checkraises(TypeError, 'hello', 'rfind')
        self.checkraises(TypeError, 'hello', 'rfind', 42)

    def test_index(self):
        self.checkequal(0, 'abcdefghiabc', 'index', '')
        self.checkequal(3, 'abcdefghiabc', 'index', 'def')
        self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
        self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)

        self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
        self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
        self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
        self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)

        self.checkraises(TypeError, 'hello', 'index')
        self.checkraises(TypeError, 'hello', 'index', 42)

    def test_rindex(self):
        self.checkequal(12, 'abcdefghiabc', 'rindex', '')
        self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
        self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
        self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)

        self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
        self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
        self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
        self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
        self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)

        self.checkraises(TypeError, 'hello', 'rindex')
        self.checkraises(TypeError, 'hello', 'rindex', 42)

    def test_lower(self):
        self.checkequal('hello', 'HeLLo', 'lower')
        self.checkequal('hello', 'hello', 'lower')
        self.checkraises(TypeError, 'hello', 'lower', 42)

    def test_upper(self):
        self.checkequal('HELLO', 'HeLLo', 'upper')
        self.checkequal('HELLO', 'HELLO', 'upper')
        self.checkraises(TypeError, 'hello', 'upper', 42)

    def test_expandtabs(self):
        # default tab size is 8 columns
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
        self.checkequal('abc\rab  def\ng   hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
        self.checkequal('abc\r\nab  def\ng   hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
        self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)

        self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)

    def test_split(self):
        self.checkequal(['this', 'is', 'the', 'split', 'function'],
            'this is the split function', 'split')
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
        self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
        self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
        self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
        self.checkequal(['a', 'b', 'c  d'], 'a  b  c  d', 'split', None, 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
        self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')

        self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)

    def test_strip(self):
        self.checkequal('hello', '   hello   ', 'strip')
        self.checkequal('hello   ', '   hello   ', 'lstrip')
        self.checkequal('   hello', '   hello   ', 'rstrip')
        self.checkequal('hello', 'hello', 'strip')

        # strip/lstrip/rstrip with None arg
        self.checkequal('hello', '   hello   ', 'strip', None)
        self.checkequal('hello   ', '   hello   ', 'lstrip', None)
        self.checkequal('   hello', '   hello   ', 'rstrip', None)
        self.checkequal('hello', 'hello', 'strip', None)

        # strip/lstrip/rstrip with str arg
        self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
        self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
        self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
        self.checkequal('hello', 'hello', 'strip', 'xyz')

        # strip/lstrip/rstrip with unicode arg
        if test_support.have_unicode:
            self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
                 'strip', unicode('xyz', 'ascii'))
            self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
                 'lstrip', unicode('xyz', 'ascii'))
            self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
                 'rstrip', unicode('xyz', 'ascii'))
            self.checkequal(unicode('hello', 'ascii'), 'hello',
                 'strip', unicode('xyz', 'ascii'))

        self.checkraises(TypeError, 'hello', 'strip', 42, 42)
        self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
        self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)

    def test_ljust(self):
        self.checkequal('abc       ', 'abc', 'ljust', 10)
        self.checkequal('abc   ', 'abc', 'ljust', 6)
        self.checkequal('abc', 'abc', 'ljust', 3)
        self.checkequal('abc', 'abc', 'ljust', 2)
        self.checkraises(TypeError, 'abc', 'ljust')

    def test_rjust(self):
        self.checkequal('       abc', 'abc', 'rjust', 10)
        self.checkequal('   abc', 'abc', 'rjust', 6)
        self.checkequal('abc', 'abc', 'rjust', 3)
        self.checkequal('abc', 'abc', 'rjust', 2)
        self.checkraises(TypeError, 'abc', 'rjust')

    def test_center(self):
        self.checkequal('   abc    ', 'abc', 'center', 10)
        self.checkequal(' abc  ', 'abc', 'center', 6)
        self.checkequal('abc', 'abc', 'center', 3)
        self.checkequal('abc', 'abc', 'center', 2)
        self.checkraises(TypeError, 'abc', 'center')

    def test_swapcase(self):
        self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')

        self.checkraises(TypeError, 'hello', 'swapcase', 42)

    def test_replace(self):
        self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
        self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
        self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
        self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
        self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
        self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
        self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
        self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
        self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
        # replacing the empty string inserts between every character
        self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
        self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
        self.checkequal('abc', 'abc', 'replace', '', '-', 0)
        self.checkequal('', '', 'replace', '', '')
        self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
        self.checkequal('abc', 'abc', 'replace', 'xy', '--')
        # Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
        # MemoryError due to empty result (platform malloc issue when requesting
        # 0 bytes).
        self.checkequal('', '123', 'replace', '123', '')
        self.checkequal('', '123123', 'replace', '123', '')
        self.checkequal('x', '123x123', 'replace', '123', '')

        self.checkraises(TypeError, 'hello', 'replace')
        self.checkraises(TypeError, 'hello', 'replace', 42)
        self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
        self.checkraises(TypeError, 'hello', 'replace', 'h', 42)

    def test_zfill(self):
        self.checkequal('123', '123', 'zfill', 2)
        self.checkequal('123', '123', 'zfill', 3)
        self.checkequal('0123', '123', 'zfill', 4)
        # the sign character stays in front of the inserted zeros
        self.checkequal('+123', '+123', 'zfill', 3)
        self.checkequal('+123', '+123', 'zfill', 4)
        self.checkequal('+0123', '+123', 'zfill', 5)
        self.checkequal('-123', '-123', 'zfill', 3)
        self.checkequal('-123', '-123', 'zfill', 4)
        self.checkequal('-0123', '-123', 'zfill', 5)
        self.checkequal('000', '', 'zfill', 3)
        self.checkequal('34', '34', 'zfill', 1)
        self.checkequal('0034', '34', 'zfill', 4)

        self.checkraises(TypeError, '123', 'zfill')
class MixinStrUnicodeUserStringTest:
    """Mixin of tests shared by string-like types.

    NOTE(review): relies on the host test class to supply the
    checkequal/checkraises/checkcall helpers and the Sequence/UserList/
    BadSeq1/BadSeq2 fixtures -- they are defined elsewhere in this file.
    """
    # additional tests that only work for
    # stringlike objects, i.e. str, unicode, UserString
    # (but not the string module)
    def test_islower(self):
        """islower(): True iff at least one cased char exists and all cased chars are lowercase."""
        self.checkequal(False, '', 'islower')
        self.checkequal(True, 'a', 'islower')
        self.checkequal(False, 'A', 'islower')
        self.checkequal(False, '\n', 'islower')
        self.checkequal(True, 'abc', 'islower')
        self.checkequal(False, 'aBc', 'islower')
        self.checkequal(True, 'abc\n', 'islower')
        self.checkraises(TypeError, 'abc', 'islower', 42)
    def test_isupper(self):
        """isupper(): True iff at least one cased char exists and all cased chars are uppercase."""
        self.checkequal(False, '', 'isupper')
        self.checkequal(False, 'a', 'isupper')
        self.checkequal(True, 'A', 'isupper')
        self.checkequal(False, '\n', 'isupper')
        self.checkequal(True, 'ABC', 'isupper')
        self.checkequal(False, 'AbC', 'isupper')
        self.checkequal(True, 'ABC\n', 'isupper')
        self.checkraises(TypeError, 'abc', 'isupper', 42)
    def test_istitle(self):
        """istitle(): uppercase follows uncased, lowercase follows cased."""
        self.checkequal(False, '', 'istitle')
        self.checkequal(False, 'a', 'istitle')
        self.checkequal(True, 'A', 'istitle')
        self.checkequal(False, '\n', 'istitle')
        self.checkequal(True, 'A Titlecased Line', 'istitle')
        self.checkequal(True, 'A\nTitlecased Line', 'istitle')
        self.checkequal(True, 'A Titlecased, Line', 'istitle')
        self.checkequal(False, 'Not a capitalized String', 'istitle')
        self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
        self.checkequal(False, 'Not--a Titlecase String', 'istitle')
        self.checkequal(False, 'NOT', 'istitle')
        self.checkraises(TypeError, 'abc', 'istitle', 42)
    def test_isspace(self):
        """isspace(): True iff non-empty and every char is whitespace."""
        self.checkequal(False, '', 'isspace')
        self.checkequal(False, 'a', 'isspace')
        self.checkequal(True, ' ', 'isspace')
        self.checkequal(True, '\t', 'isspace')
        self.checkequal(True, '\r', 'isspace')
        self.checkequal(True, '\n', 'isspace')
        self.checkequal(True, ' \t\r\n', 'isspace')
        self.checkequal(False, ' \t\r\na', 'isspace')
        self.checkraises(TypeError, 'abc', 'isspace', 42)
    def test_isalpha(self):
        """isalpha(): True iff non-empty and every char is alphabetic."""
        self.checkequal(False, '', 'isalpha')
        self.checkequal(True, 'a', 'isalpha')
        self.checkequal(True, 'A', 'isalpha')
        self.checkequal(False, '\n', 'isalpha')
        self.checkequal(True, 'abc', 'isalpha')
        self.checkequal(False, 'aBc123', 'isalpha')
        self.checkequal(False, 'abc\n', 'isalpha')
        self.checkraises(TypeError, 'abc', 'isalpha', 42)
    def test_isalnum(self):
        """isalnum(): True iff non-empty and every char is alphanumeric."""
        self.checkequal(False, '', 'isalnum')
        self.checkequal(True, 'a', 'isalnum')
        self.checkequal(True, 'A', 'isalnum')
        self.checkequal(False, '\n', 'isalnum')
        self.checkequal(True, '123abc456', 'isalnum')
        self.checkequal(True, 'a1b3c', 'isalnum')
        self.checkequal(False, 'aBc000 ', 'isalnum')
        self.checkequal(False, 'abc\n', 'isalnum')
        self.checkraises(TypeError, 'abc', 'isalnum', 42)
    def test_isdigit(self):
        """isdigit(): True iff non-empty and every char is a digit."""
        self.checkequal(False, '', 'isdigit')
        self.checkequal(False, 'a', 'isdigit')
        self.checkequal(True, '0', 'isdigit')
        self.checkequal(True, '0123456789', 'isdigit')
        self.checkequal(False, '0123456789a', 'isdigit')
        self.checkraises(TypeError, 'abc', 'isdigit', 42)
    def test_title(self):
        """title(): first letter of each word uppercased, the rest lowercased."""
        self.checkequal(' Hello ', ' hello ', 'title')
        self.checkequal('Hello ', 'hello ', 'title')
        self.checkequal('Hello ', 'Hello ', 'title')
        self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
        self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
        self.checkequal('Getint', "getInt", 'title')
        self.checkraises(TypeError, 'hello', 'title', 42)
    def test_splitlines(self):
        """splitlines(): \\n, \\r and \\r\\n all break lines; truthy arg keeps line ends."""
        self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
        self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
        self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
        self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
        self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
        self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
        self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)
        self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
    def test_startswith(self):
        """startswith(prefix[, start[, end]]), including out-of-range and negative bounds."""
        self.checkequal(True, 'hello', 'startswith', 'he')
        self.checkequal(True, 'hello', 'startswith', 'hello')
        self.checkequal(False, 'hello', 'startswith', 'hello world')
        self.checkequal(True, 'hello', 'startswith', '')
        self.checkequal(False, 'hello', 'startswith', 'ello')
        self.checkequal(True, 'hello', 'startswith', 'ello', 1)
        self.checkequal(True, 'hello', 'startswith', 'o', 4)
        self.checkequal(False, 'hello', 'startswith', 'o', 5)
        self.checkequal(True, 'hello', 'startswith', '', 5)
        self.checkequal(False, 'hello', 'startswith', 'lo', 6)
        self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
        self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
        self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
        # test negative indices
        self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
        self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
        self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
        self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
        self.checkequal(False, 'hello', 'startswith', 'ello', -5)
        self.checkequal(True, 'hello', 'startswith', 'ello', -4)
        self.checkequal(False, 'hello', 'startswith', 'o', -2)
        self.checkequal(True, 'hello', 'startswith', 'o', -1)
        self.checkequal(True, 'hello', 'startswith', '', -3, -3)
        self.checkequal(False, 'hello', 'startswith', 'lo', -9)
        self.checkraises(TypeError, 'hello', 'startswith')
        self.checkraises(TypeError, 'hello', 'startswith', 42)
    def test_endswith(self):
        """endswith(suffix[, start[, end]]), including out-of-range and negative bounds."""
        self.checkequal(True, 'hello', 'endswith', 'lo')
        self.checkequal(False, 'hello', 'endswith', 'he')
        self.checkequal(True, 'hello', 'endswith', '')
        self.checkequal(False, 'hello', 'endswith', 'hello world')
        self.checkequal(False, 'helloworld', 'endswith', 'worl')
        self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
        self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
        self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
        self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
        # test negative indices
        self.checkequal(True, 'hello', 'endswith', 'lo', -2)
        self.checkequal(False, 'hello', 'endswith', 'he', -2)
        self.checkequal(True, 'hello', 'endswith', '', -3, -3)
        self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
        self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
        self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
        self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
        self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
        self.checkraises(TypeError, 'hello', 'endswith')
        self.checkraises(TypeError, 'hello', 'endswith', 42)
    def test___contains__(self):
        """'in' operator: empty string is in everything; NUL bytes are matched literally."""
        self.checkequal(True, '', '__contains__', '')         # vereq('' in '', True)
        self.checkequal(True, 'abc', '__contains__', '')      # vereq('' in 'abc', True)
        self.checkequal(False, 'abc', '__contains__', '\0')   # vereq('\0' in 'abc', False)
        self.checkequal(True, '\0abc', '__contains__', '\0')  # vereq('\0' in '\0abc', True)
        self.checkequal(True, 'abc\0', '__contains__', '\0')  # vereq('\0' in 'abc\0', True)
        self.checkequal(True, '\0abc', '__contains__', 'a')   # vereq('a' in '\0abc', True)
        self.checkequal(True, 'asdf', '__contains__', 'asdf') # vereq('asdf' in 'asdf', True)
        self.checkequal(False, 'asd', '__contains__', 'asdf') # vereq('asdf' in 'asd', False)
        self.checkequal(False, '', '__contains__', 'asdf')    # vereq('asdf' in '', False)
    def test_subscript(self):
        """__getitem__ with int, long and slice objects (expected results are unicode here)."""
        self.checkequal(u'a', 'abc', '__getitem__', 0)
        self.checkequal(u'c', 'abc', '__getitem__', -1)
        self.checkequal(u'a', 'abc', '__getitem__', 0L)
        self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
        self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
        self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
        self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
        # FIXME What about negative indizes? This is handled differently by [] and __getitem__(slice)
        self.checkraises(TypeError, 'abc', '__getitem__', 'def')
    def test_slice(self):
        """__getslice__ (Py2 two-index slicing); bounds are clamped, reversed ranges are empty."""
        self.checkequal('abc', 'abc', '__getslice__', 0, 1000)
        self.checkequal('abc', 'abc', '__getslice__', 0, 3)
        self.checkequal('ab', 'abc', '__getslice__', 0, 2)
        self.checkequal('bc', 'abc', '__getslice__', 1, 3)
        self.checkequal('b', 'abc', '__getslice__', 1, 2)
        self.checkequal('', 'abc', '__getslice__', 2, 2)
        self.checkequal('', 'abc', '__getslice__', 1000, 1000)
        self.checkequal('', 'abc', '__getslice__', 2000, 1000)
        self.checkequal('', 'abc', '__getslice__', 2, 1)
        # FIXME What about negative indizes? This is handled differently by [] and __getslice__
        self.checkraises(TypeError, 'abc', '__getslice__', 'def')
    def test_mul(self):
        """s * n: non-positive counts give ''; non-int operand is a TypeError."""
        self.checkequal('', 'abc', '__mul__', -1)
        self.checkequal('', 'abc', '__mul__', 0)
        self.checkequal('abc', 'abc', '__mul__', 1)
        self.checkequal('abcabcabc', 'abc', '__mul__', 3)
        self.checkraises(TypeError, 'abc', '__mul__')
        self.checkraises(TypeError, 'abc', '__mul__', '')
        self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
    def test_join(self):
        """sep.join(seq) over lists, tuples, user sequences and str/unicode mixes."""
        # join now works with any sequence type
        # moved here, because the argument order is
        # different in string.join (see the test in
        # test.test_string.StringTest.test_join)
        self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        self.checkequal('w x y z', ' ', 'join', Sequence())
        self.checkequal('abc', 'a', 'join', ('abc',))
        self.checkequal('z', 'a', 'join', UserList(['z']))
        if test_support.have_unicode:
            # any unicode element promotes the result to unicode
            self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
            self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
            self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
            self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
            self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
        for i in [5, 25, 125]:
            self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                 ['a' * i] * i)
            self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                 ('a' * i,) * i)
        self.checkraises(TypeError, ' ', 'join', BadSeq1())
        self.checkequal('a b c', ' ', 'join', BadSeq2())
        self.checkraises(TypeError, ' ', 'join')
        self.checkraises(TypeError, ' ', 'join', 7)
        self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L]))
    def test_formatting(self):
        """%-formatting: conversions, %c ordinals, named args, and error cases."""
        self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
        self.checkequal('+10+', '+%d+', '__mod__', 10)
        self.checkequal('a', "%c", '__mod__', "a")
        self.checkequal('a', "%c", '__mod__', "a")
        self.checkequal('"', "%c", '__mod__', 34)
        self.checkequal('$', "%c", '__mod__', 36)
        self.checkequal('10', "%d", '__mod__', 10)
        self.checkequal('\x7f', "%c", '__mod__', 0x7f)
        for ordinal in (-100, 0x200000):
            # unicode raises ValueError, str raises OverflowError
            self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
        self.checkequal(' 42', '%3ld', '__mod__', 42)
        self.checkequal('0042.00', '%07.2f', '__mod__', 42)
        self.checkequal('0042.00', '%07.2F', '__mod__', 42)
        self.checkraises(TypeError, 'abc', '__mod__')
        self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
        self.checkraises(TypeError, '%s%s', '__mod__', (42,))
        self.checkraises(TypeError, '%c', '__mod__', (None,))
        self.checkraises(ValueError, '%(foo', '__mod__', {})
        self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
        # argument names with properly nested brackets are supported
        self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
        # 100 is a magic number in PyUnicode_Format, this forces a resize
        self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
        self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
        self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
        self.checkraises(ValueError, '%10', '__mod__', (42,))
    def test_floatformatting(self):
        # float formatting
        """%f at every precision 0..99 against values spanning many magnitudes."""
        for prec in xrange(100):
            format = '%%.%if' % prec
            value = 0.01
            for x in xrange(60):
                value = value * 3.141592655 / 3.0 * 10.0
                # The formatfloat() code in stringobject.c and
                # unicodeobject.c uses a 120 byte buffer and switches from
                # 'f' formatting to 'g' at precision 50, so we expect
                # OverflowErrors for the ranges x < 50 and prec >= 67.
                if x < 50 and prec >= 67:
                    self.checkraises(OverflowError, format, "__mod__", value)
                else:
                    self.checkcall(format, "__mod__", value)
class MixinStrStringUserStringTest:
    """Mixin of 8-bit-string tests (Py2 string.maketrans/translate)."""
    # Additional tests for 8bit strings, i.e. str, UserString and
    # the string module
    def test_maketrans(self):
        """maketrans(from, to) produces a 256-byte mapping table; lengths must match."""
        self.assertEqual(
           ''.join(map(chr, xrange(256))).replace('abc', 'xyz'),
           string.maketrans('abc', 'xyz')
        )
        self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzw')
    def test_translate(self):
        """translate(table[, deletechars]); table must be exactly 256 chars."""
        table = string.maketrans('abc', 'xyz')
        self.checkequal('xyzxyz', 'xyzabcdef', 'translate', table, 'def')
        table = string.maketrans('a', 'A')
        self.checkequal('Abc', 'abc', 'translate', table)
        self.checkequal('xyz', 'xyz', 'translate', table)
        self.checkequal('yz', 'xyz', 'translate', table, 'x')
        # a table shorter than 256 entries is rejected
        self.checkraises(ValueError, 'xyz', 'translate', 'too short', 'strip')
        self.checkraises(ValueError, 'xyz', 'translate', 'too short')
class MixinStrUserStringTest:
    """Mixin: Py2 str.encode()/str.decode() round-trips through stdlib codecs."""
    # Additional tests that only work with
    # 8bit compatible object, i.e. str and UserString
    def test_encoding_decoding(self):
        """Round-trip several bytes<->bytes codecs, plus zlib when available."""
        codecs = [('rot13', 'uryyb jbeyq'),
                  ('base64', 'aGVsbG8gd29ybGQ=\n'),
                  ('hex', '68656c6c6f20776f726c64'),
                  ('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
        for encoding, data in codecs:
            self.checkequal(data, 'hello world', 'encode', encoding)
            self.checkequal('hello world', data, 'decode', encoding)
        # zlib is optional, so we make the test optional too...
        try:
            import zlib
        except ImportError:
            pass
        else:
            data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
            self.checkequal(data, 'hello world', 'encode', 'zlib')
            self.checkequal('hello world', data, 'decode', 'zlib')
        # codec name must be a string
        self.checkraises(TypeError, 'xyz', 'decode', 42)
        self.checkraises(TypeError, 'xyz', 'encode', 42)
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/331_test_posixpath.py | 1 | 26081 | import itertools
import os
import posixpath
import sys
import unittest
import warnings
from posixpath import realpath, abspath, dirname, basename
from test import support, test_genericpath
try:
    import posix
except ImportError:
    # posix is unavailable on non-POSIX builds; tests needing it are
    # skipped via @unittest.skipIf(posix is None, ...) below.
    posix = None
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
    """
    On Windows, posixpath.abspath still returns paths with backslashes
    instead of posix forward slashes. If this is the case, several tests
    fail, so skip them.
    """
    msg = "ABSTFN is not a posix path - tests fail"
    if '\\' in ABSTFN:
        return unittest.skip(msg)(test)
    return test
def safe_rmdir(dirname):
    """Best-effort os.rmdir(): ignore OSError (missing or non-empty dir).

    NOTE(review): the parameter shadows the `dirname` function imported
    from posixpath at module level; kept for keyword-call compatibility.
    """
    try:
        os.rmdir(dirname)
    except OSError:
        # cleanup helper -- failure to remove is not an error
        return
class PosixPathTest(unittest.TestCase):
    def setUp(self):
        # Start each test from a clean slate: reuse tearDown() to remove
        # any leftovers from a previous (possibly aborted) run.
        self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
support.unlink(support.TESTFN + suffix)
safe_rmdir(support.TESTFN + suffix)
    def test_join(self):
        """join(): an absolute component restarts the path; str and bytes work, but not mixed."""
        self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"),
                         "/bar/baz")
        self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
        self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"),
                         "/foo/bar/baz/")
        self.assertEqual(posixpath.join(b"/foo", b"bar", b"/bar", b"baz"),
                         b"/bar/baz")
        self.assertEqual(posixpath.join(b"/foo", b"bar", b"baz"),
                         b"/foo/bar/baz")
        self.assertEqual(posixpath.join(b"/foo/", b"bar/", b"baz/"),
                         b"/foo/bar/baz/")
        def check_error_msg(list_of_args, msg):
            """Check posixpath.join raises friendly TypeErrors."""
            for args in (item for perm in list_of_args
                         for item in itertools.permutations(perm)):
                with self.assertRaises(TypeError) as cm:
                    posixpath.join(*args)
                self.assertEqual(msg, cm.exception.args[0])
        check_error_msg([[b'bytes', 'str'], [bytearray(b'bytes'), 'str']],
                        "Can't mix strings and bytes in path components.")
        # regression, see #15377
        with self.assertRaises(TypeError) as cm:
            posixpath.join(None, 'str')
        # None must NOT be reported as a str/bytes mixing problem
        self.assertNotEqual("Can't mix strings and bytes in path components.",
                            cm.exception.args[0])
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
self.assertEqual(posixpath.split(b"/foo/bar"), (b"/foo", b"bar"))
self.assertEqual(posixpath.split(b"/"), (b"/", b""))
self.assertEqual(posixpath.split(b"foo"), (b"", b"foo"))
self.assertEqual(posixpath.split(b"////foo"), (b"////", b"foo"))
self.assertEqual(posixpath.split(b"//foo//bar"), (b"//foo", b"bar"))
    def splitextTest(self, path, filename, ext):
        """Assert splitext(path) == (filename, ext) under several prefixes/suffixes.

        The same checks are repeated with the arguments re-encoded to
        ASCII bytes, since splitext accepts both str and bytes.
        """
        self.assertEqual(posixpath.splitext(path), (filename, ext))
        self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
        self.assertEqual(posixpath.splitext("abc/" + path),
                         ("abc/" + filename, ext))
        self.assertEqual(posixpath.splitext("abc.def/" + path),
                         ("abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext("/abc.def/" + path),
                         ("/abc.def/" + filename, ext))
        # a trailing slash means there is no extension
        self.assertEqual(posixpath.splitext(path + "/"),
                         (filename + ext + "/", ""))
        path = bytes(path, "ASCII")
        filename = bytes(filename, "ASCII")
        ext = bytes(ext, "ASCII")
        self.assertEqual(posixpath.splitext(path), (filename, ext))
        self.assertEqual(posixpath.splitext(b"/" + path),
                         (b"/" + filename, ext))
        self.assertEqual(posixpath.splitext(b"abc/" + path),
                         (b"abc/" + filename, ext))
        self.assertEqual(posixpath.splitext(b"abc.def/" + path),
                         (b"abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
                         (b"/abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext(path + b"/"),
                         (filename + ext + b"/", b""))
    def test_splitext(self):
        """splitext() keeps leading dots with the filename (dotfiles have no extension)."""
        self.splitextTest("foo.bar", "foo", ".bar")
        self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
        self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
        self.splitextTest(".csh.rc", ".csh", ".rc")
        self.splitextTest("nodots", "nodots", "")
        self.splitextTest(".cshrc", ".cshrc", "")
        self.splitextTest("...manydots", "...manydots", "")
        self.splitextTest("...manydots.ext", "...manydots", ".ext")
        self.splitextTest(".", ".", "")
        self.splitextTest("..", "..", "")
        self.splitextTest("........", "........", "")
        self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
self.assertIs(posixpath.isabs(b""), False)
self.assertIs(posixpath.isabs(b"/"), True)
self.assertIs(posixpath.isabs(b"/foo"), True)
self.assertIs(posixpath.isabs(b"/foo/bar"), True)
self.assertIs(posixpath.isabs(b"foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
self.assertEqual(posixpath.basename(b"/foo/bar"), b"bar")
self.assertEqual(posixpath.basename(b"/"), b"")
self.assertEqual(posixpath.basename(b"foo"), b"foo")
self.assertEqual(posixpath.basename(b"////foo"), b"foo")
self.assertEqual(posixpath.basename(b"//foo//bar"), b"bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
self.assertEqual(posixpath.dirname(b"/foo/bar"), b"/foo")
self.assertEqual(posixpath.dirname(b"/"), b"/")
self.assertEqual(posixpath.dirname(b"foo"), b"")
self.assertEqual(posixpath.dirname(b"////foo"), b"////")
self.assertEqual(posixpath.dirname(b"//foo//bar"), b"//foo")
def test_islink(self):
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), False)
f = open(support.TESTFN + "1", "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
if support.can_symlink():
os.symlink(support.TESTFN + "1", support.TESTFN + "2")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
os.remove(support.TESTFN + "1")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), True)
finally:
if not f.close():
f.close()
    @staticmethod
    def _create_file(filename):
        """Create *filename* with the 3-byte payload b'foo'."""
        with open(filename, 'wb') as f:
            f.write(b'foo')
    def test_samefile(self):
        """samefile() is reflexive; calling it without arguments is a TypeError."""
        test_fn = support.TESTFN + "1"
        self._create_file(test_fn)
        self.assertTrue(posixpath.samefile(test_fn, test_fn))
        self.assertRaises(TypeError, posixpath.samefile)
    @unittest.skipIf(
        sys.platform.startswith('win'),
        "posixpath.samefile does not work on links in Windows")
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    def test_samefile_on_links(self):
        """A symlink and its target are samefile(); two distinct files are not."""
        test_fn1 = support.TESTFN + "1"
        test_fn2 = support.TESTFN + "2"
        self._create_file(test_fn1)
        os.symlink(test_fn1, test_fn2)
        self.assertTrue(posixpath.samefile(test_fn1, test_fn2))
        # replace the link with an independent regular file
        os.remove(test_fn2)
        self._create_file(test_fn2)
        self.assertFalse(posixpath.samefile(test_fn1, test_fn2))
    def test_samestat(self):
        """samestat() is True for two stat results of the same file."""
        test_fn = support.TESTFN + "1"
        self._create_file(test_fn)
        test_fns = [test_fn]*2
        stats = map(os.stat, test_fns)
        self.assertTrue(posixpath.samestat(*stats))
    @unittest.skipIf(
        sys.platform.startswith('win'),
        "posixpath.samestat does not work on links in Windows")
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    def test_samestat_on_links(self):
        """samestat() sees through symlinks (os.stat follows them); distinct files differ."""
        test_fn1 = support.TESTFN + "1"
        test_fn2 = support.TESTFN + "2"
        self._create_file(test_fn1)
        test_fns = (test_fn1, test_fn2)
        os.symlink(*test_fns)
        stats = map(os.stat, test_fns)
        self.assertTrue(posixpath.samestat(*stats))
        # replace the link with an independent regular file
        os.remove(test_fn2)
        self._create_file(test_fn2)
        stats = map(os.stat, test_fns)
        self.assertFalse(posixpath.samestat(*stats))
        self.assertRaises(TypeError, posixpath.samestat)
    def test_ismount(self):
        """'/' is always a mount point; bytes path accepted (emits DeprecationWarning here)."""
        self.assertIs(posixpath.ismount("/"), True)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertIs(posixpath.ismount(b"/"), True)
    def test_ismount_non_existent(self):
        """Neither a missing path nor a plain directory is a mount point."""
        # Non-existent mountpoint.
        self.assertIs(posixpath.ismount(ABSTFN), False)
        try:
            os.mkdir(ABSTFN)
            self.assertIs(posixpath.ismount(ABSTFN), False)
        finally:
            safe_rmdir(ABSTFN)
    @unittest.skipUnless(support.can_symlink(),
                         "Test requires symlink support")
    def test_ismount_symlinks(self):
        """A symlink is never a mount point, even if it points at one ('/')."""
        # Symlinks are never mountpoints.
        try:
            os.symlink("/", ABSTFN)
            self.assertIs(posixpath.ismount(ABSTFN), False)
        finally:
            os.unlink(ABSTFN)
    @unittest.skipIf(posix is None, "Test requires posix module")
    def test_ismount_different_device(self):
        """ismount() is True when a path's st_dev differs from its parent's."""
        # Simulate the path being on a different device from its parent by
        # mocking out st_dev.
        save_lstat = os.lstat
        def fake_lstat(path):
            # report (st_dev=1, st_ino=1) for ABSTFN only, zeros elsewhere
            st_ino = 0
            st_dev = 0
            if path == ABSTFN:
                st_dev = 1
                st_ino = 1
            return posix.stat_result((0, st_ino, st_dev, 0, 0, 0, 0, 0, 0, 0))
        try:
            os.lstat = fake_lstat
            self.assertIs(posixpath.ismount(ABSTFN), True)
        finally:
            # always restore the real os.lstat
            os.lstat = save_lstat
    def test_expanduser(self):
        """expanduser(): '~' expansion via $HOME and the pwd database; str and bytes."""
        self.assertEqual(posixpath.expanduser("foo"), "foo")
        self.assertEqual(posixpath.expanduser(b"foo"), b"foo")
        try:
            import pwd
        except ImportError:
            pass
        else:
            self.assertIsInstance(posixpath.expanduser("~/"), str)
            self.assertIsInstance(posixpath.expanduser(b"~/"), bytes)
            # if home directory == root directory, this test makes no sense
            if posixpath.expanduser("~") != '/':
                self.assertEqual(
                    posixpath.expanduser("~") + "/",
                    posixpath.expanduser("~/")
                )
                self.assertEqual(
                    posixpath.expanduser(b"~") + b"/",
                    posixpath.expanduser(b"~/")
                )
            self.assertIsInstance(posixpath.expanduser("~root/"), str)
            self.assertIsInstance(posixpath.expanduser("~foo/"), str)
            self.assertIsInstance(posixpath.expanduser(b"~root/"), bytes)
            self.assertIsInstance(posixpath.expanduser(b"~foo/"), bytes)
            with support.EnvironmentVarGuard() as env:
                env['HOME'] = '/'
                self.assertEqual(posixpath.expanduser("~"), "/")
                self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
                # expanduser should fall back to using the password database
                del env['HOME']
                home = pwd.getpwuid(os.getuid()).pw_dir
                self.assertEqual(posixpath.expanduser("~"), home)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"),
"/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
self.assertEqual(posixpath.normpath(b""), b".")
self.assertEqual(posixpath.normpath(b"/"), b"/")
self.assertEqual(posixpath.normpath(b"//"), b"//")
self.assertEqual(posixpath.normpath(b"///"), b"/")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//"), b"/foo/bar")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//.//..//.//baz"),
b"/foo/baz")
self.assertEqual(posixpath.normpath(b"///..//./foo/.//bar"),
b"/foo/bar")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_curdir(self):
        """realpath() resolves chains of '.' to the current directory (str and bytes)."""
        self.assertEqual(realpath('.'), os.getcwd())
        self.assertEqual(realpath('./.'), os.getcwd())
        self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
        self.assertEqual(realpath(b'.'), os.getcwdb())
        self.assertEqual(realpath(b'./.'), os.getcwdb())
        self.assertEqual(realpath(b'/'.join([b'.'] * 100)), os.getcwdb())
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_pardir(self):
        """realpath() resolves '..' chains; walking past the root stays at '/'."""
        self.assertEqual(realpath('..'), dirname(os.getcwd()))
        self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
        self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
        self.assertEqual(realpath(b'..'), dirname(os.getcwdb()))
        self.assertEqual(realpath(b'../..'), dirname(dirname(os.getcwdb())))
        self.assertEqual(realpath(b'/'.join([b'..'] * 100)), b'/')
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_basic(self):
        """realpath() follows a simple symlink to its (absolute) target."""
        # Basic operation.
        try:
            os.symlink(ABSTFN+"1", ABSTFN)
            self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
        finally:
            support.unlink(ABSTFN)
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_relative(self):
        """realpath() resolves a symlink whose target is a relative path."""
        try:
            os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
            self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
        finally:
            support.unlink(ABSTFN)
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_symlink_loops(self):
        """Looping symlinks: realpath() returns the path unchanged instead of recursing."""
        # Bug #930024, return the path unchanged if we get into an infinite
        # symlink loop.
        try:
            old_path = abspath('.')
            # self-loop
            os.symlink(ABSTFN, ABSTFN)
            self.assertEqual(realpath(ABSTFN), ABSTFN)
            # two-link cycle
            os.symlink(ABSTFN+"1", ABSTFN+"2")
            os.symlink(ABSTFN+"2", ABSTFN+"1")
            self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
            self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
            # components after/above a looping link are still handled
            self.assertEqual(realpath(ABSTFN+"1/x"), ABSTFN+"1/x")
            self.assertEqual(realpath(ABSTFN+"1/.."), dirname(ABSTFN))
            self.assertEqual(realpath(ABSTFN+"1/../x"), dirname(ABSTFN) + "/x")
            os.symlink(ABSTFN+"x", ABSTFN+"y")
            self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "y"),
                             ABSTFN + "y")
            self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "1"),
                             ABSTFN + "1")
            # link whose target begins with its own name
            os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
            self.assertEqual(realpath(ABSTFN+"a"), ABSTFN+"a/b")
            # link that resolves back to itself through '..'
            os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
                       basename(ABSTFN) + "c", ABSTFN+"c")
            self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c")
            # Test using relative path as well.
            os.chdir(dirname(ABSTFN))
            self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN)
            support.unlink(ABSTFN+"1")
            support.unlink(ABSTFN+"2")
            support.unlink(ABSTFN+"y")
            support.unlink(ABSTFN+"c")
            support.unlink(ABSTFN+"a")
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_repeated_indirect_symlinks(self):
        """Repeatedly traversing the same (non-looping) symlink must not be treated as a loop."""
        # Issue #6975.
        try:
            os.mkdir(ABSTFN)
            os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
            os.symlink('self/self/self', ABSTFN + '/link')
            self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
        finally:
            support.unlink(ABSTFN + '/self')
            support.unlink(ABSTFN + '/link')
            safe_rmdir(ABSTFN)
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_deep_recursion(self):
        """A deep chain of symlinks (each fanning out 10x) still resolves correctly."""
        depth = 10
        old_path = abspath('.')
        try:
            os.mkdir(ABSTFN)
            # link i+1 -> "i/i/.../i" (10 components), link 0 -> '.'
            for i in range(depth):
                os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
            os.symlink('.', ABSTFN + '/0')
            self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
            # Test using relative path as well.
            os.chdir(ABSTFN)
            self.assertEqual(realpath('%d' % depth), ABSTFN)
        finally:
            os.chdir(old_path)
            for i in range(depth + 1):
                support.unlink(ABSTFN + '/%d' % i)
            safe_rmdir(ABSTFN)
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_resolve_parents(self):
        """Symlinks in the parents of a relative path are resolved too."""
        # We also need to resolve any symlinks in the parents of a relative
        # path passed to realpath. E.g.: current working directory is
        # /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
        # realpath("a"). This should return /usr/share/doc/a/.
        try:
            old_path = abspath('.')
            os.mkdir(ABSTFN)
            os.mkdir(ABSTFN + "/y")
            os.symlink(ABSTFN + "/y", ABSTFN + "/k")
            os.chdir(ABSTFN + "/k")
            self.assertEqual(realpath("a"), ABSTFN + "/y/a")
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN + "/k")
            safe_rmdir(ABSTFN + "/y")
            safe_rmdir(ABSTFN)
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_resolve_before_normalizing(self):
        """Symlinks must be resolved before '..' is normalized away."""
        # Bug #990669: Symbolic links should be resolved before we
        # normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
        # in the following hierarchy:
        # a/k/y
        #
        # and a symbolic link 'link-y' pointing to 'y' in directory 'a',
        # then realpath("link-y/..") should return 'k', not 'a'.
        try:
            old_path = abspath('.')
            os.mkdir(ABSTFN)
            os.mkdir(ABSTFN + "/k")
            os.mkdir(ABSTFN + "/k/y")
            os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
            # Absolute path.
            self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
            # Relative path.
            os.chdir(dirname(ABSTFN))
            self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
                             ABSTFN + "/k")
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN + "/link-y")
            safe_rmdir(ABSTFN + "/k/y")
            safe_rmdir(ABSTFN + "/k")
            safe_rmdir(ABSTFN)
    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_resolve_first(self):
        """The first component of a relative path must be symlink-resolved as well."""
        # Bug #1213894: The first component of the path, if not absolute,
        # must be resolved too.
        try:
            old_path = abspath('.')
            os.mkdir(ABSTFN)
            os.mkdir(ABSTFN + "/k")
            os.symlink(ABSTFN, ABSTFN + "link")
            os.chdir(dirname(ABSTFN))
            base = basename(ABSTFN)
            self.assertEqual(realpath(base + "link"), ABSTFN)
            self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN + "link")
            safe_rmdir(ABSTFN + "/k")
            safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"),
"../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
def test_relpath_bytes(self):
(real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
try:
curdir = os.path.split(os.getcwdb())[-1]
self.assertRaises(ValueError, posixpath.relpath, b"")
self.assertEqual(posixpath.relpath(b"a"), b"a")
self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
self.assertEqual(posixpath.relpath(b"a", b"../b"),
b"../"+curdir+b"/a")
self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
b"../"+curdir+b"/a/b")
self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')
self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
finally:
os.getcwdb = real_getcwdb
def test_sameopenfile(self):
fname = support.TESTFN + "1"
with open(fname, "wb") as a, open(fname, "wb") as b:
self.assertTrue(posixpath.sameopenfile(a.fileno(), b.fileno()))
class PosixCommonTest(test_genericpath.CommonTest, unittest.TestCase):
    # Run the shared generic path-test battery against the POSIX flavour.
    pathmodule = posixpath

    # NOTE(review): presumably the extra names CommonTest should exercise
    # for this module — confirm against test_genericpath.CommonTest.
    attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
jiangyonghang/bitcoin | test/functional/replace-by-fee.py | 10 | 20698 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
    """Serialize *tx* and return the raw bytes as a hex string."""
    raw = tx.serialize()
    return bytes_to_hex_str(raw)
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
    """Create a txout with a given amount and scriptPubKey

    Mines coins as needed.

    confirmed - txouts created will be confirmed in the blockchain;
    unconfirmed otherwise.
    """
    fee = 1*COIN
    # Mine until the wallet can fund amount + fee.
    while node.getbalance() < satoshi_round((amount + fee)/COIN):
        node.generate(100)

    new_addr = node.getnewaddress()
    txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
    tx1 = node.getrawtransaction(txid, 1)
    txid = int(txid, 16)

    # Locate the output paying new_addr. Fix: the original fell through
    # with i == last index when no vout matched (its `assert i is not
    # None` only caught the empty-vout case), silently spending the
    # wrong output; fail loudly instead.
    for i, txout in enumerate(tx1['vout']):
        if txout['scriptPubKey']['addresses'] == [new_addr]:
            break
    else:
        raise AssertionError("no txout paying %s found" % new_addr)

    # Spend that output into a single txout with the requested script.
    tx2 = CTransaction()
    tx2.vin = [CTxIn(COutPoint(txid, i))]
    tx2.vout = [CTxOut(amount, scriptPubKey)]
    tx2.rehash()

    signed_tx = node.signrawtransaction(txToHex(tx2))

    txid = node.sendrawtransaction(signed_tx['hex'], True)

    # If requested, ensure txouts are confirmed.
    if confirmed:
        mempool_size = len(node.getrawmempool())
        while mempool_size > 0:
            node.generate(1)
            new_size = len(node.getrawmempool())
            # Error out if we have something stuck in the mempool, as this
            # would likely be a bug.
            assert(new_size < mempool_size)
            mempool_size = new_size

    # tx2 has exactly one output, so outpoint index 0 is correct.
    return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
    """Functional tests for the mempool replace-by-fee (RBF) rules."""

    def __init__(self):
        # Single node; reuse the cached chain rather than a clean one.
        super().__init__()
        self.num_nodes = 1
        self.setup_clean_chain = False

    def setup_network(self):
        # Generous ancestor/descendant limits so the chain/tree tests can
        # build long unconfirmed chains without hitting mempool policy.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
                                                              "-whitelist=127.0.0.1",
                                                              "-limitancestorcount=50",
                                                              "-limitancestorsize=101",
                                                              "-limitdescendantcount=200",
                                                              "-limitdescendantsize=101"
                                                              ]))
        self.is_network_split = False

    def run_test(self):
        # Prime the wallet/chain so later make_utxo calls are fast.
        make_utxo(self.nodes[0], 1*COIN)

        self.log.info("Running test simple doublespend...")
        self.test_simple_doublespend()

        self.log.info("Running test doublespend chain...")
        self.test_doublespend_chain()

        self.log.info("Running test doublespend tree...")
        self.test_doublespend_tree()

        self.log.info("Running test replacement feeperkb...")
        self.test_replacement_feeperkb()

        self.log.info("Running test spends of conflicting outputs...")
        self.test_spends_of_conflicting_outputs()

        self.log.info("Running test new unconfirmed inputs...")
        self.test_new_unconfirmed_inputs()

        self.log.info("Running test too many replacements...")
        self.test_too_many_replacements()

        self.log.info("Running test opt-in...")
        self.test_opt_in()

        self.log.info("Running test prioritised transactions...")
        self.test_prioritised_transactions()

        self.log.info("Passed")

    def test_simple_doublespend(self):
        """Simple doublespend"""
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Should fail because we haven't changed the fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)

        # This will raise an exception due to insufficient fee
        assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)

        # Extra 0.1 BTC fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)

        mempool = self.nodes[0].getrawmempool()

        # The original must be evicted in favour of the replacement.
        assert (tx1a_txid not in mempool)
        assert (tx1b_txid in mempool)

        assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))

    def test_doublespend_chain(self):
        """Doublespend of a long chain"""

        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)

        # Build a chain of spends, each paying 1 BTC in fees.
        prevout = tx0_outpoint
        remaining_value = initial_nValue
        chain_txids = []
        while remaining_value > 10*COIN:
            remaining_value -= 1*COIN
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = [CTxOut(remaining_value, CScript([1]))]
            tx_hex = txToHex(tx)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            chain_txids.append(txid)
            prevout = COutPoint(int(txid, 16), 0)

        # Whether the double-spend is allowed is evaluated by including all
        # child fees - 40 BTC - so this attempt is rejected.
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)

        # This will raise an exception due to insufficient fee
        assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)

        # Accepted with sufficient fee
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)

        # The entire chain must have been evicted.
        mempool = self.nodes[0].getrawmempool()
        for doublespent_txid in chain_txids:
            assert(doublespent_txid not in mempool)

    def test_doublespend_tree(self):
        """Doublespend of a big tree of transactions"""

        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)

        # Recursive generator: fan prevout out tree_width ways per level
        # until max_txs transactions have been submitted, yielding each
        # submitted CTransaction. _total_txs is a shared mutable counter.
        def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
            if _total_txs is None:
                _total_txs = [0]
            if _total_txs[0] >= max_txs:
                return

            txout_value = (initial_value - fee) // tree_width

            if txout_value < fee:
                return

            vout = [CTxOut(txout_value, CScript([i+1]))
                    for i in range(tree_width)]
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = vout
            tx_hex = txToHex(tx)

            assert(len(tx.serialize()) < 100000)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            yield tx
            _total_txs[0] += 1

            txid = int(txid, 16)

            for i, txout in enumerate(tx.vout):
                for x in branch(COutPoint(txid, i), txout_value,
                                max_txs,
                                tree_width=tree_width, fee=fee,
                                _total_txs=_total_txs):
                    yield x

        fee = int(0.0001*COIN)
        n = MAX_REPLACEMENT_LIMIT
        tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
        assert_equal(len(tree_txs), n)

        # Attempt double-spend, will fail because too little fee paid
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        # This will raise an exception due to insufficient fee
        assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)

        # 1 BTC fee is enough
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)

        # Every transaction in the tree must have been evicted.
        mempool = self.nodes[0].getrawmempool()

        for tx in tree_txs:
            tx.rehash()
            assert (tx.hash not in mempool)

        # Try again, but with more total transactions than the "max txs
        # double-spent at once" anti-DoS limit.
        for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
            fee = int(0.0001*COIN)
            tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
            tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
            assert_equal(len(tree_txs), n)

            dbl_tx = CTransaction()
            dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
            dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
            dbl_tx_hex = txToHex(dbl_tx)
            # This will raise an exception
            assert_raises_jsonrpc(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)

            # The rejected replacement must not have evicted anything.
            for tx in tree_txs:
                tx.rehash()
                self.nodes[0].getrawtransaction(tx.hash)

    def test_replacement_feeperkb(self):
        """Replacement requires fee-per-KB to be higher"""
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Higher fee, but the fee per KB is much lower, so the replacement is
        # rejected.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
        tx1b_hex = txToHex(tx1b)

        # This will raise an exception due to insufficient fee
        assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)

    def test_spends_of_conflicting_outputs(self):
        """Replacements that spend conflicting tx outputs are rejected"""
        utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
        utxo2 = make_utxo(self.nodes[0], 3*COIN)

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(utxo1, nSequence=0)]
        tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        tx1a_txid = int(tx1a_txid, 16)

        # Direct spend an output of the transaction we're replacing.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
        tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)

        # This will raise an exception
        assert_raises_jsonrpc(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)

        # Spend tx1a's output to test the indirect case.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        tx1b_txid = int(tx1b_txid, 16)

        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
                   CTxIn(COutPoint(tx1b_txid, 0))]
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)

        # This will raise an exception
        assert_raises_jsonrpc(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)

    def test_new_unconfirmed_inputs(self):
        """Replacements that add new unconfirmed inputs are rejected"""
        confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
        unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)

        tx1 = CTransaction()
        tx1.vin = [CTxIn(confirmed_utxo)]
        tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1_hex = txToHex(tx1)
        tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)

        tx2 = CTransaction()
        tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
        tx2.vout = tx1.vout
        tx2_hex = txToHex(tx2)

        # This will raise an exception
        assert_raises_jsonrpc(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)

    def test_too_many_replacements(self):
        """Replacements that evict too many transactions are rejected"""
        # Try directly replacing more than MAX_REPLACEMENT_LIMIT
        # transactions

        # Start by creating a single transaction with many outputs
        initial_nValue = 10*COIN
        utxo = make_utxo(self.nodes[0], initial_nValue)
        fee = int(0.0001*COIN)
        split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))

        outputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            outputs.append(CTxOut(split_value, CScript([1])))

        splitting_tx = CTransaction()
        splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
        splitting_tx.vout = outputs
        splitting_tx_hex = txToHex(splitting_tx)

        txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
        txid = int(txid, 16)

        # Now spend each of those outputs individually
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            tx_i = CTransaction()
            tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
            tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
            tx_i_hex = txToHex(tx_i)
            self.nodes[0].sendrawtransaction(tx_i_hex, True)

        # Now create doublespend of the whole lot; should fail.
        # Need a big enough fee to cover all spending transactions and have
        # a higher fee rate
        double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
        inputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
        double_tx = CTransaction()
        double_tx.vin = inputs
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)

        # This will raise an exception
        assert_raises_jsonrpc(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)

        # If we remove an input, it should pass
        double_tx = CTransaction()
        double_tx.vin = inputs[0:-1]
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        self.nodes[0].sendrawtransaction(double_tx_hex, True)

    def test_opt_in(self):
        """Replacing should only work if orig tx opted in"""
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

        # Create a non-opting in transaction
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Shouldn't be able to double-spend
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)

        # This will raise an exception
        assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)

        tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

        # Create a different non-opting in transaction
        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)

        # Still shouldn't be able to double-spend
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx2b_hex = txToHex(tx2b)

        # This will raise an exception
        assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)

        # Now create a new transaction that spends from tx1a and tx2a
        # opt-in on one of the inputs
        # Transaction should be replaceable on either input

        tx1a_txid = int(tx1a_txid, 16)
        tx2a_txid = int(tx2a_txid, 16)

        tx3a = CTransaction()
        tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
                    CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
        tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
        tx3a_hex = txToHex(tx3a)

        self.nodes[0].sendrawtransaction(tx3a_hex, True)

        tx3b = CTransaction()
        tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
        tx3b_hex = txToHex(tx3b)

        tx3c = CTransaction()
        tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
        tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
        tx3c_hex = txToHex(tx3c)

        self.nodes[0].sendrawtransaction(tx3b_hex, True)
        # If tx3b was accepted, tx3c won't look like a replacement,
        # but make sure it is accepted anyway
        self.nodes[0].sendrawtransaction(tx3c_hex, True)

    def test_prioritised_transactions(self):
        # Ensure that fee deltas used via prioritisetransaction are
        # correctly used by replacement logic

        # 1. Check that feeperkb uses modified fees
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Higher fee, but the actual fee per KB is much lower.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
        tx1b_hex = txToHex(tx1b)

        # Verify tx1b cannot replace tx1a.
        assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)

        # Use prioritisetransaction to set tx1a's fee to 0.
        self.nodes[0].prioritisetransaction(tx1a_txid, int(-0.1*COIN))

        # Now tx1b should be able to replace tx1a
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)

        assert(tx1b_txid in self.nodes[0].getrawmempool())

        # 2. Check that absolute fee checks use modified fee.
        tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)

        # Lower fee, but we'll prioritise it
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
        tx2b.rehash()
        tx2b_hex = txToHex(tx2b)

        # Verify tx2b cannot replace tx2a.
        assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)

        # Now prioritise tx2b to have a higher modified fee
        self.nodes[0].prioritisetransaction(tx2b.hash, int(0.1*COIN))

        # tx2b should now be accepted
        tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)

        assert(tx2b_txid in self.nodes[0].getrawmempool())
# Run the RBF test suite when invoked directly.
if __name__ == '__main__':
    ReplaceByFeeTest().main()
| mit |
tomdee/calico-containers | calicoctl/tests/unit/node_test.py | 2 | 41982 | # Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from netaddr import IPNetwork, IPAddress
from requests import Response
import signal
import os
import sys
from docker.errors import APIError
from docker import Client as DockerClient
from mock import patch, Mock, call
from nose.tools import *
from nose_parameterized import parameterized
from pycalico.datastore import (ETCD_AUTHORITY_DEFAULT, ETCD_SCHEME_DEFAULT,
ETCD_KEY_FILE_ENV, ETCD_CERT_FILE_ENV,
ETCD_CA_CERT_FILE_ENV, ETCD_SCHEME_ENV,
ETCD_AUTHORITY_ENV)
from calico_ctl import node
from calico_ctl.node import (ETCD_CA_CERT_NODE_FILE, ETCD_CERT_NODE_FILE,
ETCD_KEY_NODE_FILE, CALICO_NETWORKING_DEFAULT)
import calico_ctl
from pycalico.datastore_datatypes import IPPool
class TestAttachAndStream(unittest.TestCase):
    """Unit tests for node._attach_and_stream under the three ways the
    attached container's output stream can end: normal exit, Ctrl-C, and
    an external SIGTERM."""

    @patch("calico_ctl.node.docker_client", spec=DockerClient)
    @patch("calico_ctl.node.sys", spec=sys)
    def test_container_stops_normally(self, m_sys, m_docker_client):
        """
        Test _attach_and_stream when the container stops normally.
        :return: None
        """

        # attach(..., stream=True) returns a generator.
        def container_output_gen():
            yield ("Some output\n")
            yield ("from the container.")

        m_docker_client.attach.return_value = container_output_gen()
        m_stdout = Mock(spec=sys.stdout)
        m_sys.stdout = m_stdout
        m_container = Mock()

        node._attach_and_stream(m_container)

        m_docker_client.attach.assert_called_once_with(m_container,
                                                       stream=True)
        self.assertFalse(m_container.called)
        # All container output must be forwarded to stdout.
        m_stdout.write.assert_has_calls([call("Some output\n"),
                                         call("from the container.")])
        m_docker_client.stop.assert_called_once_with(m_container)

    @patch("calico_ctl.node.docker_client", spec=DockerClient)
    @patch("calico_ctl.node.sys", spec=sys)
    def test_ctrl_c(self, m_sys, m_docker_client):
        """
        Test _attach_and_stream when a Keyboard interrupt is generated.
        :return: None
        """

        # attach(..., stream=True) returns a generator.
        def container_output_gen():
            yield ("Some output\n")
            yield ("from the container.")
            # Simulate the user hitting Ctrl-C mid-stream.
            raise KeyboardInterrupt()
            yield ("This output is not printed.")

        m_docker_client.attach.return_value = container_output_gen()
        m_stdout = Mock(spec=sys.stdout)
        m_sys.stdout = m_stdout
        m_container = Mock()

        node._attach_and_stream(m_container)

        m_docker_client.attach.assert_called_once_with(m_container,
                                                       stream=True)
        self.assertFalse(m_container.called)
        # Only the output emitted before the interrupt is written.
        m_stdout.write.assert_has_calls([call("Some output\n"),
                                         call("from the container.")])
        self.assertEqual(m_stdout.write.call_count, 2)
        m_docker_client.stop.assert_called_once_with(m_container)

    @patch("calico_ctl.node.docker_client", spec=DockerClient)
    @patch("calico_ctl.node.sys", spec=sys)
    def test_killed(self, m_sys, m_docker_client):
        """
        Test _attach_and_stream when killed by another process.
        :return: None
        """

        # attach(..., stream=True) returns a generator.
        def container_output_gen():
            yield ("Some output\n")
            yield ("from the container.")
            # Commit suicide, simulating being killed from another terminal.
            os.kill(os.getpid(), signal.SIGTERM)
            yield ("\nThis output is printed, but only because we nerf'd "
                   "sys.exit()")

        m_docker_client.attach.return_value = container_output_gen()
        m_stdout = Mock(spec=sys.stdout)
        m_sys.stdout = m_stdout
        m_container = Mock()

        node._attach_and_stream(m_container)

        m_docker_client.attach.assert_called_once_with(m_container,
                                                       stream=True)
        self.assertFalse(m_container.called)
        # The SIGTERM handler is expected to exit cleanly (mocked here).
        m_sys.exit.assert_called_once_with(0)
        m_stdout.write.assert_has_calls([call("Some output\n"),
                                         call("from the container."),
                                         call("\nThis output is printed, but "
                                              "only because we nerf'd "
                                              "sys.exit()")])

        # Stop gets called twice, once for SIGTERM, and because sys.exit() gets
        # mocked, the function continues and we get another call when the
        # generator ends normally.
        m_docker_client.stop.assert_has_calls([call(m_container),
                                               call(m_container)])
        self.assertEqual(m_docker_client.stop.call_count, 2)
class TestNode(unittest.TestCase):
@parameterized.expand([
({'--ip': '127.a.0.1'}, True),
({'--ip': 'aa:bb::cc'}, True),
({'--ip': '127.0.0.1', '--ip6': '127.0.0.1'}, True),
({'--ip': '127.0.0.1', '--ip6': 'aa:bb::zz'}, True),
({'--ip': ''}, False),
({'--ip6': ''}, False),
({'--ip': None, '--ip6': ''}, False),
({'--ip': '10.10.10.10', '--ip6': None}, False),
({'--ip': '', '--ip6': 'dadd::beef'}, False),
({'--ip': '10.10.10.10'}, False),
({'--ip': '10.10.10.10', '--ip6': 'dead::beef'}, False),
({'<IP>': '10.10.10.10', '<IP6>': 'dead::beef'}, False),
({'<AS_NUM>': None}, False),
({'<AS_NUM>': '65535.65535'}, False),
({'<AS_NUM>': '0.65535'}, False),
({'<AS_NUM>': '1000000'}, False),
({'<AS_NUM>': '65535'}, False),
({'<AS_NUM>': '65536.0'}, True),
({'<AS_NUM>': '65535.65536'}, True),
({'<AS_NUM>': '65535.'}, True)
])
def test_validate_arguments(self, case, sys_exit_called):
"""
Test validate_arguments for calicoctl node command
"""
with patch('sys.exit', autospec=True) as m_sys_exit:
# Call method under test
node.validate_arguments(case)
# Assert that method exits on bad input
self.assertEqual(m_sys_exit.called, sys_exit_called)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.error_if_bgp_ip_conflict', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
@patch('calico_ctl.node.docker', autospec=True)
@patch('calico_ctl.node._find_or_pull_node_image', autospec=True)
@patch('calico_ctl.node._attach_and_stream', autospec=True)
def test_node_dockerless_start(self, m_attach_and_stream,
m_find_or_pull_node_image, m_docker,
m_docker_client, m_client,
m_error_if_bgp_ip_conflict,
m_warn_if_hostname_conflict,
m_warn_if_unknown_ip, m_get_host_ips,
m_setup_ip, m_check_system, m_os_makedirs,
m_os_path_exists):
"""
Test that the node_start function performs all necessary configurations
without making Docker calls when runtime=none.
"""
# Set up mock objects
m_os_path_exists.return_value = False
ip_1 = '1.1.1.1'
ip_2 = '2.2.2.2'
m_get_host_ips.return_value = [ip_1, ip_2]
m_docker.utils.create_host_config.return_value = 'host_config'
container = {'Id': 666}
m_docker_client.create_container.return_value = container
m_check_system.return_value = [True, True, True]
# Set up arguments
node_image = 'node_image'
runtime = 'none'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = True
libnetwork = False
# Call method under test
node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
libnetwork)
# Assert
m_os_path_exists.assert_called_once_with(log_dir)
m_os_makedirs.assert_called_once_with(log_dir)
m_check_system.assert_called_once_with(quit_if_error=False,
libnetwork=libnetwork,
check_docker=False)
m_setup_ip.assert_called_once_with()
m_get_host_ips.assert_called_once_with(exclude=["^docker.*", "^cbr.*",
"virbr.*", "lxcbr.*",
"veth.*", "cali.*",
"tunl.*"])
m_warn_if_unknown_ip.assert_called_once_with(ip_2, ip6)
m_warn_if_hostname_conflict.assert_called_once_with(ip_2)
m_error_if_bgp_ip_conflict.assert_called_once_with(ip_2, ip6)
m_client.get_ip_pools.assert_has_calls([call(4), call(6)])
m_client.ensure_global_config.assert_called_once_with()
m_client.create_host.assert_called_once_with(
node.hostname, ip_2, ip6, as_num
)
self.assertFalse(m_docker_client.remove_container.called)
self.assertFalse(m_docker.utils.create_host_config.called)
self.assertFalse(m_find_or_pull_node_image.called)
self.assertFalse(m_docker_client.create_container.called)
self.assertFalse(m_docker_client.start.called)
self.assertFalse(m_attach_and_stream.called)
@patch('os.path.exists', autospec=True)
@patch('os.makedirs', autospec=True)
@patch('calico_ctl.node._remove_host_tunnel_addr', autospec=True)
@patch('calico_ctl.node._ensure_host_tunnel_addr', autospec=True)
@patch('calico_ctl.node.check_system', autospec=True)
@patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
@patch('calico_ctl.node.get_host_ips', autospec=True)
@patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
@patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
@patch('calico_ctl.node.error_if_bgp_ip_conflict', autospec=True)
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
@patch('calico_ctl.node.docker', autospec=True)
@patch('calico_ctl.node._find_or_pull_node_image', autospec=True)
@patch('calico_ctl.node._attach_and_stream', autospec=True)
def test_node_start(self, m_attach_and_stream,
m_find_or_pull_node_image, m_docker,
m_docker_client, m_client,
m_error_if_bgp_ip_conflict, m_warn_if_hostname_conflict,
m_warn_if_unknown_ip, m_get_host_ips, m_setup_ip,
m_check_system, m_ensure_host_tunnel_addr,
m_remove_host_tunnel_addr, m_os_makedirs,
m_os_path_exists):
"""
Test that the node_Start function does not make Docker calls
function returns
"""
# Set up mock objects
m_os_path_exists.return_value = False
ip_1 = '1.1.1.1'
ip_2 = '2.2.2.2'
m_get_host_ips.return_value = [ip_1, ip_2]
m_docker.utils.create_host_config.return_value = 'host_config'
container = {'Id': 666}
m_docker_client.create_container.return_value = container
m_check_system.return_value = [True, True, True]
ipv4_pools = [IPPool(IPNetwork("10.0.0.0/16")),
IPPool(IPNetwork("10.1.0.0/16"), ipip=True)]
ipip_pools = [IPPool(IPNetwork("10.1.0.0/16"), ipip=True)]
m_client.get_ip_pools.return_value = ipv4_pools
# Set up arguments
node_image = 'node_image'
runtime = 'docker'
log_dir = './log_dir'
ip = ''
ip6 = 'aa:bb::zz'
as_num = ''
detach = False
libnetwork = False
# Call method under test
node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
libnetwork)
# Set up variables used in assertion statements
environment = [
"HOSTNAME=%s" % node.hostname,
"IP=%s" % ip_2,
"IP6=%s" % ip6,
"CALICO_NETWORKING=%s" % node.CALICO_NETWORKING_DEFAULT,
"ETCD_AUTHORITY=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
"ETCD_SCHEME=%s" % ETCD_SCHEME_DEFAULT,
"FELIX_ETCDADDR=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
"FELIX_ETCDSCHEME=%s" % ETCD_SCHEME_DEFAULT
]
binds = {
log_dir:
{
"bind": "/var/log/calico",
"ro": False
}
}
# Assert
m_os_path_exists.assert_called_once_with(log_dir)
m_os_makedirs.assert_called_once_with(log_dir)
m_check_system.assert_called_once_with(quit_if_error=False,
libnetwork=libnetwork,
check_docker=True)
m_setup_ip.assert_called_once_with()
m_get_host_ips.assert_called_once_with(exclude=["^docker.*", "^cbr.*",
"virbr.*", "lxcbr.*",
"veth.*", "cali.*",
"tunl.*"])
m_warn_if_unknown_ip.assert_called_once_with(ip_2, ip6)
m_warn_if_hostname_conflict.assert_called_once_with(ip_2)
m_error_if_bgp_ip_conflict.assert_called_once_with(ip_2, ip6)
m_client.get_ip_pools.assert_has_calls([call(4), call(6)])
m_client.ensure_global_config.assert_called_once_with()
m_client.create_host.assert_called_once_with(
node.hostname, ip_2, ip6, as_num
)
m_ensure_host_tunnel_addr.assert_called_once_with(ipv4_pools,
ipip_pools)
assert_false(m_remove_host_tunnel_addr.called)
m_docker_client.remove_container.assert_called_once_with(
'calico-node', force=True
)
m_docker.utils.create_host_config.assert_called_once_with(
privileged=True,
restart_policy={"Name": "always"},
network_mode="host",
binds=binds
)
m_find_or_pull_node_image.assert_called_once_with('node_image')
m_docker_client.create_container.assert_called_once_with(
node_image,
name='calico-node',
detach=True,
environment=environment,
host_config='host_config',
volumes=['/var/log/calico']
)
m_docker_client.start.assert_called_once_with(container)
m_attach_and_stream.assert_called_once_with(container)
    @patch('os.path.exists', autospec=True)
    @patch('os.makedirs', autospec=True)
    @patch('os.getenv', autospec=True)
    @patch('calico_ctl.node._remove_host_tunnel_addr', autospec=True)
    @patch('calico_ctl.node._ensure_host_tunnel_addr', autospec=True)
    @patch('calico_ctl.node.check_system', autospec=True)
    @patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
    @patch('calico_ctl.node.get_host_ips', autospec=True)
    @patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
    @patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
    @patch('calico_ctl.node.error_if_bgp_ip_conflict', autospec=True)
    @patch('calico_ctl.node.client', autospec=True)
    @patch('calico_ctl.node.docker_client', autospec=True)
    @patch('calico_ctl.node.docker', autospec=True)
    @patch('calico_ctl.node._find_or_pull_node_image', autospec=True)
    @patch('calico_ctl.node._attach_and_stream', autospec=True)
    def test_node_start_secure(self, m_attach_and_stream,
                               m_find_or_pull_node_image, m_docker,
                               m_docker_client, m_client,
                               m_error_if_bgp_ip_conflict,
                               m_warn_if_hostname_conflict, m_warn_if_unknown_ip,
                               m_get_host_ips, m_setup_ip, m_check_system,
                               m_ensure_host_tunnel_addr,
                               m_remove_host_tunnel_addr, m_os_getenv,
                               m_os_makedirs, m_os_path_exists):
        """
        Test that the node_start function passes in correct values when
        secure etcd environment variables are present.
        """
        # NOTE: mock parameters are injected bottom-up, i.e. in reverse
        # order of the @patch decorator stack above.
        # Set up mock objects
        ip_1 = '1.1.1.1'
        ip_2 = '2.2.2.2'
        m_get_host_ips.return_value = [ip_1, ip_2]
        # Two containers are created: calico-node first, then libnetwork.
        container1 = {'Id': 111}
        container2 = {'Id': 222}
        m_docker_client.create_container.side_effect = iter([container1,
                                                             container2])
        m_docker.utils.create_host_config.return_value = 'host_config'
        # Log dir already exists, so os.makedirs should not be needed.
        m_os_path_exists.return_value = True
        m_check_system.return_value = [True, True, True]
        m_client.get_ip_pools.return_value = []
        # Fake a secure (https + client certs) etcd environment.
        etcd_ca_path = "/path/to/ca.crt"
        etcd_cert_path = "/path/to/cert.crt"
        etcd_key_path = "/path/to/key.pem"
        env = {"CALICO_NETWORKING": CALICO_NETWORKING_DEFAULT,
               ETCD_AUTHORITY_ENV: ETCD_AUTHORITY_DEFAULT,
               ETCD_SCHEME_ENV: "https",
               ETCD_CA_CERT_FILE_ENV: etcd_ca_path,
               ETCD_CERT_FILE_ENV: etcd_cert_path,
               ETCD_KEY_FILE_ENV: etcd_key_path}
        def m_getenv(env_var, *args, **kwargs):
            # os.getenv replacement: serve values from the dict above.
            return env[env_var]
        m_os_getenv.side_effect = m_getenv
        # Set up arguments
        node_image = 'node_image'
        runtime = 'docker'
        log_dir = './log_dir'
        docker_plugin = "/run/docker/plugins"
        ip = ''
        ip6 = 'aa:bb::zz'
        as_num = ''
        detach = False
        libnetwork_image = 'libnetwork_image'
        # Call method under test
        node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
                        libnetwork_image)
        # Set up variables used in assertion statements
        environment_node = [
            "HOSTNAME=%s" % node.hostname,
            "IP=%s" % ip_2,
            "IP6=%s" % ip6,
            "CALICO_NETWORKING=%s" % CALICO_NETWORKING_DEFAULT,
            "ETCD_AUTHORITY=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
            "ETCD_SCHEME=%s" % "https",
            "ETCD_CA_CERT_FILE=%s" % ETCD_CA_CERT_NODE_FILE,
            "ETCD_KEY_FILE=%s" % ETCD_KEY_NODE_FILE,
            "ETCD_CERT_FILE=%s" % ETCD_CERT_NODE_FILE,
            "FELIX_ETCDADDR=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
            "FELIX_ETCDSCHEME=https",
            "FELIX_ETCDCAFILE=%s" % ETCD_CA_CERT_NODE_FILE,
            "FELIX_ETCDKEYFILE=%s" % ETCD_KEY_NODE_FILE,
            "FELIX_ETCDCERTFILE=%s" % ETCD_CERT_NODE_FILE
        ]
        environment_libnetwork = [
            "HOSTNAME=%s" % node.hostname,
            "ETCD_AUTHORITY=%s" % ETCD_AUTHORITY_DEFAULT, # etcd host:port
            "ETCD_SCHEME=%s" % "https",
            "ETCD_CA_CERT_FILE=%s" % ETCD_CA_CERT_NODE_FILE,
            "ETCD_KEY_FILE=%s" % ETCD_KEY_NODE_FILE,
            "ETCD_CERT_FILE=%s" % ETCD_CERT_NODE_FILE,
        ]
        # The host cert/key/CA files must be bind-mounted read-only.
        binds_node = {
            log_dir: {"bind": "/var/log/calico", "ro": False},
            etcd_ca_path: {"bind": ETCD_CA_CERT_NODE_FILE, "ro": True},
            etcd_cert_path: {"bind": ETCD_CERT_NODE_FILE, "ro": True},
            etcd_key_path: {"bind": ETCD_KEY_NODE_FILE, "ro": True}
        }
        binds_libnetwork = {
            etcd_ca_path: {"bind": ETCD_CA_CERT_NODE_FILE, "ro": True},
            etcd_cert_path: {"bind": ETCD_CERT_NODE_FILE, "ro": True},
            etcd_key_path: {"bind": ETCD_KEY_NODE_FILE, "ro": True},
            docker_plugin: {'bind': docker_plugin, 'ro': False}
        }
        volumes_node = ['/var/log/calico', ETCD_CA_CERT_NODE_FILE,
                        ETCD_KEY_NODE_FILE, ETCD_CERT_NODE_FILE]
        volumes_libnetwork= [docker_plugin, ETCD_CA_CERT_NODE_FILE,
                             ETCD_KEY_NODE_FILE, ETCD_CERT_NODE_FILE]
        # Assert
        m_os_path_exists.assert_called_once_with(log_dir)
        m_check_system.assert_called_once_with(quit_if_error=False,
                                               libnetwork=libnetwork_image,
                                               check_docker=True)
        m_setup_ip.assert_called_once_with()
        m_get_host_ips.assert_called_once_with(exclude=["^docker.*", "^cbr.*",
                                                        "virbr.*", "lxcbr.*",
                                                        "veth.*", "cali.*",
                                                        "tunl.*"])
        m_warn_if_unknown_ip.assert_called_once_with(ip_2, ip6)
        m_warn_if_hostname_conflict.assert_called_once_with(ip_2)
        m_error_if_bgp_ip_conflict.assert_called_once_with(ip_2, ip6)
        m_client.get_ip_pools.assert_has_calls([call(4), call(6)])
        m_client.ensure_global_config.assert_called_once_with()
        m_client.create_host.assert_called_once_with(node.hostname, ip_2, ip6,
                                                     as_num)
        # No IPIP pools were configured, so any tunnel address is removed.
        assert_true(m_remove_host_tunnel_addr.called)
        m_docker_client.remove_container.assert_has_calls([
            call('calico-node', force=True),
            call('calico-libnetwork', force=True)
        ])
        m_docker.utils.create_host_config.assert_has_calls([
            call(privileged=True,
                 restart_policy={"Name": "always"},
                 network_mode="host",
                 binds=binds_node),
            call(privileged=True,
                 restart_policy={"Name": "always"},
                 network_mode="host",
                 binds=binds_libnetwork)
        ])
        m_find_or_pull_node_image.assert_has_calls([call('node_image'),
                                                    call('libnetwork_image')])
        m_docker_client.create_container.assert_has_calls([
            call(node_image,
                 name='calico-node',
                 detach=True,
                 environment=environment_node,
                 host_config='host_config',
                 volumes=volumes_node),
            call(libnetwork_image,
                 name='calico-libnetwork',
                 detach=True,
                 environment=environment_libnetwork,
                 host_config='host_config',
                 volumes=volumes_libnetwork)
        ])
        m_docker_client.start.assert_has_calls([call(container1),
                                                call(container2)])
        # detach=False: only the calico-node container's logs are streamed.
        m_attach_and_stream.assert_called_once_with(container1)
    @patch('os.path.exists', autospec=True)
    @patch('os.makedirs', autospec=True)
    @patch('calico_ctl.node.check_system', autospec=True)
    @patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
    @patch('calico_ctl.node.get_host_ips', autospec=True)
    @patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
    @patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
    @patch('calico_ctl.node.error_if_bgp_ip_conflict', autospec=True)
    @patch('calico_ctl.node.client', autospec=True)
    @patch('calico_ctl.node.docker_client', autospec=True)
    def test_node_start_remove_container_error(
            self, m_docker_client, m_client,
            m_error_if_bgp_ip_conflict, m_warn_if_hostname_conflict,
            m_warn_if_unknown_ip, m_get_host_ips, m_setup_ip, m_check_system,
            m_os_makedirs, m_os_path_exists):
        """
        Test that the docker client raises an APIError when it fails to
        remove a container.
        """
        # Set up mock objects
        # Any failure from remove_container must propagate out of node_start.
        err = APIError("Test error message", Response())
        m_docker_client.remove_container.side_effect = err
        m_check_system.return_value = [True, True, True]
        # Set up arguments
        node_image = 'node_image'
        runtime = 'docker'
        log_dir = './log_dir'
        ip = ''
        ip6 = 'aa:bb::zz'
        as_num = ''
        detach = False
        libnetwork = False
        # Testing expecting APIError exception
        self.assertRaises(APIError, node.node_start,
                          node_image, runtime, log_dir, ip, ip6, as_num, detach,
                          libnetwork)
    @patch('sys.exit', autospec=True)
    @patch('os.path.exists', autospec=True)
    @patch('os.makedirs', autospec=True)
    @patch('calico_ctl.node.check_system', autospec=True)
    @patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
    @patch('calico_ctl.node.get_host_ips', autospec=True)
    @patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
    @patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
    @patch('calico_ctl.node.error_if_bgp_ip_conflict', autospec=True)
    @patch('calico_ctl.node.client', autospec=True)
    @patch('calico_ctl.node.docker_client', autospec=True)
    def test_node_start_no_detected_ips(
            self, m_docker_client, m_client,
            m_error_if_bgp_ip_conflict, m_warn_if_hostname_conflict,
            m_warn_if_unknown_ip, m_get_host_ips, m_setup_ip, m_check_system,
            m_os_makedirs, m_os_path_exists, m_sys_exit):
        """
        Test that system exits when no ip is provided and host ips cannot be
        obtained
        """
        # Set up mock objects
        # No usable host IPs can be autodetected.
        m_get_host_ips.return_value = []
        m_check_system.return_value = [True, True, True]
        # Set up arguments
        node_image = 'node_image'
        runtime = 'docker'
        log_dir = './log_dir'
        ip = ''
        ip6 = 'aa:bb::zz'
        as_num = ''
        detach = False
        libnetwork = False
        # Call method under test
        # sys.exit is patched, so node_start returns instead of exiting.
        node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
                        libnetwork)
        # Assert
        m_sys_exit.assert_called_once_with(1)
    @patch('os.path.exists', autospec=True)
    @patch('os.makedirs', autospec=True)
    @patch('calico_ctl.node.check_system', autospec=True)
    @patch('calico_ctl.node._setup_ip_forwarding', autospec=True)
    @patch('calico_ctl.node.get_host_ips', autospec=True)
    @patch('calico_ctl.node.warn_if_unknown_ip', autospec=True)
    @patch('calico_ctl.node.warn_if_hostname_conflict', autospec=True)
    @patch('calico_ctl.node.error_if_bgp_ip_conflict', autospec=True)
    @patch('calico_ctl.node.client', autospec=True)
    @patch('calico_ctl.node.docker_client', autospec=True)
    def test_node_start_create_default_ip_pools(
            self, m_docker_client, m_client,
            m_error_if_bgp_ip_conflict, m_warn_if_hostname_conflict,
            m_warn_if_unknown_ip, m_get_host_ips, m_setup_ip, m_check_system,
            m_os_makedirs, m_os_path_exists):
        """
        Test that the client creates default ipv4 and ipv6 pools when the
        client returns an empty ip_pool on etcd setup
        """
        # Set up mock objects
        # Empty pool list simulates a fresh etcd datastore.
        m_client.get_ip_pools.return_value = []
        m_check_system.return_value = [True, True, True]
        # Set up arguments
        node_image = 'node_image'
        runtime = 'docker'
        log_dir = './log_dir'
        ip = ''
        ip6 = 'aa:bb::zz'
        as_num = ''
        detach = False
        libnetwork = False
        # Call method under test
        node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
                        libnetwork)
        # Assert
        # Defaults must be created for both IP versions.
        m_client.add_ip_pool.assert_has_calls([
            call(4, node.DEFAULT_IPV4_POOL),
            call(6, node.DEFAULT_IPV6_POOL)
        ])
@patch('calico_ctl.node.client', autospec=True)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_node_stop(self, m_docker_client, m_client):
"""
Test the client stops the node when node_stop called when there are
endpoints and the force flag is set.
"""
# Call method under test
m_client.get_endpoints.return_value = [Mock()]
node.node_stop(True)
# Assert
m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
m_docker_client.stop.assert_has_calls([call('calico-node'),
call('calico-libnetwork')])
    @patch('calico_ctl.node.client', autospec=True)
    @patch('calico_ctl.node.docker_client', autospec=True)
    def test_node_stop_endpoints(self, m_docker_client, m_client):
        """
        Test that the client does not stop the node when node_stop is called
        while endpoints exist and the force flag is not set.
        """
        # Call method under test
        m_client.get_endpoints.return_value = [Mock()]
        # Without force, live endpoints make node_stop exit early.
        self.assertRaises(SystemExit, node.node_stop, False)
        # Assert
        m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
        self.assertEquals(m_docker_client.stop.call_count, 0)
    @patch('calico_ctl.node.client', autospec=True)
    @patch('calico_ctl.node.docker_client', autospec=True)
    def test_node_stop_error(self, m_docker_client, m_client):
        """
        Test node_stop raises an exception when the docker client cannot
        stop the node
        """
        # Set up mock objects
        m_client.get_endpoints.return_value = [Mock()]
        err = APIError("Test error message", Response())
        # Try both orderings: the first stop fails, then the second fails.
        for sidee in ([None, err], [err, None]):
            m_docker_client.stop.side_effect = sidee
            # Call method under test expecting an exception
            self.assertRaises(APIError, node.node_stop, True)
    @patch('calico_ctl.node._remove_host_tunnel_addr', autospec=True)
    @patch('calico_ctl.node.remove_veth', autospec=True)
    @patch('calico_ctl.node._container_running', autospec=True, return_value=False)
    @patch('calico_ctl.node.client', autospec=True)
    def test_node_remove(self, m_client, m_cont_running, m_veth,
                         m_remove_tunnel_addr):
        """
        Test the client removes the host when node_remove called, and that
        endpoints are removed when remove_endpoints flag is set.
        """
        # Call method under test
        # Mock's `name` kwarg is reserved by Mock itself, so the endpoint
        # names must be assigned after construction.
        endpoint1 = Mock()
        endpoint1.name = "vethname1"
        endpoint2 = Mock()
        endpoint2.name = "vethname2"
        m_client.get_endpoints.return_value = [endpoint1, endpoint2]
        node.node_remove(True)
        # Assert
        m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
        m_client.remove_host.assert_called_once_with(node.hostname)
        # Each endpoint's veth is torn down, and both containers checked.
        m_veth.assert_has_calls([call("vethname1"), call("vethname2")])
        m_cont_running.assert_has_calls([call("calico-node"), call("calico-libnetwork")])
        assert_equal(m_remove_tunnel_addr.mock_calls, [call()])
    @patch('calico_ctl.node.remove_veth', autospec=True)
    @patch('calico_ctl.node._container_running', autospec=True, return_value=True)
    @patch('calico_ctl.node.client', autospec=True)
    def test_node_remove_node_running(self, m_client, m_cont_running, m_veth):
        """
        Test the client does not remove host when containers are running and
        node_remove is invoked.
        """
        # Act / Assert: node_remove must bail out while containers run,
        # touching neither the datastore nor any veths.
        self.assertRaises(SystemExit, node.node_remove, True)
        self.assertEquals(m_client.get_endpoints.call_count, 0)
        self.assertEquals(m_client.remove_host.call_count, 0)
        self.assertEquals(m_veth.call_count, 0)
    @patch('calico_ctl.node.remove_veth', autospec=True)
    @patch('calico_ctl.node._container_running', autospec=True, return_value=False)
    @patch('calico_ctl.node.client', autospec=True)
    def test_node_remove_endpoints_exist(self, m_client, m_cont_running, m_veth):
        """
        Test the client does not remove host when endpoints exist and
        node_remove is invoked without remove_endpoints flag.
        """
        # Call method under test
        m_client.get_endpoints.return_value = [Mock()]
        # Without the remove_endpoints flag, existing endpoints abort removal.
        self.assertRaises(SystemExit, node.node_remove, False)
        # Assert
        m_client.get_endpoints.assert_called_once_with(hostname=node.hostname)
        self.assertEquals(m_client.remove_host.call_count, 0)
        self.assertEquals(m_veth.call_count, 0)
@patch('calico_ctl.node.docker_client', autospec=True)
def test_container_running_no_cont(self, m_docker_client):
"""
Test the _container_running command when no container exists.
"""
response = Response()
response.status_code = 404
m_docker_client.inspect_container.side_effect = APIError("Test error message", response)
self.assertEquals(node._container_running("container1"), False)
m_docker_client.inspect_container.assert_called_once_with("container1")
@patch('calico_ctl.node.docker_client', autospec=True)
def test_container_running_err(self, m_docker_client):
"""
Test the _container_running command when the inspect command errors.
"""
response = Response()
response.status_code = 400
m_docker_client.inspect_container.side_effect = APIError("Test error message", response)
self.assertRaises(APIError, node._container_running, "container1")
m_docker_client.inspect_container.assert_called_once_with("container1")
@patch('calico_ctl.node.docker_client', autospec=True)
def test_container_running_cont_running(self, m_docker_client):
"""
Test the _container_running command when the container is running
"""
for test in (True, False):
m_docker_client.inspect_container.return_value = {"State": {"Running": test}}
self.assertEquals(node._container_running("container1"), test)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("sys.exit", autospec=True)
    def test_error_if_bgp_ipv4_conflict_no_conflict(self, m_exit, m_client):
        """
        Test check that node IP is not already in use by another node, no error.
        """
        # Empty mapping: no node currently owns either of the queried IPs.
        m_client.get_hostnames_from_ips = Mock()
        m_client.get_hostnames_from_ips.return_value = {}
        node.error_if_bgp_ip_conflict("10.0.0.1", "abcd::beef")
        self.assertFalse(m_exit.called)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.utils.get_hostname", autospec=True)
    @patch("sys.exit", autospec=True)
    def test_error_if_ip_conflict_ipv6_key_error(self, m_exit, m_hostname,
                                                 m_client):
        """
        Test that function accepts IP being owned by same host.
        """
        # NOTE(review): the test name mentions an ipv6 key error but the
        # docstring/body exercise the same-host-ownership path - confirm
        # which behavior this was meant to cover.
        # NOTE(review): this overwrites the module-global hostname and never
        # restores it, which may leak into later tests.
        calico_ctl.node.hostname = "host"
        m_client.get_hostnames_from_ips = Mock()
        # The IPv4 address is owned by this very host, so no conflict.
        m_client.get_hostnames_from_ips.return_value = {"10.0.0.1":"host"}
        node.error_if_bgp_ip_conflict("10.0.0.1", "abcd::beef")
        self.assertFalse(m_exit.called)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.utils.get_hostname", autospec=True)
    def test_error_when_bgp_ipv4_conflict(self, m_hostname, m_client):
        """
        Test that function exits when another node already uses ipv4 addr.
        """
        # The conflicting IP appears to be owned by a *different* host.
        calico_ctl.node.hostname = "not_host"
        m_client.get_hostnames_from_ips = Mock()
        m_client.get_hostnames_from_ips.return_value = {"10.0.0.1":"host"}
        self.assertRaises(SystemExit, node.error_if_bgp_ip_conflict,
                          "10.0.0.1", None)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.utils.get_hostname", autospec=True)
    def test_error_when_bgp_ipv6_conflict(self, m_hostname, m_client):
        """
        Test that function exits when another node already uses ipv6 addr.
        """
        # The conflicting IPv6 appears to be owned by a *different* host.
        calico_ctl.node.hostname = "not_host"
        m_client.get_hostnames_from_ips = Mock()
        m_client.get_hostnames_from_ips.return_value = {"abcd::beef":"host"}
        self.assertRaises(SystemExit, node.error_if_bgp_ip_conflict,
                          None, "abcd::beef")
    @patch("calico_ctl.node._get_host_tunnel_ip", autospec=True)
    @patch("calico_ctl.node._assign_host_tunnel_addr", autospec=True)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.utils.get_hostname", autospec=True)
    def test_ensure_host_tunnel_addr_no_ip(self, m_hostname, m_client,
                                           m_assign_host_tunnel_addr,
                                           m_get_tunnel_host_ip):
        """
        Test that a tunnel address is assigned from the IPIP pools when the
        host has no tunnel IP configured yet.
        """
        m_get_tunnel_host_ip.return_value = None
        ipv4_pools = [IPPool("10.0.0.0/16"),
                      IPPool("10.1.0.0/16", ipip=True)]
        ipip_pools = [IPPool("10.1.0.0/16", ipip=True)]
        calico_ctl.node._ensure_host_tunnel_addr(ipv4_pools, ipip_pools)
        m_assign_host_tunnel_addr.mock_calls, [call(ipip_pools)]
        assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(ipip_pools)])
    @patch("calico_ctl.node._get_host_tunnel_ip", autospec=True)
    @patch("calico_ctl.node._assign_host_tunnel_addr", autospec=True)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.utils.get_hostname", autospec=True)
    def test_ensure_host_tunnel_addr_non_ipip(self, m_hostname, m_client,
                                              m_assign_host_tunnel_addr,
                                              m_get_tunnel_host_ip):
        """
        Test that an existing tunnel IP in a non-IPIP pool is released and
        replaced with an address from the IPIP pools.
        """
        # 10.0.0.1 is inside 10.0.0.0/16, which is not an IPIP pool.
        m_get_tunnel_host_ip.return_value = IPAddress("10.0.0.1")
        ipv4_pools = [IPPool("10.0.0.0/16"),
                      IPPool("10.1.0.0/16", ipip=True)]
        ipip_pools = [IPPool("10.1.0.0/16", ipip=True)]
        calico_ctl.node._ensure_host_tunnel_addr(ipv4_pools, ipip_pools)
        assert_equal(m_client.release_ips.mock_calls,
                     [call({IPAddress("10.0.0.1")})])
        assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(ipip_pools)])
    @patch("calico_ctl.node._get_host_tunnel_ip", autospec=True)
    @patch("calico_ctl.node._assign_host_tunnel_addr", autospec=True)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.utils.get_hostname", autospec=True)
    def test_ensure_host_tunnel_addr_bad_ip(self, m_hostname, m_client,
                                            m_assign_host_tunnel_addr,
                                            m_get_tunnel_host_ip):
        """
        Test that a tunnel IP lying outside every configured pool triggers
        assignment of a fresh address from the IPIP pools.
        """
        # 11.0.0.1 is not contained in any of the pools below.
        m_get_tunnel_host_ip.return_value = IPAddress("11.0.0.1")
        ipv4_pools = [IPPool("10.0.0.0/16"),
                      IPPool("10.1.0.0/16", ipip=True)]
        ipip_pools = [IPPool("10.1.0.0/16", ipip=True)]
        calico_ctl.node._ensure_host_tunnel_addr(ipv4_pools, ipip_pools)
        assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(ipip_pools)])
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.node.hostname", autospec=True)
    def test_assign_host_tunnel_addr(self, m_hostname, m_client):
        """
        Test that _assign_host_tunnel_addr falls through to the next IPIP
        pool when the first one is exhausted.
        """
        # First pool full, IP allocated from second pool.
        m_client.auto_assign_ips.side_effect = iter([
            ([], []),
            ([IPAddress("10.0.0.1")], [])
        ])
        ipip_pools = [IPPool("10.1.0.0/16", ipip=True),
                      IPPool("10.0.0.0/16", ipip=True)]
        calico_ctl.node._assign_host_tunnel_addr(ipip_pools)
        # The allocated IP is recorded as this host's IPIP tunnel address.
        assert_equal(
            m_client.set_per_host_config.mock_calls,
            [call(m_hostname, "IpInIpTunnelAddr", "10.0.0.1")]
        )
    @patch("sys.exit", autospec=True)
    @patch("calico_ctl.node.client", autospec=True)
    @patch("calico_ctl.node.hostname", autospec=True)
    def test_assign_host_tunnel_addr_none_available(self, m_hostname,
                                                    m_client, m_exit):
        """
        Test that _assign_host_tunnel_addr exits with an error when every
        IPIP pool is exhausted.
        """
        # Both pools exhausted: no IP can be allocated from either.
        m_client.auto_assign_ips.side_effect = iter([
            ([], []),
            ([], [])
        ])
        ipip_pools = [IPPool("10.1.0.0/16", ipip=True),
                      IPPool("10.0.0.0/16", ipip=True)]
        # Make the patched sys.exit raise so execution stops there, as the
        # real sys.exit would.
        m_exit.side_effect = Exception
        assert_raises(Exception, calico_ctl.node._assign_host_tunnel_addr,
                      ipip_pools)
        assert_equal(m_exit.mock_calls, [call(1)])
@patch("calico_ctl.node._get_host_tunnel_ip", autospec=True)
@patch("calico_ctl.node.client", autospec=True)
@patch("calico_ctl.node.hostname", autospec=True)
def test_remove_host_tunnel_addr(self, m_hostname, m_client, m_get_ip):
ip_address = IPAddress("10.0.0.1")
m_get_ip.return_value = ip_address
calico_ctl.node._remove_host_tunnel_addr()
assert_equal(m_client.release_ips.mock_calls, [call({ip_address})])
assert_equal(m_client.remove_per_host_config.mock_calls,
[call(m_hostname, "IpInIpTunnelAddr")])
| apache-2.0 |
mcking49/apache-flask | Python/Lib/site-packages/simplejson/compat.py | 155 | 1036 | """Python 3 compatibility shims
"""
import sys
# Publish a uniform set of names (b, u, StringIO, BytesIO, text_type,
# binary_type, string_types, integer_types, unichr, reload_module, fromhex)
# regardless of whether we are running on Python 2 or Python 3.
if sys.version_info[0] < 3:
    PY3 = False
    def b(s):
        # On Python 2 a str literal is already bytes.
        return s
    def u(s):
        # Decode escape sequences in a byte string to a unicode string.
        return unicode(s, 'unicode_escape')
    import cStringIO as StringIO
    # Python 2 makes no text/bytes distinction, so one class serves both.
    StringIO = BytesIO = StringIO.StringIO
    text_type = unicode
    binary_type = str
    string_types = (basestring,)
    integer_types = (int, long)
    unichr = unichr
    reload_module = reload
    def fromhex(s):
        return s.decode('hex')
else:
    PY3 = True
    # imp.reload was deprecated in favour of importlib.reload in 3.4.
    if sys.version_info[:2] >= (3, 4):
        from importlib import reload as reload_module
    else:
        from imp import reload as reload_module
    import codecs
    def b(s):
        # Encode a str to bytes using latin-1 (a 1:1 byte mapping).
        return codecs.latin_1_encode(s)[0]
    def u(s):
        # Python 3 strings are already unicode.
        return s
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    text_type = str
    binary_type = bytes
    string_types = (str,)
    integer_types = (int,)
    def unichr(s):
        return u(chr(s))
    def fromhex(s):
        return bytes.fromhex(s)
# Widest available integer type (long on Python 2, int on Python 3).
long_type = integer_types[-1]
| mit |
imsally/redash | redash/handlers/data_sources.py | 5 | 5528 | import logging
from flask import make_response, request
from flask_restful import abort
from funcy import project
from redash import models
from redash.utils.configuration import ConfigurationContainer, ValidationError
from redash.permissions import require_admin, require_permission, require_access, view_only
from redash.query_runner import query_runners, get_configuration_schema_for_query_runner_type
from redash.handlers.base import BaseResource, get_object_or_404
class DataSourceTypeListResource(BaseResource):
    @require_admin
    def get(self):
        """Return every registered query runner type, sorted by display name."""
        runners = sorted(query_runners.values(), key=lambda runner: runner.name())
        return [runner.to_dict() for runner in runners]
class DataSourceResource(BaseResource):
    @require_admin
    def get(self, data_source_id):
        """Return full details (including options) for a single data source."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        return data_source.to_dict(all=True)
    @require_admin
    def post(self, data_source_id):
        """Update a data source's type, name and options from the JSON body."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        req = request.get_json(True)
        # The options schema depends on the (possibly new) runner type.
        schema = get_configuration_schema_for_query_runner_type(req['type'])
        if schema is None:
            abort(400)
        try:
            data_source.options.set_schema(schema)
            data_source.options.update(req['options'])
        except ValidationError:
            abort(400)
        data_source.type = req['type']
        data_source.name = req['name']
        models.db.session.add(data_source)
        models.db.session.commit()
        return data_source.to_dict(all=True)
    @require_admin
    def delete(self, data_source_id):
        """Delete the data source; responds 204 No Content on success."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        models.db.session.delete(data_source)
        models.db.session.commit()
        return make_response('', 204)
class DataSourceListResource(BaseResource):
    @require_permission('list_data_sources')
    def get(self):
        """List data sources visible to the current user, sorted by id."""
        # Admins see every data source; other users only those shared with
        # one of their groups.
        if self.current_user.has_permission('admin'):
            data_sources = models.DataSource.all(self.current_org)
        else:
            data_sources = models.DataSource.all(self.current_org, group_ids=self.current_user.group_ids)
        response = {}
        for ds in data_sources:
            # A data source may appear once per group membership; keep the
            # first occurrence only.
            if ds.id in response:
                continue
            try:
                d = ds.to_dict()
                # view_only is true only if *every* shared group grants
                # view-only access.
                d['view_only'] = all(project(ds.groups, self.current_user.group_ids).values())
                response[ds.id] = d
            except AttributeError:
                # Skip (but log) data sources that fail to serialize rather
                # than failing the whole listing.
                logging.exception("Error with DataSource#to_dict (data source id: %d)", ds.id)
        return sorted(response.values(), key=lambda d: d['id'])
    @require_admin
    def post(self):
        """Create a new data source after validating its options schema."""
        req = request.get_json(True)
        required_fields = ('options', 'name', 'type')
        for f in required_fields:
            if f not in req:
                abort(400)
        schema = get_configuration_schema_for_query_runner_type(req['type'])
        if schema is None:
            abort(400)
        config = ConfigurationContainer(req['options'], schema)
        if not config.is_valid():
            abort(400)
        datasource = models.DataSource.create_with_group(org=self.current_org,
                                                         name=req['name'],
                                                         type=req['type'],
                                                         options=config)
        models.db.session.commit()
        self.record_event({
            'action': 'create',
            'object_id': datasource.id,
            'object_type': 'datasource'
        })
        return datasource.to_dict(all=True)
class DataSourceSchemaResource(BaseResource):
    def get(self, data_source_id):
        """Return the data source's schema, optionally forcing a refresh."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org,
                                        data_source_id, self.current_org)
        require_access(data_source.groups, self.current_user, view_only)
        # Any ?refresh=... query parameter (even empty) triggers a refresh.
        should_refresh = 'refresh' in request.args
        return data_source.get_schema(should_refresh)
class DataSourcePauseResource(BaseResource):
    @require_admin
    def post(self, data_source_id):
        """Pause the data source; reason comes from JSON body or query args."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
        # silent=True: a missing/invalid JSON body yields None instead of 400.
        data = request.get_json(force=True, silent=True)
        if data:
            reason = data.get('reason')
        else:
            reason = request.args.get('reason')
        data_source.pause(reason)
        self.record_event({
            'action': 'pause',
            'object_id': data_source.id,
            'object_type': 'datasource'
        })
        return data_source.to_dict()
    @require_admin
    def delete(self, data_source_id):
        """Resume (un-pause) the data source."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
        data_source.resume()
        self.record_event({
            'action': 'resume',
            'object_id': data_source.id,
            'object_type': 'datasource'
        })
        return data_source.to_dict()
class DataSourceTestResource(BaseResource):
    @require_admin
    def post(self, data_source_id):
        """Test connectivity to the data source and report success/failure."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
        try:
            data_source.query_runner.test_connection()
        except Exception as e:
            # Python 2 unicode(): report the error message to the caller.
            return {"message": unicode(e), "ok": False}
        else:
            return {"message": "success", "ok": True}
| bsd-2-clause |
splunk/splunk-demo-yelp-search-command | bin/requests/packages/charade/latin1prober.py | 50 | 5387 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories tracked per byte transition (see
# Latin1ClassModel values 0-3 below).
FREQ_CAT_NUM = 4

# Character-class codes used to index the tables below.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps each byte value 0x00-0xFF to one of the CLASS_NUM classes above.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
)

# Likelihood of each (previous class, current class) transition, flattened
# row-major; indexed as [prev * CLASS_NUM + cur].
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Prober that scores how plausible a byte stream is as windows-1252."""

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Track the class of the previous byte plus a histogram of how
        # likely each observed transition was.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        filtered = self.filter_with_english_letters(aBuf)
        for byte in filtered:
            cur_class = Latin1_CharToClass[wrap_ord(byte)]
            freq_cat = Latin1ClassModel[self._mLastCharClass * CLASS_NUM
                                        + cur_class]
            if freq_cat == 0:
                # Illegal transition: this cannot be Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq_cat] += 1
            self._mLastCharClass = cur_class
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            return 0.0
        confidence = (float(self._mFreqCounter[3]) / total
                      - self._mFreqCounter[1] * 20.0 / total)
        if confidence < 0.0:
            confidence = 0.0
        # Halve the score so that more specific probers win ties against
        # the catch-all Latin-1 detector.
        return confidence * 0.5
| apache-2.0 |
martong/python-mode | pymode/libs2/rope/base/oi/runmod.py | 27 | 7631 |
def __rope_start_everything():
    """Run a target script under a trace hook that streams call records.

    Invoked as ``runmod.py <send_info> <project_root> <script>``: the
    script is executed with ``execfile`` while ``sys.settrace`` records
    every "interesting" call's arguments and return value in a persisted
    tuple form and ships them to the parent rope process over a socket
    (numeric ``send_info``) or into a marshal file (``send_info`` as a
    path).  ``send_info == '-'`` disables recording entirely.

    NOTE(review): this is Python 2 code (cPickle, execfile, unicode,
    ``types.TypeType``, ``func_code``); it will not run under Python 3.
    """
    import os
    import sys
    import socket
    import cPickle as pickle
    import marshal
    import inspect
    import types
    import threading
    class _MessageSender(object):
        # Null sender / interface: subclasses ship call records to rope.
        def send_data(self, data):
            pass
    class _SocketSender(_MessageSender):
        # Pickles records over a TCP connection to the local rope process.
        def __init__(self, port):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', port))
            self.my_file = s.makefile('w')
        def send_data(self, data):
            if not self.my_file.closed:
                pickle.dump(data, self.my_file)
        def close(self):
            self.my_file.close()
    class _FileSender(_MessageSender):
        # Marshals records into a file for rope to read afterwards.
        def __init__(self, file_name):
            self.my_file = open(file_name, 'wb')
        def send_data(self, data):
            if not self.my_file.closed:
                marshal.dump(data, self.my_file)
        def close(self):
            self.my_file.close()
    def _cached(func):
        # Memoizes a one-argument method; the cache is shared across all
        # instances of the class the decorated method belongs to.
        cache = {}
        def newfunc(self, arg):
            if arg in cache:
                return cache[arg]
            result = func(self, arg)
            cache[arg] = result
            return result
        return newfunc
    class _FunctionCallDataSender(object):
        # Installs the trace hook and converts runtime objects into the
        # persisted tuple forms rope's object-inference understands.
        def __init__(self, send_info, project_root):
            self.project_root = project_root
            if send_info.isdigit():
                self.sender = _SocketSender(int(send_info))
            else:
                self.sender = _FileSender(send_info)
            def global_trace(frame, event, arg):
                # HACK: Ignoring out->in calls
                # This might lose some information
                if self._is_an_interesting_call(frame):
                    return self.on_function_call
            sys.settrace(global_trace)
            threading.settrace(global_trace)
        def on_function_call(self, frame, event, arg):
            # Only 'return' events carry the return value; persist the
            # declared arguments plus the returned object and send them.
            if event != 'return':
                return
            args = []
            returned = ('unknown',)
            code = frame.f_code
            for argname in code.co_varnames[:code.co_argcount]:
                try:
                    args.append(self._object_to_persisted_form(frame.f_locals[argname]))
                except (TypeError, AttributeError):
                    args.append(('unknown',))
            try:
                returned = self._object_to_persisted_form(arg)
            except (TypeError, AttributeError):
                pass
            try:
                data = (self._object_to_persisted_form(frame.f_code),
                        tuple(args), returned)
                self.sender.send_data(data)
            except (TypeError):
                pass
            return self.on_function_call
        def _is_an_interesting_call(self, frame):
            #if frame.f_code.co_name in ['?', '<module>']:
            #    return False
            #return not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)
            # A call is interesting unless both it and its caller live
            # outside the project root.
            if not self._is_code_inside_project(frame.f_code) and \
               (not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)):
                return False
            return True
        def _is_code_inside_project(self, code):
            source = self._path(code.co_filename)
            return source is not None and os.path.exists(source) and \
                   _realpath(source).startswith(self.project_root)
        @_cached
        def _get_persisted_code(self, object_):
            # Persist a code object as its defining file and line number.
            source = self._path(object_.co_filename)
            if not os.path.exists(source):
                raise TypeError('no source')
            return ('defined', _realpath(source), str(object_.co_firstlineno))
        @_cached
        def _get_persisted_class(self, object_):
            try:
                return ('defined', _realpath(inspect.getsourcefile(object_)),
                        object_.__name__)
            except (TypeError, AttributeError):
                return ('unknown',)
        def _get_persisted_builtin(self, object_):
            # Builtins are persisted with one sample element so rope can
            # infer container element types (first element / first pair).
            if isinstance(object_, (str, unicode)):
                return ('builtin', 'str')
            if isinstance(object_, list):
                holding = None
                if len(object_) > 0:
                    holding = object_[0]
                return ('builtin', 'list', self._object_to_persisted_form(holding))
            if isinstance(object_, dict):
                keys = None
                values = None
                if len(object_) > 0:
                    keys = object_.keys()[0]
                    values = object_[keys]
                return ('builtin', 'dict',
                        self._object_to_persisted_form(keys),
                        self._object_to_persisted_form(values))
            if isinstance(object_, tuple):
                objects = []
                # Tuples of length >= 3 are truncated to their first item.
                if len(object_) < 3:
                    for holding in object_:
                        objects.append(self._object_to_persisted_form(holding))
                else:
                    objects.append(self._object_to_persisted_form(object_[0]))
                return tuple(['builtin', 'tuple'] + objects)
            if isinstance(object_, set):
                holding = None
                if len(object_) > 0:
                    for o in object_:
                        holding = o
                        break
                return ('builtin', 'set', self._object_to_persisted_form(holding))
            return ('unknown',)
        def _object_to_persisted_form(self, object_):
            # Dispatch on runtime type to the appropriate persisted form.
            if object_ is None:
                return ('none',)
            if isinstance(object_, types.CodeType):
                return self._get_persisted_code(object_)
            if isinstance(object_, types.FunctionType):
                return self._get_persisted_code(object_.func_code)
            if isinstance(object_, types.MethodType):
                return self._get_persisted_code(object_.im_func.func_code)
            if isinstance(object_, types.ModuleType):
                return self._get_persisted_module(object_)
            if isinstance(object_, (str, unicode, list, dict, tuple, set)):
                return self._get_persisted_builtin(object_)
            if isinstance(object_, (types.TypeType, types.ClassType)):
                return self._get_persisted_class(object_)
            return ('instance', self._get_persisted_class(type(object_)))
        @_cached
        def _get_persisted_module(self, object_):
            path = self._path(object_.__file__)
            if path and os.path.exists(path):
                return ('defined', _realpath(path))
            return ('unknown',)
        def _path(self, path):
            # Map .pyc back to .py; returns None (implicitly) for anything
            # that is not Python source.
            if path.endswith('.pyc'):
                path = path[:-1]
            if path.endswith('.py'):
                return path
        def close(self):
            self.sender.close()
            sys.settrace(None)
    def _realpath(path):
        # Canonical absolute path with ~ expansion and symlinks resolved.
        return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
    send_info = sys.argv[1]
    project_root = sys.argv[2]
    file_to_run = sys.argv[3]
    run_globals = globals()
    # Make the target script believe it is being run directly.
    run_globals.update({'__name__': '__main__',
                        '__builtins__': __builtins__,
                        '__file__': file_to_run})
    if send_info != '-':
        data_sender = _FunctionCallDataSender(send_info, project_root)
    # Drop our own arguments so the script sees a clean sys.argv.
    del sys.argv[1:4]
    execfile(file_to_run, run_globals)
    if send_info != '-':
        data_sender.close()
if __name__ == '__main__':
    __rope_start_everything()
| lgpl-3.0 |
damonkohler/sl4a | python-build/python-libs/gdata/src/atom/mock_http.py | 278 | 4474 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
  """Base exception for this mock HTTP module."""
  pass
class NoRecordingFound(Error):
  """Raised when no stored recording matches the requested operation/URL."""
  pass
class MockRequest(object):
  """Holds parameters of an HTTP request for matching against future requests.
  """
  def __init__(self, operation, url, data=None, headers=None):
    """Stores the request parameters.

    Args:
      operation: str, the HTTP method (e.g. 'GET', 'POST').
      url: str/unicode or atom.url.Url. String URLs are parsed so that
          later equality checks compare parsed URLs, not raw strings.
      data: request body; currently ignored when matching (see
          MockHttpClient.add_response).
      headers: dict of request headers; also ignored when matching.
    """
    # NOTE(review): `unicode` makes this Python 2 only.
    self.operation = operation
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    self.url = url
    self.data = data
    self.headers = headers
class MockResponse(atom.http_interface.HttpResponse):
  """Simulates an httplib.HTTPResponse object."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Stores canned response data.

    Args:
      body: str or file-like object; file-like bodies are drained once so
          every later read() returns the same payload.
      status: int or str, the HTTP status code (coerced with int()).
      reason: str, the HTTP reason phrase.
      headers: dict of response headers, defaults to empty.
    """
    self.body = body.read() if (body and hasattr(body, 'read')) else body
    self.status = int(status) if status is not None else None
    self.reason = reason
    self._headers = headers or {}

  def read(self):
    """Returns the stored body, mimicking httplib.HTTPResponse.read()."""
    return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
  def __init__(self, headers=None, recordings=None, real_client=None):
    """An HttpClient which responds to request with stored data.

    The request-response pairs are stored as tuples in a member list named
    recordings.

    The MockHttpClient can be switched from replay mode to record mode by
    setting the real_client member to an instance of an HttpClient which will
    make real HTTP requests and store the server's response in list of
    recordings.

    Args:
      headers: dict containing HTTP headers which should be included in all
          HTTP requests.
      recordings: The initial recordings to be used for responses. This list
          contains tuples in the form: (MockRequest, MockResponse)
      real_client: An HttpClient which will make a real HTTP request. The
          response will be converted into a MockResponse and stored in
          recordings.
    """
    self.recordings = recordings or []
    self.real_client = real_client
    self.headers = headers or {}

  def add_response(self, response, operation, url, data=None, headers=None):
    """Adds a request-response pair to the recordings list.

    After the recording is added, future matching requests will receive the
    response.

    Args:
      response: MockResponse
      operation: str
      url: str
      data: str, Currently the data is ignored when looking for matching
          requests.
      headers: dict of strings: Currently the headers are ignored when
          looking for matching requests.
    """
    request = MockRequest(operation, url, data=data, headers=headers)
    self.recordings.append((request, response))

  def request(self, operation, url, data=None, headers=None):
    """Returns a matching MockResponse from the recordings.

    If the real_client is set, the request will be passed along and the
    server's response will be added to the recordings and also returned.

    If there is no match, a NoRecordingFound error will be raised.
    """
    if self.real_client is None:
      if isinstance(url, (str, unicode)):
        url = atom.url.parse_url(url)
      # Matching considers only the operation and the parsed URL; data and
      # headers are intentionally ignored (see add_response docstring).
      for recording in self.recordings:
        if recording[0].operation == operation and recording[0].url == url:
          return recording[1]
      # Bug fix: error message previously read "No recodings found".
      raise NoRecordingFound('No recordings found for %s %s' % (
          operation, url))
    else:
      # There is a real HTTP client, so make the request, and record the
      # response.
      response = self.real_client.request(operation, url, data=data,
          headers=headers)
      # TODO: copy the headers
      stored_response = MockResponse(body=response, status=response.status,
          reason=response.reason)
      self.add_response(stored_response, operation, url, data=data,
          headers=headers)
      return stored_response
| apache-2.0 |
stephen-soltesz/collectd-mlab | site-packages/mlab/disco/route.py | 2 | 2298 | """Implements a minimally compatible form of netifaces.gateways."""
import netifaces
# Path to the routing table file under the /proc filesystem.
_ROUTE_FILENAME = '/proc/net/route'
# The default IPv4 route address as reported by /proc/net/route.
_ROUTE_DEFAULT_ADDR = '00000000'
AF_INET = netifaces.AF_INET
# TODO(soltesz): Remove this module and function when netifaces-0.10 is
# available in a system package.
def gateways(route_lines=None):
  """Parses a routing table and returns the default IPv4 gateway.

  Minimal stand-in for netifaces.gateways: the result only ever contains
  the 'default' key, and only the AF_INET (IPv4) family.

  Example:
    $ cat /proc/net/route
    Iface Destination Gateway Flags RefCnt Use Metric ...
    eth0  00000000    01020304 0003  0      0   0     ...

  Would return:
    {'default': {AF_INET: ('4.3.2.1', 'eth0')}}

  Args:
    route_lines: None or list of str, lines read from a routing table. If
        None, read lines from the system routing table.

  Returns:
    dict of dict of tuple: {'default': {AF_INET: (gateway_ip, iface)}},
    or {} when no default route is present.
  """
  if route_lines is None:
    with open(_ROUTE_FILENAME) as route_file:
      route_lines = route_file.readlines()

  # The first line is the column header; real routes start on line two.
  for entry in route_lines[1:]:
    fields = entry.split()
    if fields[1] == _ROUTE_DEFAULT_ADDR:
      return {'default': {AF_INET: (_hex_ip_to_dec_ip(fields[2]), fields[0])}}
  return {}
def _hex_ip_to_dec_ip(hex_ip):
"""Converts a hexadecimal IPv4 address to quad-dotted form.
Args:
hex_ip: str, zero padded, network order, hexadecimal format IPv4 address.
e.g. "01020A04".
Returns:
str, quad-dotted format IPv4 address, e.g. "4.10.2.1"
"""
fields = [hex_ip[i:i + 2] for i in range(0, len(hex_ip), 2)]
dec_fields = [str(int(v, 16)) for v in fields]
dec_fields.reverse()
return '.'.join(dec_fields)
| apache-2.0 |
Ballz0fSteel/Umeko | lib/aiohttp/_ws_impl.py | 20 | 14844 | """WebSocket protocol versions 13 and 8."""
import base64
import binascii
import collections
import hashlib
import json
import os
import random
import sys
from enum import IntEnum
from struct import Struct
from aiohttp import errors, hdrs
from aiohttp.log import ws_logger
__all__ = ('WebSocketParser', 'WebSocketWriter', 'do_handshake',
'WSMessage', 'WebSocketError', 'WSMsgType', 'WSCloseCode')
class WSCloseCode(IntEnum):
    """WebSocket close-frame status codes (RFC 6455 section 7.4.1)."""
    OK = 1000
    GOING_AWAY = 1001
    PROTOCOL_ERROR = 1002
    UNSUPPORTED_DATA = 1003
    INVALID_TEXT = 1007
    POLICY_VIOLATION = 1008
    MESSAGE_TOO_BIG = 1009
    MANDATORY_EXTENSION = 1010
    INTERNAL_ERROR = 1011
    SERVICE_RESTART = 1012
    TRY_AGAIN_LATER = 1013
# Close codes the parser accepts; codes >= 3000 are application-defined
# and validated separately in WebSocketParser.
ALLOWED_CLOSE_CODES = {int(i) for i in WSCloseCode}
class WSMsgType(IntEnum):
    """WebSocket frame opcodes plus internal pseudo-message types."""
    # Wire-level opcodes (RFC 6455 section 5.2).
    CONTINUATION = 0x0
    TEXT = 0x1
    BINARY = 0x2
    PING = 0x9
    PONG = 0xa
    CLOSE = 0x8
    # Internal-only values (never appear on the wire: opcodes are 4 bits).
    CLOSED = 0x101
    ERROR = 0x102
    # Lowercase aliases; NOTE(review): presumably kept for backward
    # compatibility with older aiohttp APIs — confirm before removing.
    text = TEXT
    binary = BINARY
    ping = PING
    pong = PONG
    close = CLOSE
    closed = CLOSED
    error = ERROR
# Magic GUID appended to Sec-WebSocket-Key for the accept hash (RFC 6455).
WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# Pre-compiled (un)packers for the network-order frame header fields.
UNPACK_LEN2 = Struct('!H').unpack_from    # 16-bit extended payload length
UNPACK_LEN3 = Struct('!Q').unpack_from    # 64-bit extended payload length
UNPACK_CLOSE_CODE = Struct('!H').unpack   # close-frame status code
PACK_LEN1 = Struct('!BB').pack            # header for payloads < 126 bytes
PACK_LEN2 = Struct('!BBH').pack           # header with 16-bit length
PACK_LEN3 = Struct('!BBQ').pack           # header with 64-bit length
PACK_CLOSE_CODE = Struct('!H').pack
# Messages larger than this are written as two separate writes (see
# WebSocketWriter._send_frame).
MSG_SIZE = 2 ** 14
_WSMessageBase = collections.namedtuple('_WSMessageBase',
                                        ['type', 'data', 'extra'])
class WSMessage(_WSMessageBase):
    """Immutable (type, data, extra) message delivered by the parser."""
    def json(self, *, loads=json.loads):
        """Return parsed JSON data.

        .. versionadded:: 0.22
        """
        return loads(self.data)
    @property
    def tp(self):
        # Alias for the ``type`` field; NOTE(review): presumably kept for
        # backward compatibility — confirm callers before removing.
        return self.type
# Sentinel message yielded once the connection is closed.
CLOSED_MESSAGE = WSMessage(WSMsgType.CLOSED, None, None)
class WebSocketError(Exception):
    """WebSocket protocol parser error."""

    def __init__(self, code, message):
        """Stores the close *code* and forwards *message* to Exception."""
        self.code = code
        super().__init__(message)
def WebSocketParser(out, buf):
    """Generator-based parser: reads frames from *buf*, feeds WSMessages.

    Runs forever: each loop iteration pulls one frame via parse_frame(),
    validates it, reassembles fragmented TEXT/BINARY messages, and pushes
    the resulting WSMessage into *out* (a flow-control buffer with a
    ``feed_data(msg, size)`` method).  Raises WebSocketError with a
    WSCloseCode on any protocol violation.
    """
    while True:
        fin, opcode, payload = yield from parse_frame(buf)
        if opcode == WSMsgType.CLOSE:
            if len(payload) >= 2:
                close_code = UNPACK_CLOSE_CODE(payload[:2])[0]
                # Codes below 3000 must come from the RFC-defined set;
                # >= 3000 is the application/registered range.
                if close_code < 3000 and close_code not in ALLOWED_CLOSE_CODES:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Invalid close code: {}'.format(close_code))
                try:
                    close_message = payload[2:].decode('utf-8')
                except UnicodeDecodeError as exc:
                    raise WebSocketError(
                        WSCloseCode.INVALID_TEXT,
                        'Invalid UTF-8 text message') from exc
                msg = WSMessage(WSMsgType.CLOSE, close_code, close_message)
            elif payload:
                # A 1-byte close payload is malformed: the code is 2 bytes.
                raise WebSocketError(
                    WSCloseCode.PROTOCOL_ERROR,
                    'Invalid close frame: {} {} {!r}'.format(
                        fin, opcode, payload))
            else:
                msg = WSMessage(WSMsgType.CLOSE, 0, '')
            out.feed_data(msg, 0)
        elif opcode == WSMsgType.PING:
            out.feed_data(WSMessage(WSMsgType.PING, payload, ''), len(payload))
        elif opcode == WSMsgType.PONG:
            out.feed_data(WSMessage(WSMsgType.PONG, payload, ''), len(payload))
        elif opcode not in (WSMsgType.TEXT, WSMsgType.BINARY):
            raise WebSocketError(
                WSCloseCode.PROTOCOL_ERROR,
                "Unexpected opcode={!r}".format(opcode))
        else:
            # load text/binary: collect continuation frames until fin=1.
            data = [payload]
            while not fin:
                fin, _opcode, payload = yield from parse_frame(buf, True)
                # We can receive ping/close in the middle of
                # text message, Case 5.*
                if _opcode == WSMsgType.PING:
                    out.feed_data(
                        WSMessage(WSMsgType.PING, payload, ''), len(payload))
                    fin, _opcode, payload = yield from parse_frame(buf, True)
                elif _opcode == WSMsgType.CLOSE:
                    if len(payload) >= 2:
                        close_code = UNPACK_CLOSE_CODE(payload[:2])[0]
                        if (close_code not in ALLOWED_CLOSE_CODES and
                                close_code < 3000):
                            raise WebSocketError(
                                WSCloseCode.PROTOCOL_ERROR,
                                'Invalid close code: {}'.format(close_code))
                        try:
                            close_message = payload[2:].decode('utf-8')
                        except UnicodeDecodeError as exc:
                            raise WebSocketError(
                                WSCloseCode.INVALID_TEXT,
                                'Invalid UTF-8 text message') from exc
                        msg = WSMessage(WSMsgType.CLOSE, close_code,
                                        close_message)
                    elif payload:
                        raise WebSocketError(
                            WSCloseCode.PROTOCOL_ERROR,
                            'Invalid close frame: {} {} {!r}'.format(
                                fin, opcode, payload))
                    else:
                        msg = WSMessage(WSMsgType.CLOSE, 0, '')
                    out.feed_data(msg, 0)
                    fin, _opcode, payload = yield from parse_frame(buf, True)
                # After any interleaved control frame, the data stream must
                # resume with a CONTINUATION frame.
                if _opcode != WSMsgType.CONTINUATION:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'The opcode in non-fin frame is expected '
                        'to be zero, got {!r}'.format(_opcode))
                else:
                    data.append(payload)
            if opcode == WSMsgType.TEXT:
                try:
                    text = b''.join(data).decode('utf-8')
                    out.feed_data(WSMessage(WSMsgType.TEXT, text, ''),
                                  len(text))
                except UnicodeDecodeError as exc:
                    raise WebSocketError(
                        WSCloseCode.INVALID_TEXT,
                        'Invalid UTF-8 text message') from exc
            else:
                data = b''.join(data)
                out.feed_data(
                    WSMessage(WSMsgType.BINARY, data, ''), len(data))
native_byteorder = sys.byteorder
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object
of any length. Returns a `bytes` object of the same length as
`data` with the mask applied as specified in section 5.3 of RFC
6455.
This pure-python implementation may be replaced by an optimized
version when available.
"""
assert isinstance(data, bytearray), data
assert len(mask) == 4, mask
datalen = len(data)
if datalen == 0:
# everything work without this, but may be changed later in Python.
return bytearray()
data = int.from_bytes(data, native_byteorder)
mask = int.from_bytes(mask * (datalen // 4) + mask[: datalen % 4],
native_byteorder)
return (data ^ mask).to_bytes(datalen, native_byteorder)
# Select the masking implementation: the AIOHTTP_NO_EXTENSIONS environment
# variable forces the pure-Python version; otherwise prefer the compiled
# Cython extension and fall back to Python if it is not built.
if bool(os.environ.get('AIOHTTP_NO_EXTENSIONS')):
    _websocket_mask = _websocket_mask_python
else:
    try:
        from ._websocket import _websocket_mask_cython
        _websocket_mask = _websocket_mask_cython
    except ImportError:  # pragma: no cover
        _websocket_mask = _websocket_mask_python
def parse_frame(buf, continuation=False):
    """Return the next frame from the socket.

    Generator: reads the 2-byte header, optional extended length, optional
    4-byte mask, and the payload from *buf* (via ``yield from buf.read``).
    *continuation* permits a CONTINUATION opcode with fin=0 (set while
    reassembling a fragmented message).  Returns (fin, opcode, payload)
    with the mask already removed.
    """
    # read header
    data = yield from buf.read(2)
    first_byte, second_byte = data
    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf
    # frame-fin = %x0 ; more frames of this message follow
    #           / %x1 ; final frame of this message
    # frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
    # frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
    # frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
    if rsv1 or rsv2 or rsv3:
        raise WebSocketError(
            WSCloseCode.PROTOCOL_ERROR,
            'Received frame with non-zero reserved bits')
    # Opcodes > 0x7 are control frames and may never be fragmented.
    if opcode > 0x7 and fin == 0:
        raise WebSocketError(
            WSCloseCode.PROTOCOL_ERROR,
            'Received fragmented control frame')
    if fin == 0 and opcode == WSMsgType.CONTINUATION and not continuation:
        raise WebSocketError(
            WSCloseCode.PROTOCOL_ERROR,
            'Received new fragment frame with non-zero '
            'opcode {!r}'.format(opcode))
    has_mask = (second_byte >> 7) & 1
    length = (second_byte) & 0x7f
    # Control frames MUST have a payload length of 125 bytes or less
    if opcode > 0x7 and length > 125:
        raise WebSocketError(
            WSCloseCode.PROTOCOL_ERROR,
            "Control frame payload cannot be larger than 125 bytes")
    # read payload: 126 and 127 signal 16-bit / 64-bit extended lengths.
    if length == 126:
        data = yield from buf.read(2)
        length = UNPACK_LEN2(data)[0]
    elif length > 126:
        data = yield from buf.read(8)
        length = UNPACK_LEN3(data)[0]
    if has_mask:
        mask = yield from buf.read(4)
    if length:
        payload = yield from buf.read(length)
    else:
        payload = bytearray()
    if has_mask:
        payload = _websocket_mask(bytes(mask), payload)
    return fin, opcode, payload
class WebSocketWriter:
    """Serializes outgoing WebSocket frames onto a transport-like writer."""

    # NOTE(review): the default ``random=random.Random()`` instance is
    # evaluated once at class-definition time and therefore shared by all
    # writers that don't pass their own RNG — confirm this is intentional
    # before changing (masks would come from one shared sequence).
    def __init__(self, writer, *, use_mask=False, random=random.Random()):
        self.writer = writer
        # Clients must mask frames (RFC 6455 section 5.3); servers must not.
        self.use_mask = use_mask
        self.randrange = random.randrange
    def _send_frame(self, message, opcode):
        """Send a frame over the websocket with message as its payload."""
        msg_length = len(message)
        use_mask = self.use_mask
        if use_mask:
            mask_bit = 0x80
        else:
            mask_bit = 0
        # Choose the shortest header form that fits the payload length.
        if msg_length < 126:
            header = PACK_LEN1(0x80 | opcode, msg_length | mask_bit)
        elif msg_length < (1 << 16):
            header = PACK_LEN2(0x80 | opcode, 126 | mask_bit, msg_length)
        else:
            header = PACK_LEN3(0x80 | opcode, 127 | mask_bit, msg_length)
        if use_mask:
            mask = self.randrange(0, 0xffffffff)
            mask = mask.to_bytes(4, 'big')
            message = _websocket_mask(mask, bytearray(message))
            self.writer.write(header + mask + message)
        else:
            # Avoid copying a large payload just to prepend the tiny header.
            if len(message) > MSG_SIZE:
                self.writer.write(header)
                self.writer.write(message)
            else:
                self.writer.write(header + message)
    def pong(self, message=b''):
        """Send pong message."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        self._send_frame(message, WSMsgType.PONG)
    def ping(self, message=b''):
        """Send ping message."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        self._send_frame(message, WSMsgType.PING)
    def send(self, message, binary=False):
        """Send a frame over the websocket with message as its payload."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        if binary:
            self._send_frame(message, WSMsgType.BINARY)
        else:
            self._send_frame(message, WSMsgType.TEXT)
    def close(self, code=1000, message=b''):
        """Close the websocket, sending the specified code and message."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        self._send_frame(
            PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE)
def do_handshake(method, headers, transport, protocols=()):
    """Prepare WebSocket handshake.

    It return HTTP response code, response headers, websocket parser,
    websocket writer. It does not perform any IO.

    `protocols` is a sequence of known protocols. On successful handshake,
    the returned response headers contain the first protocol in this list
    which the server also knows.

    Raises HttpProcessingError (405) for a non-GET method and
    HttpBadRequest for any malformed or unsupported handshake header.
    """
    # WebSocket accepts only GET
    if method.upper() != hdrs.METH_GET:
        raise errors.HttpProcessingError(
            code=405, headers=((hdrs.ALLOW, hdrs.METH_GET),))
    if 'websocket' != headers.get(hdrs.UPGRADE, '').lower().strip():
        raise errors.HttpBadRequest(
            message='No WebSocket UPGRADE hdr: {}\n Can '
            '"Upgrade" only to "WebSocket".'.format(headers.get(hdrs.UPGRADE)))
    if 'upgrade' not in headers.get(hdrs.CONNECTION, '').lower():
        raise errors.HttpBadRequest(
            message='No CONNECTION upgrade hdr: {}'.format(
                headers.get(hdrs.CONNECTION)))
    # find common sub-protocol between client and server
    protocol = None
    if hdrs.SEC_WEBSOCKET_PROTOCOL in headers:
        req_protocols = [str(proto.strip()) for proto in
                         headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]
        for proto in req_protocols:
            if proto in protocols:
                protocol = proto
                break
        else:
            # No overlap found: Return no protocol as per spec
            ws_logger.warning(
                'Client protocols %r don’t overlap server-known ones %r',
                req_protocols, protocols)
    # check supported version
    version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, '')
    if version not in ('13', '8', '7'):
        raise errors.HttpBadRequest(
            message='Unsupported version: {}'.format(version),
            headers=((hdrs.SEC_WEBSOCKET_VERSION, '13'),))
    # check client handshake for validity: the key must decode to 16 bytes.
    key = headers.get(hdrs.SEC_WEBSOCKET_KEY)
    try:
        if not key or len(base64.b64decode(key)) != 16:
            raise errors.HttpBadRequest(
                message='Handshake error: {!r}'.format(key))
    except binascii.Error:
        raise errors.HttpBadRequest(
            message='Handshake error: {!r}'.format(key)) from None
    # Accept value is SHA-1 of the client key + the fixed RFC 6455 GUID.
    response_headers = [
        (hdrs.UPGRADE, 'websocket'),
        (hdrs.CONNECTION, 'upgrade'),
        (hdrs.TRANSFER_ENCODING, 'chunked'),
        (hdrs.SEC_WEBSOCKET_ACCEPT, base64.b64encode(
            hashlib.sha1(key.encode() + WS_KEY).digest()).decode())]
    if protocol:
        response_headers.append((hdrs.SEC_WEBSOCKET_PROTOCOL, protocol))
    # response code, headers, parser, writer, protocol
    return (101,
            response_headers,
            WebSocketParser,
            WebSocketWriter(transport),
            protocol)
| gpl-3.0 |
shakamunyi/nova | nova/openstack/common/versionutils.py | 4 | 7305 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import pkg_resources
import six
from nova.openstack.common._i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
    """A decorator to mark callables as deprecated.

    This decorator logs a deprecation message when the callable it decorates is
    used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an optional
    replacement.

    Examples:

    1. Specifying the required deprecated release

    >>> @deprecated(as_of=deprecated.ICEHOUSE)
    ... def a(): pass

    2. Specifying a replacement:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
    ... def b(): pass

    3. Specifying the release where the functionality may be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
    ... def c(): pass

    4. Specifying the deprecated functionality will not be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
    ... def d(): pass

    5. Specifying a replacement, deprecated functionality will not be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
    ... def e(): pass

    """
    # Single-letter release codes; arithmetic on their ordinals computes
    # "N releases later" (see _get_safe_to_remove_release).
    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
    # expected we maintain a gap between Bexar and Folsom in this list.
    BEXAR = 'B'
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'
    JUNO = 'J'
    KILO = 'K'
    _RELEASES = {
        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
        # expected we maintain a gap between Bexar and Folsom in this list.
        'B': 'Bexar',
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
        'J': 'Juno',
        'K': 'Kilo',
    }
    # Message templates selected by _build_message depending on whether a
    # replacement exists and whether removal is planned.
    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')
    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')
    _deprecated_msg_with_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
    _deprecated_msg_with_no_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator

        :param as_of: the release deprecating the callable. Constants
            are define in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)

        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what
    def __call__(self, func_or_cls):
        # Works on plain functions and on classes (by wrapping __init__).
        if not self.what:
            self.what = func_or_cls.__name__ + '()'
        msg, details = self._build_message()
        if inspect.isfunction(func_or_cls):
            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                LOG.deprecated(msg, details)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            orig_init = func_or_cls.__init__
            # TODO(tsufiev): change `functools` module to `six` as
            # soon as six 1.7.4 (with fix for passing `assigned`
            # argument to underlying `functools.wraps`) is released
            # and added to the oslo-incubator requrements
            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
            def new_init(self, *args, **kwargs):
                LOG.deprecated(msg, details)
                orig_init(self, *args, **kwargs)
            func_or_cls.__init__ = new_init
            return func_or_cls
        else:
            raise TypeError('deprecated can be used only with functions or '
                            'classes')
    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        # when we get to the X release because once we get to the Y
        # release, what is Y+2?
        new_release = chr(ord(release) + self.remove_in)
        if new_release in self._RELEASES:
            return self._RELEASES[new_release]
        else:
            return new_release
    def _build_message(self):
        """Pick the message template and fill in the substitution details."""
        details = dict(what=self.what,
                       as_of=self._RELEASES[self.as_of],
                       remove_in=self._get_safe_to_remove_release(self.as_of))
        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            if self.remove_in > 0:
                msg = self._deprecated_msg_with_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_alternative_no_removal
        else:
            if self.remove_in > 0:
                msg = self._deprecated_msg_no_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_no_alternative_no_removal
        return msg, details
def is_compatible(requested_version, current_version, same_major=True):
    """Determine whether `requested_version` is satisfied by
    `current_version`; in other words, `current_version` is >=
    `requested_version`.

    :param requested_version: version to check for compatibility
    :param current_version: version to check against
    :param same_major: if True, the major version must be identical between
        `requested_version` and `current_version`. This is used when a
        major-version difference indicates incompatibility between the two
        versions. Since this is the common-case in practice, the default is
        True.
    :returns: True if compatible, False if not
    """
    if same_major:
        # Bug fix: parse_version() returns an opaque PEP 440 Version object
        # in setuptools >= 8, which is not subscriptable, so the previous
        # ``parse_version(...)[0]`` comparison raised TypeError there.
        # Derive the major component from the raw strings instead.
        requested_major = requested_version.split('.')[0]
        current_major = current_version.split('.')[0]
        try:
            if int(requested_major) != int(current_major):
                return False
        except ValueError:
            # Non-numeric major component: fall back to string equality.
            if requested_major != current_major:
                return False
    requested_parts = pkg_resources.parse_version(requested_version)
    current_parts = pkg_resources.parse_version(current_version)
    return current_parts >= requested_parts
| apache-2.0 |
darkk/ansible | lib/ansible/utils/module_docs_fragments/files.py | 8 | 4027 | # (c) 2014, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable documentation fragment for Ansible file-handling modules."""

    # Standard files documentation fragment
    # NOTE: DOCUMENTATION is runtime data parsed as YAML by Ansible's doc
    # tooling and merged into each module's own DOCUMENTATION — its content
    # and indentation must not be edited casually.
    DOCUMENTATION = """
options:
  path:
    description:
      - 'path to the file being managed.  Aliases: I(dest), I(name)'
    required: true
    default: []
    aliases: ['dest', 'name']
  state:
    description:
      - If C(directory), all immediate subdirectories will be created if they
        do not exist, since 1.7 they will be created with the supplied permissions.
        If C(file), the file will NOT be created if it does not exist, see the M(copy)
        or M(template) module if you want that behavior.  If C(link), the symbolic
        link will be created or changed. Use C(hard) for hardlinks. If C(absent),
        directories will be recursively deleted, and files or symlinks will be unlinked.
        If C(touch) (new in 1.4), an empty file will be created if the c(path) does not
        exist, while an existing file or directory will receive updated file access and
        modification times (similar to the way `touch` works from the command line).
    required: false
    default: file
    choices: [ file, link, directory, hard, touch, absent ]
  mode:
    required: false
    default: null
    choices: []
    description:
      - mode the file or directory should be, such as 0644 as would be fed to I(chmod)
  owner:
    required: false
    default: null
    choices: []
    description:
      - name of the user that should own the file/directory, as would be fed to I(chown)
  group:
    required: false
    default: null
    choices: []
    description:
      - name of the group that should own the file/directory, as would be fed to I(chown)
  src:
    required: false
    default: null
    choices: []
    description:
      - path of the file to link to (applies only to C(state=link)). Will accept absolute,
        relative and nonexisting paths. Relative paths are not expanded.
  seuser:
    required: false
    default: null
    choices: []
    description:
      - user part of SELinux file context. Will default to system policy, if
        applicable. If set to C(_default), it will use the C(user) portion of the
        policy if available
  serole:
    required: false
    default: null
    choices: []
    description:
      - role part of SELinux file context, C(_default) feature works as for I(seuser).
  setype:
    required: false
    default: null
    choices: []
    description:
      - type part of SELinux file context, C(_default) feature works as for I(seuser).
  selevel:
    required: false
    default: "s0"
    choices: []
    description:
      - level part of the SELinux file context. This is the MLS/MCS attribute,
        sometimes known as the C(range). C(_default) feature works as for
        I(seuser).
  recurse:
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    version_added: "1.1"
    description:
      - recursively set the specified file attributes (applies only to state=directory)
  force:
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    description:
      - 'force the creation of the symlinks in two cases: the source file does
        not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
        "path" file and create symlink to the "src" file in place of it).'
"""
| gpl-3.0 |
XiaodunServerGroup/xiaodun-platform | cms/djangoapps/contentstore/tests/test_transcripts_utils.py | 6 | 18269 | """ Tests for transcripts_utils. """
import unittest
from uuid import uuid4
import copy
import textwrap
from mock import patch, Mock
from pymongo import MongoClient
from django.test.utils import override_settings
from django.conf import settings
from django.utils import translation
from nose.plugins.skip import SkipTest
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.django import contentstore, _CONTENTSTORE
from xmodule.video_module import transcripts_utils
from contentstore.tests.modulestore_config import TEST_MODULESTORE
# Copy the configured contentstore settings and point them at a throwaway,
# uniquely named Mongo database so these tests never touch real content.
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
class TestGenerateSubs(unittest.TestCase):
    """Tests for `generate_subs` function."""

    def setUp(self):
        # Fixture: five subtitle cues; 'start'/'end' are timestamps in
        # milliseconds, index-aligned with 'text'.
        self.source_subs = {
            'start': [100, 200, 240, 390, 1000],
            'end': [200, 240, 380, 1000, 1500],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3',
                'subs #4',
                'subs #5'
            ]
        }

    def test_generate_subs_increase_speed(self):
        """Retiming from speed 1 to speed 2 should double every timestamp."""
        subs = transcripts_utils.generate_subs(2, 1, self.source_subs)
        self.assertDictEqual(
            subs,
            {
                'start': [200, 400, 480, 780, 2000],
                'end': [400, 480, 760, 2000, 3000],
                'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
            }
        )

    def test_generate_subs_decrease_speed_1(self):
        """Retiming from speed 1 to speed 0.5 should halve every timestamp."""
        subs = transcripts_utils.generate_subs(0.5, 1, self.source_subs)
        self.assertDictEqual(
            subs,
            {
                'start': [50, 100, 120, 195, 500],
                'end': [100, 120, 190, 500, 750],
                'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
            }
        )

    def test_generate_subs_decrease_speed_2(self):
        """Test for correct division during `generate_subs` process."""
        # Same halving as above, but expressed as target speed 1 over
        # source speed 2 — exercises the ratio computation.
        subs = transcripts_utils.generate_subs(1, 2, self.source_subs)
        self.assertDictEqual(
            subs,
            {
                'start': [50, 100, 120, 195, 500],
                'end': [100, 120, 190, 500, 750],
                'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
            }
        )
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, MODULESTORE=TEST_MODULESTORE)
class TestSaveSubsToStore(ModuleStoreTestCase):
    """Tests for `save_subs_to_store` function."""
    org = 'MITx'
    number = '999'
    display_name = 'Test course'

    def clear_subs_content(self):
        """Remove, if subtitles content exists."""
        try:
            content = contentstore().find(self.content_location)
            contentstore().delete(content.get_id())
        except NotFoundError:
            # Nothing stored yet — already clean.
            pass

    def setUp(self):
        self.course = CourseFactory.create(
            org=self.org, number=self.number, display_name=self.display_name)

        # Valid, JSON-serializable subtitles fixture.
        self.subs = {
            'start': [100, 200, 240, 390, 1000],
            'end': [200, 240, 380, 1000, 1500],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3',
                'subs #4',
                'subs #5'
            ]
        }

        self.subs_id = str(uuid4())
        filename = 'subs_{0}.srt.sjson'.format(self.subs_id)
        self.content_location = StaticContent.compute_location(
            self.org, self.number, filename
        )

        # incorrect subs
        self.unjsonable_subs = set([1])  # set can't be serialized

        self.unjsonable_subs_id = str(uuid4())
        filename_unjsonable = 'subs_{0}.srt.sjson'.format(self.unjsonable_subs_id)
        self.content_location_unjsonable = StaticContent.compute_location(
            self.org, self.number, filename_unjsonable
        )

        # Start from a clean store so the "not found" preconditions hold.
        self.clear_subs_content()

    def test_save_subs_to_store(self):
        # Precondition: nothing stored at the target location yet.
        with self.assertRaises(NotFoundError):
            contentstore().find(self.content_location)

        result_location = transcripts_utils.save_subs_to_store(
            self.subs,
            self.subs_id,
            self.course)

        self.assertTrue(contentstore().find(self.content_location))
        self.assertEqual(result_location, self.content_location)

    def test_save_unjsonable_subs_to_store(self):
        """
        Assures that subs, that can't be dumped, can't be found later.
        """
        with self.assertRaises(NotFoundError):
            contentstore().find(self.content_location_unjsonable)

        # json.dumps on a set raises TypeError, so nothing should be saved.
        with self.assertRaises(TypeError):
            transcripts_utils.save_subs_to_store(
                self.unjsonable_subs,
                self.unjsonable_subs_id,
                self.course)

        with self.assertRaises(NotFoundError):
            contentstore().find(self.content_location_unjsonable)

    def tearDown(self):
        self.clear_subs_content()
        # Drop the per-run test database entirely and reset the cached store.
        MongoClient().drop_database(TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'])
        _CONTENTSTORE.clear()
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, MODULESTORE=TEST_MODULESTORE)
class TestDownloadYoutubeSubs(ModuleStoreTestCase):
    """Tests for `download_youtube_subs` function."""
    org = 'MITx'
    number = '999'
    display_name = 'Test course'

    def clear_subs_content(self, youtube_subs):
        """Remove, if subtitles content exists."""
        # youtube_subs maps playback speed -> youtube video id; assets are
        # stored under 'subs_<id>.srt.sjson'.
        for subs_id in youtube_subs.values():
            filename = 'subs_{0}.srt.sjson'.format(subs_id)
            content_location = StaticContent.compute_location(
                self.org, self.number, filename
            )
            try:
                content = contentstore().find(content_location)
                contentstore().delete(content.get_id())
            except NotFoundError:
                pass

    def setUp(self):
        self.course = CourseFactory.create(
            org=self.org, number=self.number, display_name=self.display_name)

    def tearDown(self):
        MongoClient().drop_database(TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'])
        _CONTENTSTORE.clear()

    def test_success_downloading_subs(self):
        # Canned YouTube timed-text XML returned for every request below.
        response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
                <transcript>
                    <text start="0" dur="0.27"></text>
                    <text start="0.27" dur="2.45">Test text 1.</text>
                    <text start="2.72">Test text 2.</text>
                    <text start="5.43" dur="1.73">Test text 3.</text>
                </transcript>
        """)
        good_youtube_subs = {
            0.5: 'good_id_1',
            1.0: 'good_id_2',
            2.0: 'good_id_3'
        }
        self.clear_subs_content(good_youtube_subs)

        with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
            mock_get.return_value = Mock(status_code=200, text=response, content=response)

            # Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
            transcripts_utils.download_youtube_subs(good_youtube_subs, self.course, settings)

        # One timed-text request per youtube id is expected.
        mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_1'})
        mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_2'})
        mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_3'})

        # Check assets status after importing subtitles.
        for subs_id in good_youtube_subs.values():
            filename = 'subs_{0}.srt.sjson'.format(subs_id)
            content_location = StaticContent.compute_location(
                self.org, self.number, filename
            )
            self.assertTrue(contentstore().find(content_location))

        self.clear_subs_content(good_youtube_subs)

    def test_subs_for_html5_vid_with_periods(self):
        """
        This is to verify a fix whereby subtitle files uploaded against
        a HTML5 video that contains periods in the name caused
        incorrect subs name parsing
        """
        # Only the final extension should be stripped; path components dropped.
        html5_ids = transcripts_utils.get_html5_ids(['foo.mp4', 'foo.1.bar.mp4', 'foo/bar/baz.1.4.mp4', 'foo'])
        self.assertEqual(4, len(html5_ids))
        self.assertEqual(html5_ids[0], 'foo')
        self.assertEqual(html5_ids[1], 'foo.1.bar')
        self.assertEqual(html5_ids[2], 'baz.1.4')
        self.assertEqual(html5_ids[3], 'foo')

    @patch('xmodule.video_module.transcripts_utils.requests.get')
    def test_fail_downloading_subs(self, mock_get):
        # Simulate YouTube returning 404 for every id.
        mock_get.return_value = Mock(status_code=404, text='Error 404')

        bad_youtube_subs = {
            0.5: 'BAD_YOUTUBE_ID1',
            1.0: 'BAD_YOUTUBE_ID2',
            2.0: 'BAD_YOUTUBE_ID3'
        }
        self.clear_subs_content(bad_youtube_subs)

        with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
            transcripts_utils.download_youtube_subs(bad_youtube_subs, self.course, settings)

        # Check assets status after importing subtitles: nothing saved.
        for subs_id in bad_youtube_subs.values():
            filename = 'subs_{0}.srt.sjson'.format(subs_id)
            content_location = StaticContent.compute_location(
                self.org, self.number, filename
            )
            with self.assertRaises(NotFoundError):
                contentstore().find(content_location)

        self.clear_subs_content(bad_youtube_subs)

    def test_success_downloading_chinese_transcripts(self):
        # Disabled 11/14/13
        # This test is flakey because it performs an HTTP request on an external service
        # Re-enable when `requests.get` is patched using `mock.patch`
        raise SkipTest

        good_youtube_subs = {
            1.0: 'j_jEn79vS3g',  # Chinese, utf-8
        }
        self.clear_subs_content(good_youtube_subs)

        # Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
        transcripts_utils.download_youtube_subs(good_youtube_subs, self.course, settings)

        # Check assets status after importing subtitles.
        for subs_id in good_youtube_subs.values():
            filename = 'subs_{0}.srt.sjson'.format(subs_id)
            content_location = StaticContent.compute_location(
                self.org, self.number, filename
            )
            self.assertTrue(contentstore().find(content_location))

        self.clear_subs_content(good_youtube_subs)
class TestGenerateSubsFromSource(TestDownloadYoutubeSubs):
    """Tests for `generate_subs_from_source` function."""
    # Inherits setUp/tearDown/clear_subs_content from TestDownloadYoutubeSubs.

    def test_success_generating_subs(self):
        youtube_subs = {
            0.5: 'JMD_ifUUfsU',
            1.0: 'hI10vDNYz4M',
            2.0: 'AKqURZnYqpk'
        }
        # Minimal valid SubRip (.srt) payload with two cues.
        srt_filedata = textwrap.dedent("""
            1
            00:00:10,500 --> 00:00:13,000
            Elephant's Dream

            2
            00:00:15,000 --> 00:00:18,000
            At the left we can see...
        """)
        self.clear_subs_content(youtube_subs)

        # Check transcripts_utils.TranscriptsGenerationException not thrown
        transcripts_utils.generate_subs_from_source(youtube_subs, 'srt', srt_filedata, self.course)

        # Check assets status after importing subtitles.
        for subs_id in youtube_subs.values():
            filename = 'subs_{0}.srt.sjson'.format(subs_id)
            content_location = StaticContent.compute_location(
                self.org, self.number, filename
            )
            self.assertTrue(contentstore().find(content_location))

        self.clear_subs_content(youtube_subs)

    def test_fail_bad_subs_type(self):
        """An unsupported transcripts format must be rejected with a clear message."""
        youtube_subs = {
            0.5: 'JMD_ifUUfsU',
            1.0: 'hI10vDNYz4M',
            2.0: 'AKqURZnYqpk'
        }

        srt_filedata = textwrap.dedent("""
            1
            00:00:10,500 --> 00:00:13,000
            Elephant's Dream

            2
            00:00:15,000 --> 00:00:18,000
            At the left we can see...
        """)

        with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
            transcripts_utils.generate_subs_from_source(youtube_subs, 'BAD_FORMAT', srt_filedata, self.course)
        exception_message = cm.exception.message
        self.assertEqual(exception_message, "We support only SubRip (*.srt) transcripts format.")

    def test_fail_bad_subs_filedata(self):
        """Unparsable .srt content must be rejected with a clear message."""
        youtube_subs = {
            0.5: 'JMD_ifUUfsU',
            1.0: 'hI10vDNYz4M',
            2.0: 'AKqURZnYqpk'
        }

        srt_filedata = """BAD_DATA"""

        with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
            transcripts_utils.generate_subs_from_source(youtube_subs, 'srt', srt_filedata, self.course)
        exception_message = cm.exception.message
        self.assertEqual(exception_message, "Something wrong with SubRip transcripts file during parsing.")
class TestGenerateSrtFromSjson(TestDownloadYoutubeSubs):
    """Tests for `generate_srt_from_sjson` function."""
    # Inherits fixtures from TestDownloadYoutubeSubs.

    def test_success_generating_subs(self):
        # Speed factor 1: timestamps pass through unchanged.
        sjson_subs = {
            'start': [100, 200, 240, 390, 54000],
            'end': [200, 240, 380, 1000, 78400],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3',
                'subs #4',
                'subs #5'
            ]
        }
        srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
        self.assertTrue(srt_subs)
        expected_subs = [
            '00:00:00,100 --> 00:00:00,200\nsubs #1',
            '00:00:00,200 --> 00:00:00,240\nsubs #2',
            '00:00:00,240 --> 00:00:00,380\nsubs #3',
            '00:00:00,390 --> 00:00:01,000\nsubs #4',
            '00:00:54,000 --> 00:01:18,400\nsubs #5',
        ]
        for sub in expected_subs:
            self.assertIn(sub, srt_subs)

    def test_success_generating_subs_speed_up(self):
        # Speed factor 0.5: every timestamp is halved.
        sjson_subs = {
            'start': [100, 200, 240, 390, 54000],
            'end': [200, 240, 380, 1000, 78400],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3',
                'subs #4',
                'subs #5'
            ]
        }
        srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 0.5)
        self.assertTrue(srt_subs)
        expected_subs = [
            '00:00:00,050 --> 00:00:00,100\nsubs #1',
            '00:00:00,100 --> 00:00:00,120\nsubs #2',
            '00:00:00,120 --> 00:00:00,190\nsubs #3',
            '00:00:00,195 --> 00:00:00,500\nsubs #4',
            '00:00:27,000 --> 00:00:39,200\nsubs #5',
        ]
        for sub in expected_subs:
            self.assertIn(sub, srt_subs)

    def test_success_generating_subs_speed_down(self):
        # Speed factor 2: every timestamp is doubled.
        sjson_subs = {
            'start': [100, 200, 240, 390, 54000],
            'end': [200, 240, 380, 1000, 78400],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3',
                'subs #4',
                'subs #5'
            ]
        }
        srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 2)
        self.assertTrue(srt_subs)
        expected_subs = [
            '00:00:00,200 --> 00:00:00,400\nsubs #1',
            '00:00:00,400 --> 00:00:00,480\nsubs #2',
            '00:00:00,480 --> 00:00:00,760\nsubs #3',
            '00:00:00,780 --> 00:00:02,000\nsubs #4',
            '00:01:48,000 --> 00:02:36,800\nsubs #5',
        ]
        for sub in expected_subs:
            self.assertIn(sub, srt_subs)

    def test_fail_generating_subs(self):
        # Mismatched start/end lengths must yield an empty/falsy result.
        sjson_subs = {
            'start': [100, 200],
            'end': [100],
            'text': [
                'subs #1',
                'subs #2'
            ]
        }
        srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
        self.assertFalse(srt_subs)
class TestYoutubeTranscripts(unittest.TestCase):
    """
    Tests for checking right datastructure returning when using youtube api.
    """

    @patch('xmodule.video_module.transcripts_utils.requests.get')
    def test_youtube_bad_status_code(self, mock_get):
        # A non-200 response must raise rather than return partial data.
        mock_get.return_value = Mock(status_code=404, text='test')
        youtube_id = 'bad_youtube_id'
        with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
            transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)

    @patch('xmodule.video_module.transcripts_utils.requests.get')
    def test_youtube_empty_text(self, mock_get):
        # A 200 response with an empty body is also an error.
        mock_get.return_value = Mock(status_code=200, text='')
        youtube_id = 'bad_youtube_id'
        with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
            transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)

    def test_youtube_good_result(self):
        # Note: the empty first <text> element should be skipped entirely;
        # an element without 'dur' ends when the next one starts.
        response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
                <transcript>
                    <text start="0" dur="0.27"></text>
                    <text start="0.27" dur="2.45">Test text 1.</text>
                    <text start="2.72">Test text 2.</text>
                    <text start="5.43" dur="1.73">Test text 3.</text>
                </transcript>
        """)
        expected_transcripts = {
            'start': [270, 2720, 5430],
            'end': [2720, 2720, 7160],
            'text': ['Test text 1.', 'Test text 2.', 'Test text 3.']
        }
        youtube_id = 'good_youtube_id'
        with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
            mock_get.return_value = Mock(status_code=200, text=response, content=response)
            transcripts = transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
        self.assertEqual(transcripts, expected_transcripts)
        mock_get.assert_called_with('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_youtube_id'})
| agpl-3.0 |
naslanidis/ansible | lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py | 48 | 7633 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata consumed by ansible-doc and the module loader.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_affinitygroup
short_description: Manages affinity groups on Apache CloudStack based clouds.
description:
- Create and remove affinity groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the affinity group.
required: true
affinty_type:
description:
- Type of the affinity group. If not specified, first found affinity type is used.
required: false
default: null
description:
description:
- Description of the affinity group.
required: false
default: null
state:
description:
- State of the affinity group.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the affinity group is related to.
required: false
default: null
account:
description:
- Account the affinity group is related to.
required: false
default: null
project:
description:
- Name of the project the affinity group is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create an affinity group
- local_action:
    module: cs_affinitygroup
    name: haproxy
    affinty_type: host anti-affinity
# Remove an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
state: absent
'''
RETURN = '''
---
id:
description: UUID of the affinity group.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of affinity group.
returned: success
type: string
sample: app
description:
description: Description of affinity group.
returned: success
type: string
sample: application affinity group
affinity_type:
description: Type of affinity group.
returned: success
type: string
sample: host anti-affinity
project:
description: Name of project the affinity group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the affinity group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the affinity group is related to.
returned: success
type: string
sample: example account
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
    """Manage (create/remove/query) CloudStack affinity groups."""

    def __init__(self, module):
        super(AnsibleCloudStackAffinityGroup, self).__init__(module)
        # Map CloudStack API result keys to the keys used in module results.
        self.returns = {
            'type': 'affinity_type',
        }
        # Lazy cache; populated on first get_affinity_group() call.
        self.affinity_group = None

    def get_affinity_group(self):
        """Return the affinity group dict matching name/project/account/domain,
        or None if it does not exist. Result is cached for the module run."""
        if not self.affinity_group:
            args = {
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'name': self.module.params.get('name'),
            }
            affinity_groups = self.cs.listAffinityGroups(**args)
            if affinity_groups:
                # Name lookup is unique within scope; take the single match.
                self.affinity_group = affinity_groups['affinitygroup'][0]
        return self.affinity_group

    def get_affinity_type(self):
        """Resolve the requested affinity type, defaulting to the first type
        the cloud offers; fail the module for an unknown type."""
        affinity_type = self.module.params.get('affinty_type')

        affinity_types = self.cs.listAffinityGroupTypes()
        if affinity_types:
            if not affinity_type:
                return affinity_types['affinityGroupType'][0]['type']

            for a in affinity_types['affinityGroupType']:
                if a['type'] == affinity_type:
                    return a['type']
        self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type)

    def create_affinity_group(self):
        """Create the affinity group if missing (idempotent). Honors check mode
        and optionally polls the async job. Returns the group dict."""
        affinity_group = self.get_affinity_group()
        if not affinity_group:
            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
                'type': self.get_affinity_type(),
                'description': self.module.params.get('description'),
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
            }
            if not self.module.check_mode:
                res = self.cs.createAffinityGroup(**args)

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    affinity_group = self.poll_job(res, 'affinitygroup')
        return affinity_group

    def remove_affinity_group(self):
        """Delete the affinity group if present (idempotent). Honors check mode
        and optionally polls the async job. Returns the (pre-delete) group dict."""
        affinity_group = self.get_affinity_group()
        if affinity_group:
            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
            }
            if not self.module.check_mode:
                res = self.cs.deleteAffinityGroup(**args)

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    self.poll_job(res, 'affinitygroup')
        return affinity_group
def main():
    """Module entry point: build the arg spec, dispatch on state, exit JSON."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        # NOTE(review): 'affinty_type' (sic) is the published option name;
        # renaming it would break existing playbooks.
        affinty_type=dict(default=None),
        description=dict(default=None),
        state=dict(choices=['present', 'absent'], default='present'),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        acs_ag = AnsibleCloudStackAffinityGroup(module)

        state = module.params.get('state')
        if state in ['absent']:
            affinity_group = acs_ag.remove_affinity_group()
        else:
            affinity_group = acs_ag.create_affinity_group()

        result = acs_ag.get_result(affinity_group)

    except CloudStackException as e:
        # fail_json exits the process, so 'result' is always bound below.
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
cuongnv23/ansible | lib/ansible/modules/network/nxos/nxos_vrrp.py | 23 | 12514 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata consumed by ansible-doc and the module loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrrp
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VRRP configuration on NX-OS switches.
description:
- Manages VRRP configuration on NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VRRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- C(state=absent) removes the VRRP group if it exists on the device.
- VRRP cannot be configured on loopback interfaces.
options:
group:
description:
- VRRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for VRRP.
required: true
priority:
description:
- VRRP priority.
required: false
default: null
vip:
description:
- VRRP virtual IP address.
required: false
default: null
authentication:
description:
- Clear text authentication string.
required: false
default: null
admin_state:
description:
- Used to enable or disable the VRRP process.
required: false
choices: ['shutdown', 'no shutdown']
default: no shutdown
version_added: "2.2"
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
- name: Ensure removal of the vrrp group config
# vip is required to ensure the user knows what they are removing
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
state: absent
- name: Re-config with more params
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
preempt: false
priority: 130
authentication: AUTHKEY
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1",
"authentication text testing", "no shutdown"]
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
    """Run one show command on the device and return its first result.

    Structured (json) output is requested for everything except ``show run``
    commands, whose output is only available as plain text.
    """
    output_format = 'text' if 'show run' in command else 'json'
    command_spec = {
        'command': command,
        'output': output_format,
    }
    return run_commands(module, [command_spec])[0]
def apply_key_map(key_map, table):
    """Translate the keys of *table* through *key_map*.

    Keys absent from *key_map* (or mapped to a falsy name) are dropped.
    Truthy values are converted to ``str``; falsy values (0, '', None, ...)
    are carried over unchanged.
    """
    translated = {}
    for old_key, value in table.items():
        mapped_key = key_map.get(old_key)
        if not mapped_key:
            continue
        translated[mapped_key] = str(value) if value else value
    return translated
def get_interface_type(interface):
    """Classify an NX-OS interface name by its leading characters.

    Returns one of 'ethernet', 'svi', 'loopback', 'management',
    'portchannel' or 'unknown'.
    """
    prefix_to_type = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),
        ('PO', 'portchannel'),
    )
    upper_name = interface.upper()
    for prefix, intf_type in prefix_to_type:
        if upper_name.startswith(prefix):
            return intf_type
    return 'unknown'
def is_default(interface, module):
    """Return True if *interface* carries no configuration, False if it does,
    or the string 'DNE' when the interface does not exist on the device."""
    command = 'show run interface {0}'.format(interface)

    try:
        body = execute_show_command(command, module)
        # The device reports unknown interfaces with an "invalid" message.
        if 'invalid' in body.lower():
            return 'DNE'
        else:
            raw_list = body.split('\n')
            # With no config under it, the last line is the bare
            # 'interface ...' header itself.
            if raw_list[-1].startswith('interface'):
                return True
            else:
                return False

    except (KeyError):
        return 'DNE'
def get_interface_mode(interface, intf_type, module):
    """Return ``(mode, name)`` for *interface*.

    ``mode`` is 'layer2', 'layer3' or 'unknown'; ``name`` is the interface
    name as reported by the device (or None if absent from the output).

    Fix: the original rebound the ``interface`` parameter to an unused empty
    dict, shadowing the argument for no purpose — removed.
    """
    command = 'show interface {0}'.format(interface)
    mode = 'unknown'
    body = execute_show_command(command, module)
    interface_table = body['TABLE_interface']['ROW_interface']
    name = interface_table.get('interface')

    if intf_type in ['ethernet', 'portchannel']:
        mode = str(interface_table.get('eth_mode', 'layer3'))
        # Switchport modes 'access'/'trunk' both mean the port is layer 2.
        if mode == 'access' or mode == 'trunk':
            mode = 'layer2'
    elif intf_type == 'svi':
        mode = 'layer3'
    return mode, name
def get_vrr_status(group, module, interface):
    """Return the admin state ('shutdown' or 'no shutdown') of VRRP *group*
    on *interface*, parsed from 'show run all' output."""
    command = 'show run all | section interface.{0}$'.format(interface)
    body = execute_show_command(command, module)
    vrf_index = None
    admin_state = 'shutdown'

    if body:
        splitted_body = body.splitlines()
        for index in range(0, len(splitted_body) - 1):
            if splitted_body[index].strip() == 'vrrp {0}'.format(group):
                vrf_index = index
        # NOTE(review): if the group line is never found, vrf_index stays
        # None and the slice below degenerates to the whole section, so a
        # 'no shutdown' belonging to any other group would be picked up —
        # confirm this is intended.
        vrf_section = splitted_body[vrf_index::]

        for line in vrf_section:
            if line.strip() == 'no shutdown':
                admin_state = 'no shutdown'
                break
    return admin_state
def get_existing_vrrp(interface, group, module, name):
    """Return the current config of VRRP *group* on *interface* as a dict
    (keys per ``vrrp_key`` below, plus 'admin_state'), or {} if absent."""
    command = 'show vrrp detail interface {0}'.format(interface)
    body = execute_show_command(command, module)
    vrrp = {}

    # Map device JSON keys to the module's result keys.
    vrrp_key = {
        'sh_group_id': 'group',
        'sh_vip_addr': 'vip',
        'sh_priority': 'priority',
        'sh_group_preempt': 'preempt',
        'sh_auth_text': 'authentication',
        'sh_adv_interval': 'interval'
    }
    try:
        vrrp_table = body['TABLE_vrrp_group']
    except (AttributeError, IndexError, TypeError):
        # No VRRP configured on this interface at all.
        return {}

    # A single group comes back as a dict, multiple as a list — normalize.
    if isinstance(vrrp_table, dict):
        vrrp_table = [vrrp_table]

    for each_vrrp in vrrp_table:
        vrrp_row = each_vrrp['ROW_vrrp_group']
        parsed_vrrp = apply_key_map(vrrp_key, vrrp_row)

        # Device reports preempt as 'Enable'/'Disable'; convert to bool.
        if parsed_vrrp['preempt'] == 'Disable':
            parsed_vrrp['preempt'] = False
        elif parsed_vrrp['preempt'] == 'Enable':
            parsed_vrrp['preempt'] = True

        if parsed_vrrp['group'] == group:
            parsed_vrrp['admin_state'] = get_vrr_status(group, module, name)

            return parsed_vrrp
    return vrrp
def get_commands_config_vrrp(delta, group):
    """Build the CLI commands configuring VRRP *group* from a *delta* dict.

    Only options present (and set) in *delta* emit a command; a ``preempt``
    of False emits the explicit 'no preempt'. The 'vrrp <group>' context
    command is always prepended.
    """
    commands = []

    vip = delta.get('vip')
    if vip:
        commands.append('address {0}'.format(vip))

    priority = delta.get('priority')
    if priority:
        commands.append('priority {0}'.format(priority))

    preempt = delta.get('preempt')
    if preempt:
        commands.append('preempt')
    elif preempt is False:
        commands.append('no preempt')

    interval = delta.get('interval')
    if interval:
        commands.append('advertisement-interval {0}'.format(interval))

    auth = delta.get('authentication')
    if auth:
        commands.append('authentication text {0}'.format(auth))

    admin_state = delta.get('admin_state')
    if admin_state:
        commands.append(admin_state)

    commands.insert(0, 'vrrp {0}'.format(group))

    return commands
def flatten_list(command_lists):
    """Flatten *command_lists* by one level: list entries are expanded in
    place, scalar entries are kept as-is."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def validate_params(param, module):
    """Range-check the named module parameter: 'group' must be 1-255 and
    'priority' 1-254; call fail_json (which exits) on violation."""
    value = module.params[param]

    if param == 'group':
        try:
            if (int(value) < 1 or int(value) > 255):
                raise ValueError
        except ValueError:
            # Also reached when int() itself fails on a non-numeric value.
            module.fail_json(msg="Warning! 'group' must be an integer between"
                                 " 1 and 255", group=value)
    elif param == 'priority':
        try:
            if (int(value) < 1 or int(value) > 254):
                raise ValueError
        except ValueError:
            module.fail_json(msg="Warning! 'priority' must be an integer "
                                 "between 1 and 254", priority=value)
def main():
    """Module entry point: validate the target interface, diff proposed vs
    existing VRRP config, and push only the required commands."""
    argument_spec = dict(
        group=dict(required=True, type='str'),
        interface=dict(required=True),
        priority=dict(required=False, type='str'),
        preempt=dict(required=False, type='bool'),
        vip=dict(required=False, type='str'),
        admin_state=dict(required=False, type='str',
                         choices=['shutdown', 'no shutdown'],
                         default='no shutdown'),
        authentication=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'], required=False, default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)
    results = {'changed': False, 'commands': [], 'warnings': warnings}

    state = module.params['state']
    interface = module.params['interface'].lower()
    group = module.params['group']
    priority = module.params['priority']
    preempt = module.params['preempt']
    vip = module.params['vip']
    authentication = module.params['authentication']
    admin_state = module.params['admin_state']
    transport = module.params['transport']

    if state == 'present' and not vip:
        module.fail_json(msg='the "vip" param is required when state=present')

    # VRRP preconditions: interface must exist, not be a loopback, and be L3.
    intf_type = get_interface_type(interface)
    if (intf_type != 'ethernet' and transport == 'cli'):
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg='That interface does not exist yet. Create '
                                 'it first.', interface=interface)
        if intf_type == 'loopback':
            module.fail_json(msg="Loopback interfaces don't support VRRP.",
                             interface=interface)

    mode, name = get_interface_mode(interface, intf_type, module)
    if mode == 'layer2':
        module.fail_json(msg='That interface is a layer2 port.\nMake it '
                             'a layer 3 port first.', interface=interface)

    args = dict(group=group, priority=priority, preempt=preempt,
                vip=vip, authentication=authentication,
                admin_state=admin_state)

    # Drop unset options so they don't show up in the diff below.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    existing = get_existing_vrrp(interface, group, module, name)

    changed = False
    end_state = existing
    commands = []

    if state == 'present':
        # Only configure options that differ from the device's current state.
        delta = dict(
            set(proposed.items()).difference(existing.items()))
        if delta:
            command = get_commands_config_vrrp(delta, group)
            commands.append(command)
    elif state == 'absent':
        if existing:
            commands.append(['no vrrp {0}'.format(group)])

    if commands:
        commands.insert(0, ['interface {0}'.format(interface)])

    commands = flatten_list(commands)
    if commands:
        results['commands'] = commands
        results['changed'] = True
        if not module.check_mode:
            load_config(module, commands)
            # NOTE(review): 'configure' never appears in the commands built
            # above, so this pop looks like dead defensive code — confirm.
            if 'configure' in commands:
                commands.pop(0)

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
spulec/moto | moto/sqs/exceptions.py | 1 | 3660 | from __future__ import unicode_literals
from moto.core.exceptions import RESTError
class MessageNotInflight(Exception):
    """Raised when a request acts on a message that is not currently in flight."""

    status_code = 400
    description = "The message referred to is not in flight."
class ReceiptHandleIsInvalid(RESTError):
    """400: the receipt handle supplied by the client is not valid."""
    code = 400

    def __init__(self):
        super(ReceiptHandleIsInvalid, self).__init__(
            "ReceiptHandleIsInvalid", "The input receipt handle is invalid."
        )
class MessageAttributesInvalid(RESTError):
    """400: a message attribute failed validation; *description* says why."""
    code = 400

    def __init__(self, description):
        super(MessageAttributesInvalid, self).__init__(
            "MessageAttributesInvalid", description
        )
class QueueDoesNotExist(RESTError):
    """404: the addressed queue does not exist."""
    code = 404

    def __init__(self):
        super(QueueDoesNotExist, self).__init__(
            "QueueDoesNotExist",
            "The specified queue does not exist for this wsdl version.",
        )
class QueueAlreadyExists(RESTError):
    """400: a queue with this name already exists with different attributes."""
    code = 400

    def __init__(self, message):
        super(QueueAlreadyExists, self).__init__("QueueAlreadyExists", message)
class EmptyBatchRequest(RESTError):
    """400: a batch request was submitted with no entries."""
    code = 400

    def __init__(self):
        super(EmptyBatchRequest, self).__init__(
            "EmptyBatchRequest",
            "There should be at least one SendMessageBatchRequestEntry in the request.",
        )
class InvalidBatchEntryId(RESTError):
    """Raised when a batch entry id has illegal characters or is too long."""

    code = 400

    def __init__(self):
        message = (
            "A batch entry id can only contain alphanumeric characters, "
            "hyphens and underscores. It can be at most 80 letters long."
        )
        super(InvalidBatchEntryId, self).__init__("InvalidBatchEntryId", message)
class BatchRequestTooLong(RESTError):
    """Raised when the combined batch payload exceeds the byte limit."""

    code = 400

    def __init__(self, length):
        message = (
            "Batch requests cannot be longer than 262144 bytes. "
            "You have sent {} bytes."
        ).format(length)
        super(BatchRequestTooLong, self).__init__("BatchRequestTooLong", message)
class BatchEntryIdsNotDistinct(RESTError):
    """Raised when two entries in one batch share the same id."""

    code = 400

    def __init__(self, entry_id):
        message = "Id {} repeated.".format(entry_id)
        super(BatchEntryIdsNotDistinct, self).__init__("BatchEntryIdsNotDistinct", message)
class TooManyEntriesInBatchRequest(RESTError):
    """Raised when a batch request carries more than the allowed 10 entries."""

    code = 400

    def __init__(self, number):
        message = (
            "Maximum number of entries per request are 10. "
            "You have sent {}."
        ).format(number)
        super(TooManyEntriesInBatchRequest, self).__init__(
            "TooManyEntriesInBatchRequest", message
        )
class InvalidAttributeName(RESTError):
    """Raised when an unknown attribute name is requested."""

    code = 400

    def __init__(self, attribute_name):
        message = "Unknown Attribute {}.".format(attribute_name)
        super(InvalidAttributeName, self).__init__("InvalidAttributeName", message)
class InvalidAttributeValue(RESTError):
    """Raised when an attribute's value fails validation."""

    code = 400

    def __init__(self, attribute_name):
        message = "Invalid value for the parameter {}.".format(attribute_name)
        super(InvalidAttributeValue, self).__init__("InvalidAttributeValue", message)
class InvalidParameterValue(RESTError):
    """Raised with a caller-supplied message describing a bad parameter value."""

    code = 400

    def __init__(self, message):
        super(InvalidParameterValue, self).__init__(
            "InvalidParameterValue", message
        )
class MissingParameter(RESTError):
    """Raised when a required request parameter is absent."""

    code = 400

    def __init__(self, parameter):
        message = "The request must contain the parameter {}.".format(parameter)
        super(MissingParameter, self).__init__("MissingParameter", message)
class OverLimit(RESTError):
    """Raised when a request exceeds the allowed number of actions."""

    code = 403

    def __init__(self, count):
        message = "{} Actions were found, maximum allowed is 7.".format(count)
        super(OverLimit, self).__init__("OverLimit", message)
| apache-2.0 |
x303597316/hue | desktop/core/ext-py/Django-1.6.10/tests/tablespaces/models.py | 150 | 1819 | from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'syncdb'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
    # "Reference" model: its auto-created table is reused by the unmanaged
    # ``Scientist`` model below (see the module comment above).
    name = models.CharField(max_length=50)
class ArticleRef(models.Model):
    # "Reference" model providing the real tables (including the implicit
    # M2M join tables) that the unmanaged ``Article`` model reuses.
    title = models.CharField(max_length=50, unique=True)
    code = models.CharField(max_length=50, unique=True)
    authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
    reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
    name = models.CharField(max_length=50)
    class Meta:
        # Map onto ScientistRef's table; unmanaged so syncdb never tries to
        # create it in the (non-existent) tablespace.
        db_table = 'tablespaces_scientistref'
        db_tablespace = 'tbl_tbsp'
        managed = False
class Article(models.Model):
    title = models.CharField(max_length=50, unique=True)
    # db_tablespace on a unique field targets the index tablespace.
    code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
    authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
    reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')
    class Meta:
        # Map onto ArticleRef's table; unmanaged for the same reason as
        # ``Scientist`` above.
        db_table = 'tablespaces_articleref'
        db_tablespace = 'tbl_tbsp'
        managed = False
# Also set the tables for automatically created models
# (the implicit M2M "through" models must point at the reference models'
# join tables, since Article itself is unmanaged).
Authors = Article._meta.get_field('authors').rel.through
Authors._meta.db_table = 'tablespaces_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').rel.through
Reviewers._meta.db_table = 'tablespaces_articleref_reviewers'
| apache-2.0 |
Kniyl/mezzanine | mezzanine/blog/views.py | 1 | 3433 | from __future__ import unicode_literals
from future.builtins import str
from future.builtins import int
from calendar import month_name
from django.contrib.auth import get_user_model
from django.http import Http404
from django.shortcuts import get_object_or_404
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.blog.feeds import PostsRSS, PostsAtom
from mezzanine.conf import settings
from mezzanine.generic.models import Keyword
from mezzanine.utils.views import render, paginate
User = get_user_model()
def blog_post_list(request, tag=None, year=None, month=None, username=None,
                   category=None, template="blog/blog_post_list.html"):
    """
    Display a list of blog posts that are filtered by tag, year, month,
    author or category. Custom templates are checked for using the name
    ``blog/blog_post_list_XXX.html`` where ``XXX`` is either the
    category slug or author's username if given.

    Raises ``Http404`` for an unknown tag/category/username or an
    invalid month value.
    """
    settings.use_editable()
    templates = []
    blog_posts = BlogPost.objects.published(for_user=request.user)
    if tag is not None:
        tag = get_object_or_404(Keyword, slug=tag)
        blog_posts = blog_posts.filter(keywords__keyword=tag)
    if year is not None:
        blog_posts = blog_posts.filter(publish_date__year=year)
    if month is not None:
        blog_posts = blog_posts.filter(publish_date__month=month)
        try:
            # Replace the numeric month with its name for display.
            month = month_name[int(month)]
        except (IndexError, ValueError):
            # IndexError: month number out of range; ValueError: month is
            # not numeric at all. Either way the URL is invalid — previously
            # a non-numeric month raised an unhandled ValueError (HTTP 500).
            raise Http404()
    if category is not None:
        category = get_object_or_404(BlogCategory, slug=category)
        blog_posts = blog_posts.filter(categories=category)
        templates.append(u"blog/blog_post_list_%s.html" %
                         str(category.slug))
    author = None
    if username is not None:
        author = get_object_or_404(User, username=username)
        blog_posts = blog_posts.filter(user=author)
        templates.append(u"blog/blog_post_list_%s.html" % username)
    # Avoid per-post queries when templates access categories/keywords.
    prefetch = ("categories", "keywords__keyword")
    blog_posts = blog_posts.select_related("user").prefetch_related(*prefetch)
    blog_posts = paginate(blog_posts, request.GET.get("page", 1),
                          settings.BLOG_POST_PER_PAGE,
                          settings.MAX_PAGING_LINKS)
    context = {"blog_posts": blog_posts, "year": year, "month": month,
               "tag": tag, "category": category, "author": author}
    templates.append(template)
    return render(request, templates, context)
def blog_post_detail(request, slug, year=None, month=None, day=None,
                     template="blog/blog_post_detail.html"):
    """
    Display a single published blog post looked up by slug.
    Custom templates are checked for using the name
    ``blog/blog_post_detail_XXX.html`` where ``XXX`` is the blog
    posts's slug.
    """
    # year/month/day are accepted but unused here — presumably captured by
    # the URL pattern; the slug alone identifies the post.
    blog_posts = BlogPost.objects.published(
        for_user=request.user).select_related()
    blog_post = get_object_or_404(blog_posts, slug=slug)
    # "editable_obj" enables Mezzanine's inline-editing template tags.
    context = {"blog_post": blog_post, "editable_obj": blog_post}
    templates = [u"blog/blog_post_detail_%s.html" % str(slug), template]
    return render(request, templates, context)
def blog_post_feed(request, format, **kwargs):
    """
    Blog post feeds - dispatch ``format`` ("rss" or "atom") to the
    matching feed view, raising 404 for anything else.
    """
    feed_classes = {"rss": PostsRSS, "atom": PostsAtom}
    try:
        return feed_classes[format](**kwargs)(request)
    except KeyError:
        raise Http404()
| bsd-2-clause |
Emergya/icm-openedx-educamadrid-platform-basic | lms/djangoapps/mailing/management/commands/mailchimp_id.py | 155 | 1629 | """
mailchimp_id: Returns whether or not a given mailchimp key represents
a valid list.
"""
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from mailsnake import MailSnake
class Command(BaseCommand):
    """
    Given a mailchimp key, validates that a list with that key
    exists in mailchimp.
    Prints the list id and name on success; exits with status 1 if no
    list with the given web_id is found.
    """
    args = '<mailchimp_key web_id>'
    help = 'Get the list id from a web_id'
    option_list = BaseCommand.option_list + (
        make_option('--key', action='store', help='mailchimp api key'),
        make_option('--webid', action='store', dest='web_id', type=int,
                    help='mailchimp list web id'),
    )
    def parse_options(self, options):
        """Parses `options` of the command."""
        # Both options are required; optparse does not enforce that itself.
        if not options['key']:
            raise CommandError('missing key')
        if not options['web_id']:
            raise CommandError('missing list web id')
        return options['key'], options['web_id']
    def handle(self, *args, **options):
        """
        Validates that the id passed in exists in mailchimp.
        """
        key, web_id = self.parse_options(options)
        mailchimp = MailSnake(key)
        # Assumes the MailChimp lists() response nests the list records
        # under 'data' — verify against the mailsnake/MailChimp API docs.
        lists = mailchimp.lists()['data']
        by_web_id = {l['web_id']: l for l in lists}
        list_with_id = by_web_id.get(web_id, None)
        if list_with_id:
            print "id: {} for web_id: {}".format(list_with_id['id'], web_id)
            print "list name: {}".format(list_with_id['name'])
        else:
            print "list with web_id: {} not found.".format(web_id)
            sys.exit(1)
| agpl-3.0 |
mrabbah/snmpccgx | flask/lib/python2.7/site-packages/click/_textwrap.py | 282 | 1198 | import textwrap
from contextlib import contextmanager
class TextWrapper(textwrap.TextWrapper):
    """A ``textwrap.TextWrapper`` with indentation helpers for help output."""

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        # Room remaining on the current line; always consume at least one
        # character so wrapping makes progress.
        space_left = max(width - cur_len, 1)
        if not self.break_long_words:
            # Put the whole word on its own line if nothing is on it yet;
            # otherwise leave it for the next line.
            if not cur_line:
                cur_line.append(reversed_chunks.pop())
            return
        chunk = reversed_chunks[-1]
        cur_line.append(chunk[:space_left])
        reversed_chunks[-1] = chunk[space_left:]

    @contextmanager
    def extra_indent(self, indent):
        """Temporarily append *indent* to both indent prefixes."""
        saved = (self.initial_indent, self.subsequent_indent)
        self.initial_indent += indent
        self.subsequent_indent += indent
        try:
            yield
        finally:
            self.initial_indent, self.subsequent_indent = saved

    def indent_only(self, text):
        """Apply the indent prefixes to *text* without re-wrapping it."""
        prefixed = []
        for idx, line in enumerate(text.splitlines()):
            prefix = self.initial_indent if idx == 0 else self.subsequent_indent
            prefixed.append(prefix + line)
        return '\n'.join(prefixed)
| gpl-3.0 |
Batterfii/django | django/forms/utils.py | 241 | 6131 | from __future__ import unicode_literals
import json
import sys
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
def pretty_name(name):
    """Convert 'first_name' to 'First name'; empty/None input yields ''."""
    if name:
        return name.replace('_', ' ').capitalize()
    return ''
def flatatt(attrs):
    """
    Render a dictionary of attributes as a single string.

    The result has a leading space followed by key="value", XML-style
    pairs. A boolean True value renders as the bare attribute name and a
    False value is dropped entirely. Keys are assumed not to need
    XML-escaping. An empty dictionary yields the empty string.
    The result is passed through 'mark_safe' (by way of 'format_html_join').
    """
    pairs = []
    flags = []
    for name, value in attrs.items():
        if not isinstance(value, bool):
            pairs.append((name, value))
        elif value:
            flags.append((name,))
    return (format_html_join('', ' {}="{}"', sorted(pairs))
            + format_html_join('', ' {}', sorted(flags)))
@html_safe
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.
    The dictionary keys are the field names, and the values are the errors.
    """
    def as_data(self):
        # Map each field to its underlying ValidationError list.
        return {f: e.as_data() for f, e in self.items()}
    def as_json(self, escape_html=False):
        # JSON object keyed by field name; values are lists of
        # {'message', 'code'} dicts from ErrorList.get_json_data().
        return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})
    def as_ul(self):
        # An empty dict renders as '' rather than an empty <ul>.
        if not self:
            return ''
        return format_html(
            '<ul class="errorlist">{}</ul>',
            format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
        )
    def as_text(self):
        # Plain-text rendering: one '* field' line followed by its errors.
        output = []
        for field, errors in self.items():
            output.append('* %s' % field)
            output.append('\n'.join('  * %s' % e for e in errors))
        return '\n'.join(output)
    def __str__(self):
        # The default string form is the HTML <ul> rendering.
        return self.as_ul()
@html_safe
@python_2_unicode_compatible
class ErrorList(UserList, list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def __init__(self, initlist=None, error_class=None):
        # UserList stores the items in ``self.data``.
        super(ErrorList, self).__init__(initlist)
        if error_class is None:
            self.error_class = 'errorlist'
        else:
            self.error_class = 'errorlist {}'.format(error_class)
    def as_data(self):
        # Normalize stored errors into ValidationError instances.
        return ValidationError(self.data).error_list
    def get_json_data(self, escape_html=False):
        # Serializable form: list of {'message', 'code'} dicts.
        errors = []
        for error in self.as_data():
            message = list(error)[0]
            errors.append({
                'message': escape(message) if escape_html else message,
                'code': error.code or '',
            })
        return errors
    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))
    def as_ul(self):
        # An empty list renders as '' rather than an empty <ul>.
        if not self.data:
            return ''
        return format_html(
            '<ul class="{}">{}</ul>',
            self.error_class,
            format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
        )
    def as_text(self):
        return '\n'.join('* %s' % e for e in self)
    def __str__(self):
        # The default string form is the HTML <ul> rendering.
        return self.as_ul()
    def __repr__(self):
        return repr(list(self))
    def __contains__(self, item):
        return item in list(self)
    def __eq__(self, other):
        return list(self) == other
    def __ne__(self, other):
        return list(self) != other
    def __getitem__(self, i):
        # Indexing yields the rendered message, not the raw error object.
        error = self.data[i]
        if isinstance(error, ValidationError):
            return list(error)[0]
        return force_text(error)
    def __reduce_ex__(self, *args, **kwargs):
        # The `list` reduce function returns an iterator as the fourth element
        # that is normally used for repopulating. Since we only inherit from
        # `list` for `isinstance` backward compatibility (Refs #17413) we
        # nullify this iterator as it would otherwise result in duplicate
        # entries. (Refs #23594)
        info = super(UserList, self).__reduce_ex__(*args, **kwargs)
        return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.
    Raises ``ValidationError`` (code 'ambiguous_timezone') when the
    naive datetime cannot be interpreted in the current time zone.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            # Re-raise as ValidationError while preserving the original
            # traceback (sys.exc_info()[2]) via six.reraise.
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    return value
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    # Short-circuit order matters: USE_TZ is checked before touching value.
    if not settings.USE_TZ or value is None or not timezone.is_aware(value):
        return value
    return timezone.make_naive(value, timezone.get_current_timezone())
| bsd-3-clause |
edry/edx-platform | openedx/core/djangoapps/content/course_overviews/migrations/0005_add_enrollment_fields.py | 57 | 5537 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add five enrollment-related cached columns to CourseOverview."""
    def forwards(self, orm):
        """Clear the cache table, then add the new enrollment columns."""
        # The default values for these new columns may not match the actual
        # values of courses already present in the table. To ensure that the
        # cached values are correct, we must clear the table before adding any
        # new columns.
        db.clear_table('course_overviews_courseoverview')
        # Adding field 'CourseOverview.enrollment_start'
        db.add_column('course_overviews_courseoverview', 'enrollment_start',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)
        # Adding field 'CourseOverview.enrollment_end'
        db.add_column('course_overviews_courseoverview', 'enrollment_end',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)
        # Adding field 'CourseOverview.enrollment_domain'
        db.add_column('course_overviews_courseoverview', 'enrollment_domain',
                      self.gf('django.db.models.fields.TextField')(null=True),
                      keep_default=False)
        # Adding field 'CourseOverview.invitation_only'
        db.add_column('course_overviews_courseoverview', 'invitation_only',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'CourseOverview.max_student_enrollments_allowed'
        db.add_column('course_overviews_courseoverview', 'max_student_enrollments_allowed',
                      self.gf('django.db.models.fields.IntegerField')(null=True),
                      keep_default=False)
    def backwards(self, orm):
        """Drop the columns added by forwards() (cached rows are not restored)."""
        # Deleting field 'CourseOverview.enrollment_start'
        db.delete_column('course_overviews_courseoverview', 'enrollment_start')
        # Deleting field 'CourseOverview.enrollment_end'
        db.delete_column('course_overviews_courseoverview', 'enrollment_end')
        # Deleting field 'CourseOverview.enrollment_domain'
        db.delete_column('course_overviews_courseoverview', 'enrollment_domain')
        # Deleting field 'CourseOverview.invitation_only'
        db.delete_column('course_overviews_courseoverview', 'invitation_only')
        # Deleting field 'CourseOverview.max_student_enrollments_allowed'
        db.delete_column('course_overviews_courseoverview', 'max_student_enrollments_allowed')
    # Frozen ORM model definitions used by South when this migration runs.
    models = {
        'course_overviews.courseoverview': {
            'Meta': {'object_name': 'CourseOverview'},
            '_location': ('xmodule_django.models.UsageKeyField', [], {'max_length': '255'}),
            '_pre_requisite_courses_json': ('django.db.models.fields.TextField', [], {}),
            'advertised_start': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'cert_html_view_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cert_name_long': ('django.db.models.fields.TextField', [], {}),
            'cert_name_short': ('django.db.models.fields.TextField', [], {}),
            'certificates_display_behavior': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'certificates_show_before_end': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'course_image_url': ('django.db.models.fields.TextField', [], {}),
            'days_early_for_beta': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'display_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'display_number_with_default': ('django.db.models.fields.TextField', [], {}),
            'display_org_with_default': ('django.db.models.fields.TextField', [], {}),
            'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'end_of_course_survey_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'enrollment_domain': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'enrollment_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'enrollment_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'facebook_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'has_any_active_web_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'primary_key': 'True', 'db_index': 'True'}),
            'invitation_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'lowest_passing_grade': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2', 'null': 'True'}),
            'max_student_enrollments_allowed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'mobile_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'social_sharing_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'visible_to_staff_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        }
    }
complete_apps = ['course_overviews'] | agpl-3.0 |
Orochimarufan/youtube-dl | youtube_dl/extractor/generic.py | 3 | 142659 | # coding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
js_to_json,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
xpath_text,
)
from .commonprotocols import RtmpIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
from .mofosex import MofosexEmbedIE
from .spankwire import SpankwireIE
from .youporn import YouPornIE
from .vimeo import VimeoIE
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudEmbedIE
from .tunein import TuneInBaseIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
from .piksel import PikselIE
from .videa import VideaIE
from .twentymin import TwentyMinutenIE
from .ustream import UstreamIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE
from .wistia import WistiaIE
from .mediaset import MediasetIE
from .joj import JojIE
from .megaphone import MegaphoneIE
from .vzaar import VzaarIE
from .channel9 import Channel9IE
from .vshare import VShareIE
from .mediasite import MediasiteIE
from .springboardplatform import SpringboardPlatformIE
from .yapfiles import YapFilesIE
from .vice import ViceIE
from .xfileshare import XFileShareIE
from .cloudflarestream import CloudflareStreamIE
from .peertube import PeerTubeIE
from .teachable import TeachableIE
from .indavideo import IndavideoEmbedIE
from .apa import APAIE
from .foxnews import FoxNewsIE
from .viqeo import ViqeoIE
from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# RSS feed with enclosures and unsupported link URLs
{
'url': 'http://www.hellointernet.fm/podcast?format=rss',
'info_dict': {
'id': 'http://www.hellointernet.fm/podcast?format=rss',
'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
'title': 'Hello Internet',
},
'playlist_mincount': 100,
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': r're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
{
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer'
# in the http requests
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# embedded with itemprop embedURL and video id spelled as `idVideo`
'add_id': ['BrightcoveLegacy'],
'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/',
'info_dict': {
'id': '5255628253001',
'ext': 'mp4',
'title': 'md5:37c519b1128915607601e75a87995fc0',
'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26',
'uploader': 'BFM BUSINESS',
'uploader_id': '876450612001',
'timestamp': 1482255315,
'upload_date': '20161220',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
{
# Brightcove video in <iframe>
'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724',
'md5': '36d74ef5e37c8b4a2ce92880d208b968',
'info_dict': {
'id': '5360463607001',
'ext': 'mp4',
'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活',
'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。',
'uploader': 'United Nations',
'uploader_id': '1362235914001',
'timestamp': 1489593889,
'upload_date': '20170315',
},
'add_ie': ['BrightcoveLegacy'],
},
{
# Brightcove with alternative playerID key
'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html',
'info_dict': {
'id': 'nmeth.2062_SV1',
'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research',
},
'playlist': [{
'info_dict': {
'id': '2228375078001',
'ext': 'mp4',
'title': 'nmeth.2062-sv1',
'description': 'nmeth.2062-sv1',
'timestamp': 1363357591,
'upload_date': '20130315',
'uploader': 'Nature Publishing Group',
'uploader_id': '1964492299001',
},
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
'skip': 'video rotates...weekly?',
},
{
# Brightcove:new type [2].
'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis',
'md5': '2b35148fcf48da41c9fb4591650784f3',
'info_dict': {
'id': '5348741021001',
'ext': 'mp4',
'upload_date': '20170306',
'uploader_id': '4191638492001',
'timestamp': 1488769918,
'title': 'VIDEO: St. Thomas More earns first trip to basketball semis',
},
},
{
# Alternative brightcove <video> attributes
'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/',
'info_dict': {
'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs",
},
'playlist': [{
'md5': '732d22ba3d33f2f3fc253c39f8f36523',
'info_dict': {
'id': '5311302538001',
'ext': 'mp4',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche",
'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)",
'timestamp': 1486321708,
'upload_date': '20170205',
'uploader_id': '800000640001',
},
'only_matching': True,
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
# ooyala video embedded with http://player.ooyala.com/static/v4/production/latest/core.min.js
{
'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/',
'info_dict': {
'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2',
'ext': 'mp4',
'title': 'Steampunk Fest Comes to Honesdale',
'duration': 43.276,
},
'params': {
'skip_download': True,
}
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
# HEAD requests lead to endless 301, while GET is OK
'expected_warnings': ['301'],
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
# This forum does not allow <iframe> syntaxes anymore
# Now HTML tags are displayed as-is
'skip': 'No videos on this page',
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# DailyMail embed
{
'url': 'http://www.bumm.sk/krimi/2017/07/05/biztonsagi-kamera-buktatta-le-az-agg-ferfit-utlegelo-apolot',
'info_dict': {
'id': '1495629',
'ext': 'mp4',
'title': 'Care worker punches elderly dementia patient in head 11 times',
'description': 'md5:3a743dee84e57e48ec68bf67113199a5',
},
'add_ie': ['DailyMail'],
'params': {
'skip_download': True,
},
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
    # MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# YouTube <object> embed
{
'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/',
'md5': '516718101ec834f74318df76259fb3cc',
'info_dict': {
'id': 'msN87y-iEx0',
'ext': 'webm',
'title': 'Feynman: Mirrors FUN TO IMAGINE 6',
'upload_date': '20080526',
'description': 'md5:0ffc78ea3f01b2e2c247d5f8d1d3c18d',
'uploader': 'Christopher Sykes',
'uploader_id': 'ChristopherJSykes',
},
'add_ie': ['Youtube'],
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/ytdl-org/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# TuneIn station embed
{
'url': 'http://radiocnrv.com/promouvoir-radio-cnrv/',
'info_dict': {
'id': '204146',
'ext': 'mp3',
'title': 'CNRV',
'location': 'Paris, France',
'is_live': True,
},
'params': {
# Live stream
'skip_download': True,
},
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'https://skiplagged.com/',
'info_dict': {
'id': 'skiplagged',
'title': 'Skiplagged: The smart way to find cheap flights',
},
'playlist_mincount': 1,
'add_ie': ['Youtube'],
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:8078af856dca76edc42910b61273dbbf',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# jwplayer rtmp
{
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
'uploader': 'www.suffolk.edu',
},
'params': {
'skip_download': True,
},
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
},
# Complex jwplayer
{
'url': 'http://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos',
'ext': 'mp4',
'title': 'king machine trailer 1',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# JWPlayer config passed as variable
'url': 'http://www.txxx.com/videos/3326530/ariele/',
'info_dict': {
'id': '3326530_hq',
'ext': 'mp4',
'title': 'ARIELE | Tube Cup',
'uploader': 'www.txxx.com',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
},
{
# JWPlatform iframe
'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
'info_dict': {
'id': 'O0c5JcKT',
'ext': 'mp4',
'upload_date': '20171122',
'timestamp': 1511366290,
'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
},
'add_ie': [JWPlatformIE.ie_key()],
},
{
# Video.js embed, multiple formats
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': 'PROstoe3D',
},
'params': {
'skip_download': True,
},
},
{
# Video.js embed, single format
'url': 'https://www.vooplayer.com/v3/watch/watch.php?v=NzgwNTg=',
'info_dict': {
'id': 'watch',
'ext': 'mp4',
'title': 'Step 1 - Good Foundation',
'description': 'md5:d1e7ff33a29fc3eb1673d6c270d344f4',
},
'params': {
'skip_download': True,
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'cplapp@learn360.com',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
{
# Kaltura embedded, some fileExt broken (#11480)
'url': 'http://www.cornell.edu/video/nima-arkani-hamed-standard-models-of-particle-physics',
'info_dict': {
'id': '1_sgtvehim',
'ext': 'mp4',
'title': 'Our "Standard Models" of particle physics and cosmology',
'description': 'md5:67ea74807b8c4fea92a6f38d6d323861',
'timestamp': 1321158993,
'upload_date': '20111113',
'uploader_id': 'kps1',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed
'url': 'http://www.gsd.harvard.edu/event/i-m-pei-a-centennial-celebration/',
'md5': 'ae5ace8eb09dc1a35d03b579a9c2cc44',
'info_dict': {
'id': '0_f2cfbpwy',
'ext': 'mp4',
'title': 'I. M. Pei: A Centennial Celebration',
'description': 'md5:1db8f40c69edc46ca180ba30c567f37c',
'upload_date': '20170403',
'uploader_id': 'batchUser',
'timestamp': 1491232186,
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed, more sophisticated
'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
'info_dict': {
'id': '1_9gzouybz',
'ext': 'mp4',
'title': 'lecture-05sep2017',
'description': 'md5:40f347d91fd4ba047e511c5321064b49',
'upload_date': '20170913',
'uploader_id': 'eps2',
'timestamp': 1505340777,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
{
# meta twitter:player
'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
'info_dict': {
'id': '0_01b42zps',
'ext': 'mp4',
'title': 'Main Twerk (Video)',
'upload_date': '20171208',
'uploader_id': 'sebastian.salinas@thechive.com',
'timestamp': 1512713057,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
# referrer protected EaglePlatform embed
{
'url': 'https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/',
'info_dict': {
'id': '582306',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3382,
'view_count': int,
},
'params': {
'skip_download': True,
},
},
# ClipYou (EaglePlatform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is unavailable.',
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
'skip': 'HTTP Error 404: Not Found',
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
'description': 'Amazon updates Fire TV line, Tesla\'s Model X spotted in the wild',
'timestamp': 1427237531,
'uploader': 'Crunch Report',
'upload_date': '20150324',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
'skip': 'Invalid Page URL',
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': 'x_dtl_oa_LettermanliftPR_160608',
'ext': 'mp4',
'title': 'David Letterman: A Preview',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
'upload_date': '20160609',
'timestamp': 1465431544,
'uploader': 'NBCU-NEWS',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON Expecting value'],
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Kinja embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '106351',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'description': 'Migrated from OnionStudios',
'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'clickhole',
'upload_date': '20150527',
'timestamp': 1432744860,
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
# This video can't be played in browsers if Flash disabled and UA set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
{
# Brightcove embed with whitespace around attribute names
'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill',
'info_dict': {
'id': '3167554373001',
'ext': 'mp4',
'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill",
'description': 'md5:57bacb0e0f29349de4972bfda3191713',
'uploader_id': '1079349493',
'upload_date': '20140207',
'timestamp': 1391810548,
},
'params': {
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': '7619da8c820e835bef21a1efa2a0fc71',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Another LiveLeak embed pattern (#13336)
{
'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/',
'info_dict': {
'id': '2eb_1496309988',
'ext': 'mp4',
'title': 'Thief robs place where everyone was armed',
'description': 'md5:694d73ee79e535953cf2488562288eee',
'uploader': 'brazilwtf',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook <iframe> embed, plugin video
{
'url': 'http://5pillarsuk.com/2017/06/07/tariq-ramadan-disagrees-with-pr-exercise-by-imams-refusing-funeral-prayers-for-london-attackers/',
'info_dict': {
'id': '1754168231264132',
'ext': 'mp4',
'title': 'About the Imams and Religious leaders refusing to perform funeral prayers for...',
'uploader': 'Tariq Ramadan (official)',
'timestamp': 1496758379,
'upload_date': '20170606',
},
'params': {
'skip_download': True,
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': 'videos.expansion@el-mundo.net',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# multiple kaltura embeds, nsfw
'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
'info_dict': {
'id': 'kamila-avec-video-jaime-sadomie',
'title': "Kamila avec vídeo “J'aime sadomie”",
},
'playlist_count': 8,
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
# generic vimeo embed that requires original URL passed as Referer
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
# DBTV embeds
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
{
# Videa embeds
'url': 'http://forum.dvdtalk.com/movie-talk/623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style.html',
'info_dict': {
'id': '623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style',
'title': 'Deleted Magic - Star Wars: OT Deleted / Alt. Scenes Docu. Style - DVD Talk Forum',
},
'playlist_mincount': 2,
},
{
# 20 minuten embed
'url': 'http://www.20min.ch/schweiz/news/story/So-kommen-Sie-bei-Eis-und-Schnee-sicher-an-27032552',
'info_dict': {
'id': '523629',
'ext': 'mp4',
'title': 'So kommen Sie bei Eis und Schnee sicher an',
'description': 'md5:117c212f64b25e3d95747e5276863f7d',
},
'params': {
'skip_download': True,
},
'add_ie': [TwentyMinutenIE.ie_key()],
},
{
# VideoPress embed
'url': 'https://en.support.wordpress.com/videopress/',
'info_dict': {
'id': 'OcobLTqC',
'ext': 'm4v',
'title': 'IMG_5786',
'timestamp': 1435711927,
'upload_date': '20150701',
},
'params': {
'skip_download': True,
},
'add_ie': [VideoPressIE.ie_key()],
},
{
# Rutube embed
'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2',
'info_dict': {
'id': '9b3d5bee0a8740bf70dfd29d3ea43541',
'ext': 'flv',
'title': 'Магаззино: Казань 2',
'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a',
'uploader': 'Магаззино',
'upload_date': '20170228',
'uploader_id': '996642',
},
'params': {
'skip_download': True,
},
'add_ie': [RutubeIE.ie_key()],
},
{
# ThePlatform embedded with whitespaces in URLs
'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm',
'only_matching': True,
},
{
# Senate ISVP iframe https
'url': 'https://www.hsgac.senate.gov/hearings/canadas-fast-track-refugee-plan-unanswered-questions-and-implications-for-us-national-security',
'md5': 'fb8c70b0b515e5037981a2492099aab8',
'info_dict': {
'id': 'govtaff020316',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
},
'add_ie': [SenateISVPIE.ie_key()],
},
{
# Limelight embeds (1 channel embed + 4 media embeds)
'url': 'http://www.sedona.com/FacilitatorTraining2017',
'info_dict': {
'id': 'FacilitatorTraining2017',
'title': 'Facilitator Training 2017',
},
'playlist_mincount': 5,
},
{
# Limelight embed (LimelightPlayerUtil.embed)
'url': 'https://tv5.ca/videos?v=xuu8qowr291ri',
'info_dict': {
'id': '95d035dc5c8a401588e9c0e6bd1e9c92',
'ext': 'mp4',
'title': '07448641',
'timestamp': 1499890639,
'upload_date': '20170712',
},
'params': {
'skip_download': True,
},
'add_ie': ['LimelightMedia'],
},
{
'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/',
'info_dict': {
'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest',
'title': 'Standoff with Walnut Creek murder suspect ends',
'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788',
},
'playlist_mincount': 4,
},
{
# WashingtonPost embed
'url': 'http://www.vanityfair.com/hollywood/2017/04/donald-trump-tv-pitches',
'info_dict': {
'id': '8caf6e88-d0ec-11e5-90d3-34c2c42653ac',
'ext': 'mp4',
'title': "No one has seen the drama series based on Trump's life \u2014 until now",
'description': 'Donald Trump wanted a weekly TV drama based on his life. It never aired. But The Washington Post recently obtained a scene from the pilot script — and enlisted actors.',
'timestamp': 1455216756,
'uploader': 'The Washington Post',
'upload_date': '20160211',
},
'add_ie': [WashingtonPostIE.ie_key()],
},
{
# Mediaset embed
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
'info_dict': {
'id': '720642',
'ext': 'mp4',
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
},
'params': {
'skip_download': True,
},
'add_ie': [MediasetIE.ie_key()],
},
{
# JOJ.sk embeds
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'info_dict': {
'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'title': 'Slovenskom sa prehnala vlna silných búrok',
},
'playlist_mincount': 5,
'add_ie': [JojIE.ie_key()],
},
{
# AMP embed (see https://www.ampproject.org/docs/reference/components/amp-video)
'url': 'https://tvrain.ru/amp/418921/',
'md5': 'cc00413936695987e8de148b67d14f1d',
'info_dict': {
'id': '418921',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
},
},
{
# vzaar embed
'url': 'http://help.vzaar.com/article/165-embedding-video',
'md5': '7e3919d9d2620b89e3e00bec7fe8c9d4',
'info_dict': {
'id': '8707641',
'ext': 'mp4',
'title': 'Building A Business Online: Principal Chairs Q & A',
},
},
{
# multiple HTML5 videos on one page
'url': 'https://www.paragon-software.com/home/rk-free/keyscenarios.html',
'info_dict': {
'id': 'keyscenarios',
'title': 'Rescue Kit 14 Free Edition - Getting started',
},
'playlist_count': 4,
},
{
# vshare embed
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
'info_dict': {
'id': '0f64ce6',
'title': 'vl14062007715967',
'ext': 'mp4',
}
},
{
'url': 'http://www.heidelberg-laureate-forum.org/blog/video/lecture-friday-september-23-2016-sir-c-antony-r-hoare/',
'md5': 'aecd089f55b1cb5a59032cb049d3a356',
'info_dict': {
'id': '90227f51a80c4d8f86c345a7fa62bd9a1d',
'ext': 'mp4',
'title': 'Lecture: Friday, September 23, 2016 - Sir Tony Hoare',
'description': 'md5:5a51db84a62def7b7054df2ade403c6c',
'timestamp': 1474354800,
'upload_date': '20160920',
}
},
{
'url': 'http://www.kidzworld.com/article/30935-trolls-the-beat-goes-on-interview-skylar-astin-and-amanda-leighton',
'info_dict': {
'id': '1731611',
'ext': 'mp4',
'title': 'Official Trailer | TROLLS: THE BEAT GOES ON!',
'description': 'md5:eb5f23826a027ba95277d105f248b825',
'timestamp': 1516100691,
'upload_date': '20180116',
},
'params': {
'skip_download': True,
},
'add_ie': [SpringboardPlatformIE.ie_key()],
},
{
'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
'info_dict': {
'id': 'uPDB5I9wfp8',
'ext': 'webm',
'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
'upload_date': '20160219',
'uploader': 'Pocoyo - Português (BR)',
'uploader_id': 'PocoyoBrazil',
},
'add_ie': [YoutubeIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.yapfiles.ru/show/1872528/690b05d3054d2dbe1e69523aa21bb3b1.mp4.html',
'info_dict': {
'id': 'vMDE4NzI1Mjgt690b',
'ext': 'mp4',
'title': 'Котята',
},
'add_ie': [YapFilesIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# CloudflareStream embed
'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
'info_dict': {
'id': '31c9291ab41fac05471db4e73aa11717',
'ext': 'mp4',
'title': '31c9291ab41fac05471db4e73aa11717',
},
'add_ie': [CloudflareStreamIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# PeerTube embed
'url': 'https://joinpeertube.org/fr/home/',
'info_dict': {
'id': 'home',
'title': 'Reprenez le contrôle de vos vidéos ! #JoinPeertube',
},
'playlist_count': 2,
},
{
# Indavideo embed
'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
'info_dict': {
'id': '1693903',
'ext': 'mp4',
'title': 'Így kell otthon hamburgert sütni',
'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
'timestamp': 1426330212,
'upload_date': '20150314',
'uploader': 'StreetKitchen',
'uploader_id': '546363',
},
'add_ie': [IndavideoEmbedIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# APA embed via JWPlatform embed
'url': 'http://www.vol.at/blue-man-group/5593454',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://share-videos.se/auto/video/83645793?uid=13',
'md5': 'b68d276de422ab07ee1d49388103f457',
'info_dict': {
'id': '83645793',
'title': 'Lock up and get excited',
'ext': 'mp4'
},
'skip': 'TODO: fix nested playlists processing in tests',
},
{
# Viqeo embeds
'url': 'https://viqeo.tv/',
'info_dict': {
'id': 'viqeo',
'title': 'All-new video platform',
},
'playlist_count': 6,
},
{
# Squarespace video embed, 2019-08-28
'url': 'http://ootboxford.com',
'info_dict': {
'id': 'Tc7b_JGdZfw',
'title': 'Out of the Blue, at Childish Things 10',
'ext': 'mp4',
'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
'uploader_id': 'helendouglashouse',
'uploader': 'Helen & Douglas House',
'upload_date': '20140328',
},
'params': {
'skip_download': True,
},
},
{
# Zype embed
'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
'info_dict': {
'id': '5b400b834b32992a310622b9',
'ext': 'mp4',
'title': 'Smoky Barbecue Favorites',
'thumbnail': r're:^https?://.*\.jpe?g',
'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
'upload_date': '20170909',
'timestamp': 1504915200,
},
'add_ie': [ZypeIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# videojs embed
'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
'info_dict': {
'id': 'shell',
'ext': 'mp4',
'title': 'Доставщик пиццы спросил разрешения сыграть на фортепиано',
'description': 'md5:89209cdc587dab1e4a090453dbaa2cb1',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest'],
},
{
# DailyMotion embed with DM.player
'url': 'https://www.beinsports.com/us/copa-del-rey/video/the-locker-room-valencia-beat-barca-in-copa/1203804',
'info_dict': {
'id': 'k6aKkGHd9FJs4mtJN39',
'ext': 'mp4',
'title': 'The Locker Room: Valencia Beat Barca In Copa del Rey Final',
'description': 'This video is private.',
'uploader_id': 'x1jf30l',
'uploader': 'beIN SPORTS USA',
'upload_date': '20190528',
'timestamp': 1559062971,
},
'params': {
'skip_download': True,
},
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# }
]
def report_following_redirect(self, new_url):
    """Log that extraction is continuing at a redirect target URL."""
    message = '[redirect] Following redirect to %s' % new_url
    self._downloader.to_screen(message)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = None
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
continue
entries.append({
'_type': 'url_transparent',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
format_id = compat_str(m.group('format_id'))
if format_id.endswith('mpegurl'):
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': format_id,
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
# making it impossible to download only chunk of the file (yet we need only 512kB to
# test whether it's HTML or not). According to youtube-dl default Accept-Encoding
# that will always result in downloading the whole file that is not desirable.
# Therefore for extraction pass we have to override Accept-Encoding to any in order
# to accept raw bytes and being able to download only a chunk.
# It may probably better to solve this by checking Content-Type for application/octet-stream
# after HEAD request finishes, but not sure if we can rely on this.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'] = self._parse_ism_formats(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(
self._parse_xspf(
doc, video_id, xspf_url=url,
xspf_base_url=full_response.geturl()),
video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'] = self._parse_mpd_formats(
doc,
mpd_base_url=full_response.geturl().rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
# Unescaping the whole page allows to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# Unescape squarespace embeds to be detected by generic extractor,
# see https://github.com/ytdl-org/youtube-dl/issues/21294
webpage = re.sub(
r'<div[^>]+class=[^>]*?\bsqs-video-wrapper\b[^>]*>',
lambda x: unescapeHTML(x.group(0)), webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
info_dict.update({
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
})
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
if bc_urls:
return self.playlist_from_matches(
bc_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'referrer': url}),
ie='BrightcoveNew')
# Look for Nexx embeds
nexx_urls = NexxIE._extract_urls(webpage)
if nexx_urls:
return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key())
# Look for Nexx iFrame embeds
nexx_embed_urls = NexxEmbedIE._extract_urls(webpage)
if nexx_embed_urls:
return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key())
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl')
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for YouTube embeds
youtube_urls = YoutubeIE._extract_urls(webpage)
if youtube_urls:
return self.playlist_from_matches(
youtube_urls, video_id, video_title, ie=YoutubeIE.ie_key())
matches = DailymotionIE._extract_urls(webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return self.playlist_from_matches(
playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for DailyMail embeds
dailymail_urls = DailyMailIE._extract_urls(webpage)
if dailymail_urls:
return self.playlist_from_matches(
dailymail_urls, video_id, video_title, ie=DailyMailIE.ie_key())
# Look for Teachable embeds, must be before Wistia
teachable_url = TeachableIE._extract_url(webpage, url)
if teachable_url:
return self.url_result(teachable_url)
# Look for embedded Wistia player
wistia_urls = WistiaIE._extract_urls(webpage)
if wistia_urls:
playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
for entry in playlist['entries']:
entry.update({
'_type': 'url_transparent',
'uploader': video_uploader,
})
return playlist
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
return OoyalaIE._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return self.playlist_from_matches(
embeds, video_id, video_title,
getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded Facebook player
facebook_urls = FacebookIE._extract_urls(webpage)
if facebook_urls:
return self.playlist_from_matches(facebook_urls, video_id, video_title)
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
odnoklassniki_url = OdnoklassnikiIE._extract_url(webpage)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return self.playlist_from_matches(
matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxIE._extract_urls(webpage)
if sportbox_urls:
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie=SportBoxIE.ie_key())
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key())
# Look for embedded Tube8 player
tube8_urls = Tube8IE._extract_urls(webpage)
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
# Look for embedded Mofosex player
mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
if mofosex_urls:
return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
# Look for embedded Spankwire player
spankwire_urls = SpankwireIE._extract_urls(webpage)
if spankwire_urls:
return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
# Look for embedded YouPorn player
youporn_urls = YouPornIE._extract_urls(webpage)
if youporn_urls:
return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudEmbedIE._extract_urls(webpage)
if soundcloud_urls:
return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return self.playlist_from_matches(tunein_urls, video_id, video_title)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_urls = KalturaIE._extract_urls(webpage)
if kaltura_urls:
return self.playlist_from_matches(
kaltura_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'source_url': url}),
ie=KalturaIE.ie_key())
# Look for EaglePlatform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(smuggle_url(eagleplatform_url, {'referrer': url}), EaglePlatformIE.ie_key())
# Look for ClipYou (uses EaglePlatform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Kinja embeds
kinja_embed_urls = KinjaEmbedIE._extract_urls(webpage, url)
if kinja_embed_urls:
return self.playlist_from_matches(
kinja_embed_urls, video_id, video_title)
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_urls = JWPlatformIE._extract_urls(webpage)
if jwplatform_urls:
return self.playlist_from_matches(jwplatform_urls, video_id, video_title, ie=JWPlatformIE.ie_key())
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
limelight_urls = LimelightBaseIE._extract_urls(webpage, url)
if limelight_urls:
return self.playlist_result(
limelight_urls, video_id, video_title, video_description)
# Look for Anvato embeds
anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id)
if anvato_urls:
return self.playlist_result(
anvato_urls, video_id, video_title, video_description)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//
(?:
admin\.mangomolo\.com/analytics/index\.php/customers/embed|
player\.mangomolo\.com/v1
)/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
(?:index|live)\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_urls = LiveLeakIE._extract_urls(webpage)
if liveleak_urls:
return self.playlist_from_matches(liveleak_urls, video_id, video_title)
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return self.playlist_from_matches(
twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return self.playlist_from_matches(
videopress_urls, video_id, video_title, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return self.playlist_from_matches(
rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
# Look for WashingtonPost embeds
wapo_urls = WashingtonPostIE._extract_urls(webpage)
if wapo_urls:
return self.playlist_from_matches(
wapo_urls, video_id, video_title, ie=WashingtonPostIE.ie_key())
# Look for Mediaset embeds
mediaset_urls = MediasetIE._extract_urls(self, webpage)
if mediaset_urls:
return self.playlist_from_matches(
mediaset_urls, video_id, video_title, ie=MediasetIE.ie_key())
# Look for JOJ.sk embeds
joj_urls = JojIE._extract_urls(webpage)
if joj_urls:
return self.playlist_from_matches(
joj_urls, video_id, video_title, ie=JojIE.ie_key())
# Look for megaphone.fm embeds
mpfn_urls = MegaphoneIE._extract_urls(webpage)
if mpfn_urls:
return self.playlist_from_matches(
mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key())
# Look for vzaar embeds
vzaar_urls = VzaarIE._extract_urls(webpage)
if vzaar_urls:
return self.playlist_from_matches(
vzaar_urls, video_id, video_title, ie=VzaarIE.ie_key())
channel9_urls = Channel9IE._extract_urls(webpage)
if channel9_urls:
return self.playlist_from_matches(
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
vshare_urls = VShareIE._extract_urls(webpage)
if vshare_urls:
return self.playlist_from_matches(
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
# Look for Mediasite embeds
mediasite_urls = MediasiteIE._extract_urls(webpage)
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
springboardplatform_urls = SpringboardPlatformIE._extract_urls(webpage)
if springboardplatform_urls:
return self.playlist_from_matches(
springboardplatform_urls, video_id, video_title,
ie=SpringboardPlatformIE.ie_key())
yapfiles_urls = YapFilesIE._extract_urls(webpage)
if yapfiles_urls:
return self.playlist_from_matches(
yapfiles_urls, video_id, video_title, ie=YapFilesIE.ie_key())
vice_urls = ViceIE._extract_urls(webpage)
if vice_urls:
return self.playlist_from_matches(
vice_urls, video_id, video_title, ie=ViceIE.ie_key())
xfileshare_urls = XFileShareIE._extract_urls(webpage)
if xfileshare_urls:
return self.playlist_from_matches(
xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
if cloudflarestream_urls:
return self.playlist_from_matches(
cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
peertube_urls = PeerTubeIE._extract_urls(webpage, url)
if peertube_urls:
return self.playlist_from_matches(
peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
if indavideo_urls:
return self.playlist_from_matches(
indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
apa_urls = APAIE._extract_urls(webpage)
if apa_urls:
return self.playlist_from_matches(
apa_urls, video_id, video_title, ie=APAIE.ie_key())
foxnews_urls = FoxNewsIE._extract_urls(webpage)
if foxnews_urls:
return self.playlist_from_matches(
foxnews_urls, video_id, video_title, ie=FoxNewsIE.ie_key())
sharevideos_urls = [sharevideos_mobj.group('url') for sharevideos_mobj in re.finditer(
r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
webpage)]
if sharevideos_urls:
return self.playlist_from_matches(
sharevideos_urls, video_id, video_title)
viqeo_urls = ViqeoIE._extract_urls(webpage)
if viqeo_urls:
return self.playlist_from_matches(
viqeo_urls, video_id, video_title, ie=ViqeoIE.ie_key())
expressen_urls = ExpressenIE._extract_urls(webpage)
if expressen_urls:
return self.playlist_from_matches(
expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
zype_urls = ZypeIE._extract_urls(webpage)
if zype_urls:
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
if len(entries) == 1:
entries[0].update({
'id': video_id,
'title': video_title,
})
else:
for num, entry in enumerate(entries, start=1):
entry.update({
'id': '%s-%s' % (video_id, num),
'title': '%s (%d)' % (video_title, num),
})
for entry in entries:
self._sort_formats(entry['formats'])
return self.playlist_result(entries, video_id, video_title)
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
if jwplayer_data:
try:
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=url)
return merge_dicts(info, info_dict)
except ExtractorError:
# See https://github.com/ytdl-org/youtube-dl/pull/16735
pass
# Video.js embed
mobj = re.search(
r'(?s)\bvideojs\s*\(.+?\.src\s*\(\s*((?:\[.+?\]|{.+?}))\s*\)\s*;',
webpage)
if mobj is not None:
sources = self._parse_json(
mobj.group(1), video_id, transform_source=js_to_json,
fatal=False) or []
if not isinstance(sources, list):
sources = [sources]
formats = []
for source in sources:
src = source.get('src')
if not src or not isinstance(src, compat_str):
continue
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
return self.url_result(src, YoutubeIE.ie_key())
if src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
elif src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': src,
'ext': (mimetype2ext(src_type)
or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
})
if formats:
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
return merge_dicts(json_ld, info_dict)
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
if RtmpIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
def filter_video(urls):
return list(filter(check_video, urls))
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
else:
found = None
if not found:
# twitter:player is a https URL to iframe player that may or may not
# be supported by youtube-dl thus this is checked the very last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url and embed_url != url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update({
'_type': 'url_transparent',
'ie_key': RtmpIE.ie_key(),
'url': video_url,
})
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
| unlicense |
ccgreen13/theharvester | discovery/shodan/api.py | 24 | 7784 | try:
from json import dumps, loads
except:
from simplejson import dumps, loads
from urllib2 import urlopen
from urllib import urlencode
__all__ = ['WebAPI']
class WebAPIError(Exception):
    """Raised when the SHODAN webservice reports an error.

    The server-supplied message is kept in ``value`` for backwards
    compatibility and is also forwarded to ``Exception`` so that
    ``repr()`` and ``args`` behave like a standard exception.
    """

    def __init__(self, value):
        # Forward the message to Exception so e.args is populated
        # (the original left args empty, making repr() useless).
        Exception.__init__(self, value)
        self.value = value

    def __str__(self):
        return self.value
class WebAPI:
    """Wrapper around the SHODAN webservices API."""

    class DatalossDb:
        """Search interface for the Dataloss DB archive."""

        def __init__(self, parent):
            # The owning WebAPI instance; used for its key and request helper.
            self.parent = parent

        def search(self, **kwargs):
            """Search the Dataloss DB archive.

            Arguments:
            name      -- Name of the affected company/ organisation
            arrest    -- whether the incident resulted in an arrest
            breaches  -- the type of breach that occurred (Hack, MissingLaptop etc.)
            country   -- country where the incident took place
            ext       -- whether an external, third party was affected
            ext_names -- the name of the third party company that was affected
            lawsuit   -- whether the incident resulted in a lawsuit
            records   -- the number of records that were lost/ stolen
            recovered -- whether the affected items were recovered
            sub_types -- the sub-categorization of the affected company/ organization
            source    -- whether the incident occurred from inside or outside the organization
            stocks    -- stock symbol of the affected company
            types     -- the basic type of organization (government, business, educational)
            uid       -- unique ID for the incident

            Returns:
            A dictionary with 2 main items: matches (list) and total (int).
            """
            return self.parent._request('datalossdb/search', dict(**kwargs))

    class Exploits:
        """Search interface for the Shodan Exploits archive."""

        def __init__(self, parent):
            self.parent = parent

        def search(self, query, sources=None, cve=None, osvdb=None, msb=None, bid=None):
            """Search the entire Shodan Exploits archive using the same query
            syntax as the website.

            Arguments:
            query -- exploit search query; same syntax as website

            Optional arguments:
            sources -- list of: metasploit, cve, osvdb, exploitdb, or packetstorm
            cve -- CVE identifier (ex. 2010-0432)
            osvdb -- OSVDB identifier (ex. 11666)
            msb -- Microsoft Security Bulletin ID (ex. MS05-030)
            bid -- Bugtraq identifier (ex. 13951)
            """
            # 'sources' previously defaulted to a mutable list ([]); None
            # avoids the shared-default pitfall and behaves identically
            # because the list was never mutated.
            if sources:
                query += ' source:' + ','.join(sources)
            if cve:
                query += ' cve:%s' % (str(cve).strip())
            if osvdb:
                query += ' osvdb:%s' % (str(osvdb).strip())
            if msb:
                query += ' msb:%s' % (str(msb).strip())
            if bid:
                query += ' bid:%s' % (str(bid).strip())
            return self.parent._request('search_exploits', {'q': query})

    class ExploitDb:
        """Download/search interface for the ExploitDB archive."""

        def __init__(self, parent):
            self.parent = parent

        def download(self, id):
            """Download the exploit code from the ExploitDB archive.

            Arguments:
            id -- ID of the ExploitDB entry

            Returns:
            A dictionary with the following fields:
            filename        -- Name of the file
            content-type    -- Mimetype
            data            -- Contents of the file
            """
            return self.parent._request('exploitdb/download', {'id': id})

        def search(self, query, **kwargs):
            """Search the ExploitDB archive.

            Arguments:
            query -- Search terms

            Optional arguments:
            author -- Name of the exploit submitter
            platform -- Target platform (e.g. windows, linux, hardware etc.)
            port -- Service port number
            type -- Any, dos, local, papers, remote, shellcode and webapps

            Returns:
            A dictionary with 2 main items: matches (list) and total (int).
            Each item in 'matches' is a dictionary with the following elements:
            id, author, date, description, platform, port, type
            """
            return self.parent._request('exploitdb/search', dict(q=query, **kwargs))

    class Msf:
        """Download/search interface for Metasploit modules."""

        def __init__(self, parent):
            self.parent = parent

        def download(self, id):
            """Download a metasploit module given the fullname (id) of it.

            Arguments:
            id -- fullname of the module (ex. auxiliary/admin/backupexec/dump)

            Returns:
            A dictionary with the following fields:
            filename        -- Name of the file
            content-type    -- Mimetype
            data            -- File content
            """
            return self.parent._request('msf/download', {'id': id})

        def search(self, query, **kwargs):
            """Search for a Metasploit module."""
            return self.parent._request('msf/search', dict(q=query, **kwargs))

    def __init__(self, key):
        """Initializes the API object.

        Arguments:
        key -- your API key
        """
        self.api_key = key
        self.base_url = 'http://www.shodanhq.com/api/'
        self.dataloss = self.DatalossDb(self)
        self.exploits = self.Exploits(self)
        self.exploitdb = self.ExploitDb(self)
        self.msf = self.Msf(self)

    def _request(self, function, params):
        """General-purpose function to create web requests to SHODAN.

        Arguments:
        function -- name of the function you want to execute
        params -- dictionary of parameters for the function

        Returns
        A JSON string containing the function's results.
        """
        # Work on a copy so the caller's dictionary is not mutated when
        # the API key is injected (the original wrote into it in place).
        params = dict(params)
        params['key'] = self.api_key
        # Send the request
        data = urlopen(self.base_url + function + '?' + urlencode(params)).read()
        # Parse the text into JSON
        data = loads(data)
        # Raise an exception if an error occurred
        if data.get('error', None):
            raise WebAPIError(data['error'])
        # Return the data
        return data

    def fingerprint(self, banner):
        """Determine the software based on the banner.

        Arguments:
        banner - HTTP banner

        Returns:
        A list of software that matched the given banner.
        """
        return self._request('fingerprint', {'banner': banner})

    def host(self, ip):
        """Get all available information on an IP.

        Arguments:
        ip -- IP of the computer

        Returns:
        All available information SHODAN has on the given IP,
        subject to API key restrictions.
        """
        return self._request('host', {'ip': ip})

    def search(self, query):
        """Search the SHODAN database.

        Arguments:
        query -- search query; identical syntax to the website

        Returns:
        A dictionary with 3 main items: matches, countries and total.
        Visit the website for more detailed information.
        """
        return self._request('search', {'q': query})
| gpl-2.0 |
mSenyor/kivy | kivy/input/providers/wm_common.py | 57 | 1439 | '''
Common definitions for a Windows provider
=========================================
This file provides common definitions for constants used by WM_Touch / WM_Pen.
'''
# Standard Win32 mouse window messages (WM_* values from winuser.h).
WM_MOUSEFIRST = 512
WM_MOUSEMOVE = 512
WM_LBUTTONDOWN = 513
WM_LBUTTONUP = 514
WM_LBUTTONDBLCLK = 515
WM_RBUTTONDOWN = 516
WM_RBUTTONUP = 517
WM_RBUTTONDBLCLK = 518
WM_MBUTTONDOWN = 519
WM_MBUTTONUP = 520
WM_MBUTTONDBLCLK = 521
WM_MOUSEWHEEL = 522
WM_MOUSELAST = 522
# Windows Touch API message and TOUCHEVENTF_* flags describing the kind
# of contact event carried by a WM_TOUCH message.
WM_TOUCH = 576
TOUCHEVENTF_MOVE = 1
TOUCHEVENTF_DOWN = 2
TOUCHEVENTF_UP = 4
# Signature/mask values used to recognize mouse events that Windows
# synthesized from pen or touch input; PEN_EVENT_TOUCH_MASK is the bit
# that separates touch-originated events from pen-originated ones.
PEN_OR_TOUCH_SIGNATURE = 0xFF515700
PEN_OR_TOUCH_MASK = 0xFFFFFF00
PEN_EVENT_TOUCH_MASK = 0x80
# GetSystemMetrics() index for the height of a window caption bar.
SM_CYCAPTION = 4
# Tablet press-and-hold / flick gesture query message and its TABLET_*
# flag values (see the Windows tablet PC gesture documentation).
WM_TABLET_QUERYSYSTEMGESTURE = 0x000002CC
TABLET_DISABLE_PRESSANDHOLD = 0x00000001
TABLET_DISABLE_PENTAPFEEDBACK = 0x00000008
TABLET_DISABLE_PENBARRELFEEDBACK = 0x00000010
TABLET_DISABLE_TOUCHUIFORCEON = 0x00000100
TABLET_DISABLE_TOUCHUIFORCEOFF = 0x00000200
TABLET_DISABLE_TOUCHSWITCH = 0x00008000
TABLET_DISABLE_FLICKS = 0x00010000
TABLET_ENABLE_FLICKSONCONTEXT = 0x00020000
TABLET_ENABLE_FLICKLEARNINGMODE = 0x00040000
TABLET_DISABLE_SMOOTHSCROLLING = 0x00080000
TABLET_DISABLE_FLICKFALLBACKKEYS = 0x00100000
# SetWindowLong/GetWindowLong index used to replace a window procedure.
GWL_WNDPROC = -4
# Combined TABLET_* flags answered to the system-gesture query so that
# Windows' built-in pen/touch gestures are suppressed and raw events
# reach the input provider.
QUERYSYSTEMGESTURE_WNDPROC = (
    TABLET_DISABLE_PRESSANDHOLD |
    TABLET_DISABLE_PENTAPFEEDBACK |
    TABLET_DISABLE_PENBARRELFEEDBACK |
    TABLET_DISABLE_SMOOTHSCROLLING |
    TABLET_DISABLE_FLICKFALLBACKKEYS |
    TABLET_DISABLE_TOUCHSWITCH |
    TABLET_DISABLE_FLICKS)
| mit |
aselle/tensorflow | tensorflow/python/framework/graph_to_function_def.py | 48 | 6943 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility to convert a Graph to a FunctionDef."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import op_def_registry
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
def _tensor_to_argdef(t, name=None, used_names=None):
  """Convert tensor t to an argdef, with a specified name or a unique name.

  Args:
    t: tensor whose name and dtype seed the ArgDef.
    name: explicit argument name; when given it is used verbatim and no
      uniquification is attempted.
    used_names: optional set of names already taken; it is mutated in place
      (the chosen name is added) when the name is derived from the tensor.

  Returns:
    An op_def_pb2.OpDef.ArgDef describing `t`.
  """
  arg = op_def_pb2.OpDef.ArgDef()
  if name is None:
    arg.name = _make_argname_from_tensor_name(t.name)
    if used_names is not None:
      if arg.name in used_names:
        # Probe "<name>_U0", "<name>_U1", ... until an unused name is found.
        i = 0
        while True:
          new_name = "%s_U%d" % (arg.name, i)
          if new_name not in used_names:
            arg.name = new_name
            break
          i += 1
      used_names.add(arg.name)
  else:
    arg.name = name
  arg.type = t.dtype.as_datatype_enum
  return arg
def _is_in_placeholders(op, func_arg_placeholders):
"""Checks whether any output of this op is in func_arg_placeholders."""
return op.values() and any(x.name in func_arg_placeholders
for x in op.values())
def _get_node_def(op):
return op.node_def # pylint: disable=protected-access
def _get_op_def(op):
return op.op_def or op_def_registry.get_registered_ops()[op.type]
def _create_input_dict(function_graph,
                       func_arg_placeholders,
                       initial_value=None):
  """Create a mapping from graph tensor names to function tensor names.

  Args:
    function_graph: the Graph whose operations are walked.
    func_arg_placeholders: set of tensor names that act as function arguments.
    initial_value: optional dict used to seed the mapping (copied, not
      mutated).

  Returns:
    Dict mapping each output tensor name (and each op name, as an alias for
    the op's first output) to its function-internal "op:arg:index" name.
  """
  if initial_value is None:
    input_dict = {}
  else:
    input_dict = dict(initial_value)
  for op in function_graph.get_operations():
    if _is_in_placeholders(op, func_arg_placeholders):
      # Placeholders keep their own name; they become function arguments.
      input_dict[op.name] = op.name
    else:
      op_def = _get_op_def(op)
      attrs = _get_node_def(op).attr
      o = 0  # running index over all output tensors of this op
      for arg_def in op_def.output_arg:
        # A single output arg may expand to several tensors, sized either by
        # a number attr or by a type-list attr; otherwise it is one tensor.
        if arg_def.number_attr:
          num = attrs[arg_def.number_attr].i
        elif arg_def.type_list_attr:
          num = len(attrs[arg_def.type_list_attr].list.type)
        else:
          num = 1
        for i in range(num):
          result = "%s:%s:%d" % (op.name, arg_def.name, i)
          input_dict[op.values()[o].name] = result
          if o == 0:
            # The bare op name aliases the op's first output tensor.
            input_dict[op.name] = result
          o += 1
  return input_dict
def _add_op_node(op, func, input_dict):
  """Converts an op to a function def node and add it to `func`.

  Args:
    op: Operation to copy into the function body.
    func: FunctionDef proto being built; mutated in place.
    input_dict: mapping from graph tensor names to function-internal names,
      used to rewrite the copied node's data inputs.
  """
  # Add an entry in func.node_def
  # Note that extend() makes a copy in this case, see:
  # https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields
  func.node_def.extend([_get_node_def(op)])
  node_def = func.node_def[-1]
  for i in range(len(node_def.input)):
    # Control inputs ("^name") are left untouched; every data input must
    # already have a function-internal name in input_dict.
    if not node_def.input[i].startswith("^"):
      assert node_def.input[i] in input_dict, ("%s missing from %s" %
                                               (node_def.input[i],
                                                input_dict.items()))
      node_def.input[i] = input_dict[node_def.input[i]]
  # The function is stateful if any of its operations are stateful.
  # NOTE(mrry): The "Const" node typically does not have an `OpDef` associated
  # with it, so we assume any nodes without an `OpDef` are stateless.
  # TODO(skyewm): Remove the `is not None` test after we transition to the C
  # API.
  if op.op_def is not None and op.op_def.is_stateful:
    func.signature.is_stateful = True
def graph_to_function_def(graph, operations, inputs, outputs, out_names=None):
  """Returns `graph` as a `FunctionDef` protocol buffer.

  This method creates a [`FunctionDef`](
  https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
  protocol buffer that contains all the ops in `operations`. The
  operations become the body of the function.

  The arguments `inputs` and `outputs` will be listed as the inputs
  and outputs tensors of the function. They must be lists of
  tensors present in the graph. The lists can optionally be empty.

  Args:
    graph: Graph.
    operations: the operations to put in the function. Must be a subset of
     the operations in the graph.
    inputs: List of tensors. Inputs to the function.
    outputs: List of tensors. Outputs of the function.
    out_names: Optional list of string names for the outputs.

  Returns:
    A FunctionDef protocol buffer.

  Raises:
    errors_impl.InvalidArgumentError: if `out_names` is specified and its
      length does not match the length of `outputs`.
    ValueError: if `out_names` contains duplicate names.
  """
  func = function_pb2.FunctionDef()
  # Caller is expected to replace the placeholder function name.
  func.signature.name = "_"
  used_names = set()
  func.signature.input_arg.extend(
      [_tensor_to_argdef(i, used_names=used_names) for i in inputs])
  # Initializes the input map with all placeholder input tensors.
  initial_dict = {}
  for o, m in zip(inputs, func.signature.input_arg):
    initial_dict[o.name] = m.name
  if out_names is None:
    # Output arg names are uniquified independently from input arg names.
    used_names = set()
    func.signature.output_arg.extend(
        [_tensor_to_argdef(o, used_names=used_names) for o in outputs])
  elif len(outputs) != len(out_names):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "output names must be either empty or equal in size to outputs. "
        "output names size = %d outputs size = %d" %
        (len(out_names), len(outputs)))
  elif len(out_names) != len(set(out_names)):
    raise ValueError(
        "Must not have duplicates in out_names: %s" % ", ".join(out_names))
  else:
    func.signature.output_arg.extend(
        [_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])
  func_arg_placeholders = set([i.name for i in inputs])
  input_dict = _create_input_dict(graph, func_arg_placeholders,
                                  initial_value=initial_dict)
  for op in operations:
    if _is_in_placeholders(op, func_arg_placeholders):
      # Placeholder ops become function arguments, not body nodes.
      continue
    _add_op_node(op, func, input_dict)
  if out_names is None:
    # Map each generated output arg name to its function-internal tensor.
    for index, o in enumerate(outputs):
      k = func.signature.output_arg[index].name
      func.ret[k] = input_dict[o.name]
  else:
    for o, n in zip(outputs, out_names):
      func.ret[n] = input_dict[o.name]
  return func
| apache-2.0 |
nvoron23/statsmodels | statsmodels/examples/ex_generic_mle.py | 32 | 16462 |
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
# Spector binary-choice dataset; append a constant column for the intercept.
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir

# Reference fit: the built-in Probit model supplies loglike/score callables.
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
# Re-fit through the generic MLE wrapper.
# NOTE(review): exog*2 rescales the design matrix here -- presumably to
# exercise the optimizer on a transformed problem; confirm intent.
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
    """Probit log-likelihood evaluated at `params`.

    `endog` is coded 0/1; it is mapped to the +/-1 sign convention and the
    per-observation standard-normal log CDF terms are accumulated.
    """
    signs = 2 * endog - 1
    linpred = np.dot(exog, params)
    return np.add.reduce(stats.norm.logcdf(signs * linpred))
# Fit the generic MLE with a standalone loglike; data is passed via fargs.
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print(res)
#np.allclose(res.params, probit_res.params)
print(res.params, probit_res.params)

#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# Instance of GenericLikelihood model doesn't work directly, because loglike
# cannot get access to data in self.endog, self.exog

# Replace the dataset with simulated normal data: y = 1 + sum of 6 normals,
# five of which (plus a constant) form the design matrix.
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)

show_error = False
show_error2 = 1#False
if show_error:
    # Broken variant: written like a method (takes self) but passed as a
    # plain function, and it returns per-observation values, not a scalar.
    def loglike_norm_xb(self, params):
        beta = params[:-1]
        sigma = params[-1]
        xb = np.dot(self.exog, beta)
        return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)

    mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
    res_norm = mod_norm.fit(method="nm", maxiter = 500)
    print(res_norm.params)

if show_error2:
    # Working variant: data supplied explicitly through fargs; params is
    # laid out as [beta..., sigma].
    def loglike_norm_xb(params, endog, exog):
        beta = params[:-1]
        sigma = params[-1]
        #print exog.shape, beta.shape
        xb = np.dot(exog, beta)
        #print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
        return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()

    mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
    res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
                            method="nm", maxiter = 5000,
                            fargs=(datal.endog, datal.exog))
    print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
    """Gaussian MLE test subclass: linear mean X*beta with common scale.

    `params` is laid out as [beta..., sigma]. Unlike the module-level
    loglike_norm_xb variants, this reads the data from self.endog/self.exog.
    """

    def loglikeobs(self, params):
        """Per-observation normal log-likelihood at `params`."""
        beta, sigma = params[:-1], params[-1]
        mean = np.dot(self.exog, beta)
        return stats.norm.logpdf(self.endog, loc=mean, scale=sigma)

    def loglike(self, params):
        """Total log-likelihood: sum of the per-observation contributions."""
        return self.loglikeobs(params).sum()
# Fit the subclass-based model and compare against OLS on the same data.
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print(res_norm2.params)

res2 = sm.OLS(datal.endog, datal.exog).fit()
# Start at the OLS solution, appending the residual std. dev. for sigma.
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
                          retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
#print res_norm3.bse # not available
print('llf', res2.llf, res_norm3.llf)

# Standard errors from the inverse numerical Hessian at the MLE.
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)

#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
                         maxiter = 500, retall=0)

from statsmodels.tools.numdiff import approx_fprime, approx_hess
# Average forward and backward one-sided Hessian approximations.
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
print(np.linalg.eigh(hh))

grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
# NOTE(review): gradb repeats the same epsilon=-1e-4 call as `grad` above;
# the average below combines the backward and forward differences.
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)

print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)

# Compare covariance estimates: analytic-Hessian inverse vs the BFGS result.
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)

print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
# Outer-product-of-gradients (OPG) based standard errors from score_obs.
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
# NOTE(review): jac2 (centered differences) is computed but never used below
# -- presumably the next line was meant to use it; confirm.
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
| bsd-3-clause |
endlessm/chromium-browser | native_client/tests/testutils.py | 9 | 1150 | # Copyright 2009 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import subprocess
import tempfile
import unittest
# From http://lackingrhoticity.blogspot.com/2008/11/tempdirtestcase-python-unittest-helper.html
class TempDirTestCase(unittest.TestCase):
  """TestCase mixin handing out temp dirs that are removed on tearDown."""

  def setUp(self):
    # Cleanup callbacks registered by make_temp_dir; run LIFO in tearDown.
    self._on_teardown = []

  def make_temp_dir(self):
    """Create a temporary directory deleted automatically after the test."""
    prefix = "tmp-%s-" % self.__class__.__name__
    path = tempfile.mkdtemp(prefix=prefix)
    self._on_teardown.append(lambda: shutil.rmtree(path))
    return path

  def tearDown(self):
    # Undo in reverse registration order, mirroring acquisition order.
    while self._on_teardown:
      self._on_teardown.pop()()
def write_file(filename, data):
  """Write `data` to `filename`, replacing any existing contents."""
  with open(filename, "w") as fh:
    fh.write(data)
def read_file(filename):
  """Return the entire contents of `filename` as a string."""
  with open(filename, "r") as fh:
    return fh.read()
# TODO: use subprocess.check_call when we have Python 2.5 on Windows.
def check_call(*args, **kwargs):
  """Run a command via subprocess.call; raise if it exits non-zero."""
  returncode = subprocess.call(*args, **kwargs)
  if returncode == 0:
    return
  raise Exception("Failed with return code %i" % returncode)
| bsd-3-clause |
jjs0sbw/CSPLN | apps/scaffolding/linux/web2py/gluon/contrib/login_methods/dropbox_account.py | 42 | 4563 | #!/usr/bin/env python
# coding: utf8
"""
Dropbox Authentication for web2py
Developed by Massimo Di Pierro (2012)
Same License as Web2py License
"""
# mind here session is dropbox session, not current.session
import os
import re
import urllib
from dropbox import client, rest, session
from gluon import *
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json
class DropboxAccount(object):
    """
    from gluon.contrib.login_methods.dropbox_account import DropboxAccount
    auth.settings.actions_disabled=['register','change_password',
                                    'request_reset_password']
    auth.settings.login_form = DropboxAccount(request,
                               key="...",
                               secret="...",
                               access_type="...",
                               login_url = "http://localhost:8000/%s/default/user/login" % request.application)
    when logged in
    client = auth.settings.login_form.client
    """

    def __init__(self,
                 request,
                 key="",
                 secret="",
                 access_type="app_folder",
                 login_url="",
                 on_login_failure=None,
                 ):
        # OAuth application credentials and the URL Dropbox redirects back to.
        self.request = request
        self.key = key
        self.secret = secret
        self.access_type = access_type
        self.login_url = login_url
        self.on_login_failure = on_login_failure
        self.sess = session.DropboxSession(
            self.key, self.secret, self.access_type)

    def get_token(self):
        # Exchange the stored request token for an access token on first use;
        # afterwards re-install the cached access token on the session.
        if not current.session.dropbox_access_token:
            request_token = current.session.dropbox_request_token
            self.sess.set_request_token(request_token[0], request_token[1])
            access_token = self.sess.obtain_access_token(self.sess.token)
            current.session.dropbox_access_token = \
                (access_token.key, access_token.secret)
        else:
            access_token = current.session.dropbox_access_token
            self.sess.set_token(access_token[0], access_token[1])

    def get_user(self):
        # Build the web2py user dict from the Dropbox account info, or None
        # when the OAuth dance has not started yet.
        if not current.session.dropbox_request_token:
            return None
        self.get_token()
        # NOTE(review): this Storage() is immediately overwritten by the
        # dict() below -- looks like a leftover; confirm before removing.
        user = Storage()
        self.client = client.DropboxClient(self.sess)
        data = self.client.account_info()
        display_name = data.get('display_name', '').split(' ', 1)
        user = dict(email=data.get('email', None),
                    first_name=display_name[0],
                    last_name=display_name[-1],
                    registration_id=data.get('uid', None))
        if not user['registration_id'] and self.on_login_failure:
            redirect(self.on_login_failure)
        return user

    def login_form(self):
        # Start the OAuth flow: stash the request token and send the user to
        # the Dropbox authorization page.
        request_token = self.sess.obtain_request_token()
        current.session.dropbox_request_token = \
            (request_token.key, request_token.secret)
        dropbox_url = self.sess.build_authorize_url(request_token,
                                                    self.login_url)
        redirect(dropbox_url)
        # NOTE(review): in web2py redirect() raises HTTP, so the IFRAME
        # fallback below appears unreachable -- confirm intent.
        form = IFRAME(_src=dropbox_url,
                      _scrolling="no",
                      _frameborder="no",
                      _style="width:400px;height:240px;")
        return form

    def logout_url(self, next="/"):
        # Revoke the Dropbox token and clear the web2py auth session.
        self.sess.unlink()
        current.session.auth = None
        return next

    def get_client(self):
        # Lazily construct the DropboxClient with a valid token.
        self.get_token()
        self.client = client.DropboxClient(self.sess)

    def put(self, filename, file):
        """Upload `file` to `filename`; return the uploaded size in bytes."""
        if not hasattr(self,'client'): self.get_client()
        return self.client.put_file(filename, file)['bytes']

    def get(self, filename):
        """Download and return the file object stored at `filename`."""
        if not hasattr(self,'client'): self.get_client()
        return self.client.get_file(filename)

    def dir(self, path):
        """Return Dropbox metadata (directory listing) for `path`."""
        if not hasattr(self,'client'): self.get_client()
        return self.client.metadata(path)
def use_dropbox(auth, filename='private/dropbox.key', **kwargs):
    """Configure `auth` to log in via Dropbox if a key file is present.

    The key file holds "key:secret:access_type" on a single line, relative to
    the application folder. When the file is missing this silently does
    nothing.

    Args:
        auth: the web2py Auth instance to reconfigure.
        filename: path of the key file, relative to the application folder.
        **kwargs: extra keyword arguments forwarded to DropboxAccount.
    """
    path = os.path.join(current.request.folder, filename)
    if os.path.exists(path):
        request = current.request
        key, secret, access_type = open(path, 'r').read().strip().split(':')
        host = current.request.env.http_host
        login_url = "http://%s/%s/default/user/login" % \
            (host, request.application)
        # Dropbox owns the credentials, so local account management is off.
        auth.settings.actions_disabled = \
            ['register', 'change_password', 'request_reset_password']
        auth.settings.login_form = DropboxAccount(
            request, key=key, secret=secret, access_type=access_type,
            login_url=login_url, **kwargs)
| gpl-3.0 |
baylee-d/osf.io | addons/github/tests/test_models.py | 6 | 14682 | # -*- coding: utf-8 -*-
import mock
import pytest
import unittest
from json import dumps
from addons.base.tests.models import (OAuthAddonNodeSettingsTestSuiteMixin,
OAuthAddonUserSettingTestSuiteMixin)
from addons.github.models import NodeSettings
from addons.github.tests import factories
from osf_tests.factories import ProjectFactory, UserFactory, DraftRegistrationFactory
from nose.tools import (assert_equal, assert_false, assert_in, assert_is,
assert_not_equal, assert_not_in, assert_true)
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase, get_default_metaschema
from framework.auth import Auth
from addons.base import exceptions
from addons.github.exceptions import NotFoundError
from .utils import create_mock_github
mock_github = create_mock_github()
pytestmark = pytest.mark.django_db
class TestNodeSettings(OAuthAddonNodeSettingsTestSuiteMixin, unittest.TestCase):
    """NodeSettings test suite for the GitHub addon.

    Note on mock.patch: stacked @mock.patch decorators inject their mocks
    bottom-up, so the first mock parameter after `self` corresponds to the
    decorator closest to the function. `test_to_json` and
    `test_get_folders_not_have_auth` previously had their mock parameter
    names swapped relative to that order, so e.g. `mock_repos` actually held
    the `check_authorization` mock; the signatures are corrected here.
    """

    short_name = 'github'
    full_name = 'GitHub'
    ExternalAccountFactory = factories.GitHubAccountFactory

    NodeSettingsFactory = factories.GitHubNodeSettingsFactory
    NodeSettingsClass = NodeSettings
    UserSettingsFactory = factories.GitHubUserSettingsFactory

    ## Mixin Overrides ##

    def _node_settings_class_kwargs(self, node, user_settings):
        # GitHub is configured by a user/repo pair instead of a folder id.
        return {
            'user_settings': self.user_settings,
            'repo': 'mock',
            'user': 'abc',
            'owner': self.node
        }

    def test_set_folder(self):
        # GitHub doesn't use folderpicker, and the nodesettings model
        # does not need a `set_repo` method
        pass

    def test_serialize_settings(self):
        # GitHub's serialized_settings are a little different from
        # common storage addons.
        settings = self.node_settings.serialize_waterbutler_settings()
        expected = {'owner': self.node_settings.user, 'repo': self.node_settings.repo}
        assert_equal(settings, expected)

    @mock.patch(
        'addons.github.models.UserSettings.revoke_remote_oauth_access',
        mock.PropertyMock()
    )
    def test_complete_has_auth_not_verified(self):
        super(TestNodeSettings, self).test_complete_has_auth_not_verified()

    @mock.patch('addons.github.api.GitHubClient.repos')
    @mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_to_json(self, mock_check_authorization, mock_repos):
        # Fixed: parameter order now matches the bottom-up decorator order,
        # so the empty repo list is configured on the repos mock.
        mock_repos.return_value = {}
        super(TestNodeSettings, self).test_to_json()

    @mock.patch('addons.github.api.GitHubClient.repos')
    @mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_to_json_user_is_owner(self, mock_check_authorization, mock_repos):
        mock_check_authorization.return_value = True
        mock_repos.return_value = {}
        result = self.node_settings.to_json(self.user)
        assert_true(result['user_has_auth'])
        assert_equal(result['github_user'], 'abc')
        assert_true(result['is_owner'])
        assert_true(result['valid_credentials'])
        assert_equal(result.get('repo_names', None), [])

    @mock.patch('addons.github.api.GitHubClient.repos')
    @mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_to_json_user_is_not_owner(self, mock_check_authorization, mock_repos):
        mock_check_authorization.return_value = True
        mock_repos.return_value = {}
        not_owner = UserFactory()
        result = self.node_settings.to_json(not_owner)
        assert_false(result['user_has_auth'])
        assert_equal(result['github_user'], 'abc')
        assert_false(result['is_owner'])
        assert_true(result['valid_credentials'])
        assert_equal(result.get('repo_names', None), None)

    @mock.patch('addons.github.api.GitHubClient.repos')
    @mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_get_folders(self, mock_check_authorization, mock_repos):
        mock_repos.return_value = [Repository.from_json(dumps({'name': 'test',
                                                         'id': '12345',
                                                         'owner':
                                                             {'login': 'test name'}
                                                         }))
                                   ]
        result = self.node_settings.get_folders()
        assert_equal(len(result), 1)
        assert_equal(result[0]['id'], '12345')
        assert_equal(result[0]['name'], 'test')
        assert_equal(result[0]['path'], 'test name/test')
        assert_equal(result[0]['kind'], 'repo')

    @mock.patch('addons.github.api.GitHubClient.repos')
    @mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_get_folders_not_have_auth(self, mock_check_authorization, mock_repos):
        # Fixed: parameter order now matches the bottom-up decorator order.
        mock_repos.return_value = [Repository.from_json(dumps({'name': 'test',
                                                         'id': '12345',
                                                         'owner':
                                                             {'login': 'test name'}
                                                         }))
                                   ]
        self.node_settings.user_settings = None
        with pytest.raises(exceptions.InvalidAuthError):
            self.node_settings.get_folders()
class TestUserSettings(OAuthAddonUserSettingTestSuiteMixin, unittest.TestCase):
    """UserSettings test suite for the GitHub addon."""

    short_name = 'github'
    full_name = 'GitHub'
    ExternalAccountFactory = factories.GitHubAccountFactory

    def test_public_id(self):
        # The public id surfaced for a user is the display name of the first
        # linked GitHub external account.
        assert_equal(self.user.external_accounts.first().display_name, self.user_settings.public_id)
class TestCallbacks(OsfTestCase):
    def setUp(self):
        """Build a project with two contributors and a configured GitHub addon.

        The project creator is the authenticator (holds the external
        account); `non_authenticator` is a plain contributor used to test
        permission-dependent callbacks.
        """
        super(TestCallbacks, self).setUp()
        self.project = ProjectFactory()
        self.consolidated_auth = Auth(self.project.creator)
        self.project.creator.save()
        self.non_authenticator = UserFactory()
        self.non_authenticator.save()
        self.project.save()
        self.project.add_contributor(
            contributor=self.non_authenticator,
            auth=self.consolidated_auth,
        )
        self.project.add_addon('github', auth=self.consolidated_auth)
        self.project.creator.add_addon('github')
        self.external_account = factories.GitHubAccountFactory()
        self.project.creator.external_accounts.add(self.external_account)
        self.project.creator.save()
        self.node_settings = self.project.get_addon('github')
        self.user_settings = self.project.creator.get_addon('github')
        self.node_settings.user_settings = self.user_settings
        self.node_settings.user = 'Queen'
        self.node_settings.repo = 'Sheer-Heart-Attack'
        self.node_settings.external_account = self.external_account
        self.node_settings.save()
        # NOTE(review): bare attribute access -- looks like set_auth(...) was
        # meant to be called here; confirm.
        self.node_settings.set_auth
        self.user_settings.oauth_grants[self.project._id] = {self.external_account._id: []}
        self.user_settings.save()
    @mock.patch('addons.github.api.GitHubClient.repo')
    def test_before_make_public(self, mock_repo):
        # When the repo lookup fails, before_make_public returns None
        # (no warning message is produced).
        mock_repo.side_effect = NotFoundError
        result = self.node_settings.before_make_public(self.project)
        assert_is(result, None)
    # The four tests below cover the OSF-visibility x GitHub-privacy matrix:
    # a warning message is produced only when the two disagree.
    @mock.patch('addons.github.api.GitHubClient.repo')
    def test_before_page_load_osf_public_gh_public(self, mock_repo):
        self.project.is_public = True
        self.project.save()
        mock_repo.return_value = Repository.from_json(dumps({'private': False}))
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_false(message)

    @mock.patch('addons.github.api.GitHubClient.repo')
    def test_before_page_load_osf_public_gh_private(self, mock_repo):
        self.project.is_public = True
        self.project.save()
        mock_repo.return_value = Repository.from_json(dumps({'private': True}))
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)

    @mock.patch('addons.github.api.GitHubClient.repo')
    def test_before_page_load_osf_private_gh_public(self, mock_repo):
        mock_repo.return_value = Repository.from_json(dumps({'private': False}))
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)

    @mock.patch('addons.github.api.GitHubClient.repo')
    def test_before_page_load_osf_private_gh_private(self, mock_repo):
        mock_repo.return_value = Repository.from_json(dumps({'private': True}))
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_false(message)
def test_before_page_load_not_contributor(self):
message = self.node_settings.before_page_load(self.project, UserFactory())
assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, self.consolidated_auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_not_in('You can re-authenticate', message)
def test_after_remove_contributor_authenticator_not_self(self):
auth = Auth(user=self.non_authenticator)
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_in('You can re-authenticate', message)
def test_after_remove_contributor_not_authenticator(self):
self.node_settings.after_remove_contributor(
self.project, self.non_authenticator, self.consolidated_auth
)
assert_not_equal(
self.node_settings.user_settings,
None,
)
def test_after_fork_authenticator(self):
fork = ProjectFactory()
clone = self.node_settings.after_fork(
self.project, fork, self.project.creator,
)
assert_equal(
self.node_settings.user_settings,
clone.user_settings,
)
def test_after_fork_not_authenticator(self):
fork = ProjectFactory()
clone = self.node_settings.after_fork(
self.project, fork, self.non_authenticator,
)
assert_equal(
clone.user_settings,
None,
)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
@mock.patch('website.archiver.tasks.archive')
def test_does_not_get_copied_to_registrations(self, mock_archive):
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(user=self.project.creator),
draft_registration=DraftRegistrationFactory(branched_from=self.project),
)
assert_false(registration.has_addon('github'))
class TestGithubNodeSettings(unittest.TestCase):
    """Webhook deletion behaviour of the GitHub node settings model."""

    def setUp(self):
        super(TestGithubNodeSettings, self).setUp()
        # A user with a linked GitHub account and node settings bound to it.
        self.user = UserFactory()
        self.user.add_addon('github')
        self.user_settings = self.user.get_addon('github')
        self.external_account = factories.GitHubAccountFactory()
        self.user_settings.owner.external_accounts.add(self.external_account)
        self.user_settings.owner.save()
        self.node_settings = factories.GitHubNodeSettingsFactory(user_settings=self.user_settings)

    @mock.patch('addons.github.api.GitHubClient.delete_hook')
    def test_delete_hook(self, mock_delete_hook):
        """An existing hook is deleted through the client and reported."""
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        # Capture the expected call arguments before invoking delete_hook.
        expected_args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        deleted = self.node_settings.delete_hook()
        assert_true(deleted)
        mock_delete_hook.assert_called_with(*expected_args)

    @mock.patch('addons.github.api.GitHubClient.delete_hook')
    def test_delete_hook_no_hook(self, mock_delete_hook):
        """Without a configured hook nothing is deleted."""
        deleted = self.node_settings.delete_hook()
        assert_false(deleted)
        assert_false(mock_delete_hook.called)

    @mock.patch('addons.github.api.GitHubClient.delete_hook')
    def test_delete_hook_not_found(self, mock_delete_hook):
        """A hook missing on the remote side is reported as not deleted."""
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        mock_delete_hook.side_effect = NotFoundError
        expected_args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        deleted = self.node_settings.delete_hook()
        assert_false(deleted)
        mock_delete_hook.assert_called_with(*expected_args)

    @mock.patch('addons.github.api.GitHubClient.delete_hook')
    def test_delete_hook_error(self, mock_delete_hook):
        """A GitHub API error results in a not-deleted report."""
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        mock_delete_hook.side_effect = GitHubError(mock.Mock())
        expected_args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        deleted = self.node_settings.delete_hook()
        assert_false(deleted)
        mock_delete_hook.assert_called_with(*expected_args)
| apache-2.0 |
dipakvwarade/pychef | chef/tests/test_api.py | 3 | 1062 | import os
import unittest2
from chef.api import ChefAPI
class APITestCase(unittest2.TestCase):
    """Tests for building ChefAPI instances from client.rb-style configs."""

    def load(self, path):
        """Parse a named config file from the test ``configs`` directory."""
        full_path = os.path.join(os.path.dirname(__file__), 'configs', path)
        return ChefAPI.from_config_file(full_path)

    def test_basic(self):
        api = self.load('basic.rb')
        self.assertEqual(api.url, 'http://chef:4000')
        self.assertEqual(api.client, 'test_1')

    def test_current_dir(self):
        # A relative key path is resolved against the config file's directory.
        api = self.load('current_dir.rb')
        expected = os.path.join(os.path.dirname(__file__), 'configs', 'test_1')
        self.assertEqual(os.path.normpath(api.client), expected)

    def test_env_variables(self):
        os.environ['_PYCHEF_TEST_'] = 'foobar'
        try:
            api = self.load('env_values.rb')
            self.assertEqual(api.client, 'foobar')
        finally:
            # Always scrub the variable so other tests see a clean env.
            del os.environ['_PYCHEF_TEST_']

    def test_bad_key_raises(self):
        # Both None and the empty string are rejected as key material.
        for bad_key in (None, ''):
            self.assertRaises(
                ValueError, ChefAPI, 'foobar', bad_key, 'user')
| apache-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.3/django/db/models/sql/query.py | 30 | 82774 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from django.utils.copycompat import deepcopy
from django.utils.tree import Node
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import select_related_descend, InvalidQuery
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
    """
    A single raw SQL query.
    """

    def __init__(self, sql, using, params=None):
        self.params = params or ()
        self.sql = sql
        self.using = using
        self.cursor = None

        # Mirror a handful of attributes of a normal Query so that the
        # compiler can be used to process results from this object too.
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.extra_select = {}
        self.aggregate_select = {}

    def clone(self, using):
        """Return a copy of this raw query bound to the given alias."""
        return RawQuery(self.sql, using, params=self.params)

    def convert_values(self, value, field, connection):
        """Convert the database-returned value into a type that is consistent
        across database backends.

        By default, this defers to the underlying backend operations, but
        it can be overridden by Query classes for specific backends.
        """
        return connection.ops.convert_values(value, field)

    def get_columns(self):
        if self.cursor is None:
            self._execute_query()
        converter = connections[self.using].introspection.table_name_converter
        return [converter(meta[0]) for meta in self.cursor.description]

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        if connections[self.using].features.can_use_chunked_reads:
            rows = self.cursor
        else:
            # Backends without chunked reads must be evaluated up front.
            rows = list(self.cursor)
        return iter(rows)

    def __repr__(self):
        return "<RawQuery: %r>" % (self.sql % self.params)

    def _execute_query(self):
        self.cursor = connections[self.using].cursor()
        self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
    """
    Set up a blank query for ``model``.

    ``where`` is the node class used to build the WHERE (and HAVING)
    trees; it is stored on the instance so clones can build matching
    nodes.
    """
    self.model = model
    self.alias_refcount = {}
    self.alias_map = {}     # Maps alias to join information
    self.table_map = {}     # Maps table names to list of aliases.
    self.join_map = {}
    self.rev_join_map = {}  # Reverse of join_map.
    self.quote_cache = {}
    self.default_cols = True
    self.default_ordering = True
    self.standard_ordering = True
    self.ordering_aliases = []
    self.select_fields = []
    self.related_select_fields = []
    self.dupe_avoidance = {}
    self.used_aliases = set()
    self.filter_is_sticky = False
    self.included_inherited_models = {}

    # SQL-related attributes
    self.select = []
    self.tables = []    # Aliases in the order they are created.
    self.where = where()
    self.where_class = where
    self.group_by = None
    self.having = where()
    self.order_by = []
    self.low_mark, self.high_mark = 0, None  # Used for offset/limit
    self.distinct = False
    self.select_related = False
    self.related_select_cols = []

    # SQL aggregate-related attributes
    self.aggregates = SortedDict()  # Maps alias -> SQL aggregate function
    self.aggregate_select_mask = None
    self._aggregate_select_cache = None

    # Arbitrary maximum limit for select_related. Prevents infinite
    # recursion. Can be changed by the depth parameter to select_related().
    self.max_depth = 5

    # These are for extensions. The contents are more or less appended
    # verbatim to the appropriate clause.
    self.extra = SortedDict()  # Maps col_alias -> (col_sql, params).
    self.extra_select_mask = None
    self._extra_select_cache = None
    self.extra_tables = ()
    self.extra_order_by = ()

    # A tuple that is a set of model field names and either True, if these
    # are the fields to defer, or False if these are the only fields to
    # load.
    self.deferred_loading = (set(), True)
def __str__(self):
    """
    Render the query as a string of SQL with the parameter values
    substituted in.

    The substituted values won't necessarily be quoted correctly: real
    quoting is done by the database interface at execution time.
    """
    compiled_sql, compiled_params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
    return compiled_sql % compiled_params
def __deepcopy__(self, memo):
    """Deep-copy via clone(), recording the copy in ``memo``."""
    duplicate = self.clone(memo=memo)
    memo[id(self)] = duplicate
    return duplicate
def __getstate__(self):
    """
    Pickling support.

    Field instances can't be pickled, so ``select_fields`` is replaced by
    a list of field names (``None`` entries pass through unchanged), and
    the related-select lists are dropped; __setstate__ rebuilds the field
    instances from the names.
    """
    obj_dict = self.__dict__.copy()
    obj_dict['related_select_fields'] = []
    obj_dict['related_select_cols'] = []

    # Fields can't be pickled, so if a field list has been
    # specified, we pickle the list of field names instead.
    # None is also a possible value; that can pass as-is.
    # NOTE: an explicit conditional expression is used instead of the old
    # `f is not None and f.name or None` idiom, which would collapse a
    # falsy field name to None.
    obj_dict['select_fields'] = [
        f.name if f is not None else None
        for f in obj_dict['select_fields']
    ]
    return obj_dict
def __setstate__(self, obj_dict):
    """
    Unpickling support.

    Resolves the field names stored by __getstate__ back into Field
    instances (``None`` entries pass through unchanged) before restoring
    the instance dict.
    """
    # Rebuild the list of field instances from the pickled names.
    # NOTE: an explicit conditional expression is used instead of the old
    # `name is not None and opts.get_field(name) or None` idiom, which
    # would collapse a falsy field object to None.
    opts = obj_dict['model']._meta
    obj_dict['select_fields'] = [
        opts.get_field(name) if name is not None else None
        for name in obj_dict['select_fields']
    ]
    self.__dict__.update(obj_dict)
def prepare(self):
    # Hook point for subclasses that need to transform the query before
    # compilation; the base implementation is a no-op returning self.
    return self
def get_compiler(self, using=None, connection=None):
    """
    Return a SQL compiler for this query on the given connection.

    Exactly one of ``using`` (a connection alias) or ``connection`` must
    be supplied; ``using`` takes precedence when both are given.

    Raises ValueError when neither is provided.
    """
    if using is None and connection is None:
        raise ValueError("Need either using or connection")
    if using:
        connection = connections[using]

    # Check that the compiler will be able to execute the query.
    # (Iterate the values directly -- the aliases are not needed here.)
    for aggregate in self.aggregate_select.values():
        connection.ops.check_aggregate_support(aggregate)

    return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
    """
    Returns the Options instance (the model._meta) from which to start
    processing. Normally, this is self.model._meta, but it can be changed
    by subclasses.
    """
    # Single indirection point so subclasses can redirect option lookup.
    return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
    """
    Creates a copy of the current instance. The 'kwargs' parameter can be
    used by clients to update attributes after copying has taken place.
    """
    # Empty() + __class__ assignment builds an instance without running
    # __init__, so every attribute is copied explicitly below.
    obj = Empty()
    obj.__class__ = klass or self.__class__
    obj.model = self.model
    obj.alias_refcount = self.alias_refcount.copy()
    obj.alias_map = self.alias_map.copy()
    obj.table_map = self.table_map.copy()
    obj.join_map = self.join_map.copy()
    obj.rev_join_map = self.rev_join_map.copy()
    obj.quote_cache = {}
    obj.default_cols = self.default_cols
    obj.default_ordering = self.default_ordering
    obj.standard_ordering = self.standard_ordering
    obj.included_inherited_models = self.included_inherited_models.copy()
    obj.ordering_aliases = []
    obj.select_fields = self.select_fields[:]
    obj.related_select_fields = self.related_select_fields[:]
    obj.dupe_avoidance = self.dupe_avoidance.copy()
    obj.select = self.select[:]
    obj.tables = self.tables[:]
    # Where/having trees are mutable, so they must be deep-copied.
    obj.where = deepcopy(self.where, memo=memo)
    obj.where_class = self.where_class
    if self.group_by is None:
        obj.group_by = None
    else:
        obj.group_by = self.group_by[:]
    obj.having = deepcopy(self.having, memo=memo)
    obj.order_by = self.order_by[:]
    obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
    obj.distinct = self.distinct
    obj.select_related = self.select_related
    obj.related_select_cols = []
    obj.aggregates = deepcopy(self.aggregates, memo=memo)
    if self.aggregate_select_mask is None:
        obj.aggregate_select_mask = None
    else:
        obj.aggregate_select_mask = self.aggregate_select_mask.copy()
    # _aggregate_select_cache cannot be copied, as doing so breaks the
    # (necessary) state in which both aggregates and
    # _aggregate_select_cache point to the same underlying objects.
    # It will get re-populated in the cloned queryset the next time it's
    # used.
    obj._aggregate_select_cache = None
    obj.max_depth = self.max_depth
    obj.extra = self.extra.copy()
    if self.extra_select_mask is None:
        obj.extra_select_mask = None
    else:
        obj.extra_select_mask = self.extra_select_mask.copy()
    if self._extra_select_cache is None:
        obj._extra_select_cache = None
    else:
        obj._extra_select_cache = self._extra_select_cache.copy()
    obj.extra_tables = self.extra_tables
    obj.extra_order_by = self.extra_order_by
    obj.deferred_loading = deepcopy(self.deferred_loading, memo=memo)
    # Sticky filters keep their used aliases so follow-up filters reuse
    # the same joins; otherwise start from a clean slate.
    if self.filter_is_sticky and self.used_aliases:
        obj.used_aliases = self.used_aliases.copy()
    else:
        obj.used_aliases = set()
    obj.filter_is_sticky = False
    obj.__dict__.update(kwargs)
    if hasattr(obj, '_setup_query'):
        obj._setup_query()
    return obj
def convert_values(self, value, field, connection):
    """Convert the database-returned value into a type that is consistent
    across database backends.

    By default, this defers to the underlying backend operations, but
    it can be overridden by Query classes for specific backends.
    (Mirrors RawQuery.convert_values above.)
    """
    return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
    """Resolve the value of aggregates returned by the database to
    consistent (and reasonable) types.

    This is required because of the predisposition of certain backends
    to return Decimal and long types when they are not needed.
    """
    if value is None:
        # Ordinal aggregates (e.g. count) of nothing are 0; everything
        # else stays None as-is.
        return 0 if aggregate.is_ordinal else value
    if aggregate.is_ordinal:
        # Any ordinal aggregate (e.g., count) returns an int.
        return int(value)
    if aggregate.is_computed:
        # Any computed aggregate (e.g., avg) returns a float.
        return float(value)
    # Otherwise the return type depends on the field being aggregated.
    return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
    """
    Returns the dictionary with the values of the existing aggregations.
    """
    if not self.aggregate_select:
        return {}

    # If there is a group by clause, aggregating does not add useful
    # information but retrieves only the first row. Aggregate
    # over the subquery instead.
    if self.group_by is not None:
        from django.db.models.sql.subqueries import AggregateQuery
        query = AggregateQuery(self.model)

        obj = self.clone()

        # Remove any aggregates marked for reduction from the subquery
        # and move them to the outer AggregateQuery.
        for alias, aggregate in self.aggregate_select.items():
            if aggregate.is_summary:
                query.aggregate_select[alias] = aggregate
                del obj.aggregate_select[alias]

        try:
            query.add_subquery(obj, using)
        except EmptyResultSet:
            # The inner query can produce no rows, so every aggregate
            # resolves to None.
            return dict(
                (alias, None)
                for alias in query.aggregate_select
            )
    else:
        # No GROUP BY: aggregate directly over this query, stripped of
        # its column selection.
        query = self
        self.select = []
        self.default_cols = False
        self.extra = {}
        self.remove_inherited_models()

    # Ordering, limits and select_related are irrelevant to (and would
    # interfere with) a pure aggregate query.
    query.clear_ordering(True)
    query.clear_limits()
    query.select_related = False
    query.related_select_cols = []
    query.related_select_fields = []

    result = query.get_compiler(using).execute_sql(SINGLE)
    if result is None:
        result = [None for q in query.aggregate_select.items()]

    return dict([
        (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
        for (alias, aggregate), val
        in zip(query.aggregate_select.items(), result)
    ])
def get_count(self, using):
    """
    Performs a COUNT() query using the current filter constraints.
    """
    obj = self.clone()
    if len(self.select) > 1 or self.aggregate_select:
        # If a select clause exists, then the query has already started to
        # specify the columns that are to be returned.
        # In this case, we need to use a subquery to evaluate the count.
        from django.db.models.sql.subqueries import AggregateQuery
        subquery = obj
        subquery.clear_ordering(True)
        subquery.clear_limits()

        obj = AggregateQuery(obj.model)
        try:
            obj.add_subquery(subquery, using=using)
        except EmptyResultSet:
            # add_subquery evaluates the query; if it's an EmptyResultSet
            # then there can be no results, and the count is therefore
            # obviously 0.
            return 0

    obj.add_count_column()
    number = obj.get_aggregation(using=using)[None]

    # Apply offset and limit constraints manually, since using LIMIT/OFFSET
    # in SQL (in variants that provide them) doesn't change the COUNT
    # output.
    number = max(0, number - self.low_mark)
    if self.high_mark is not None:
        number = min(number, self.high_mark - self.low_mark)

    return number
def has_results(self, using):
    """Return True if executing this query would yield at least one row."""
    probe = self.clone()
    # Strip the select list down to a single constant extra column and
    # drop ordering; LIMIT 1 is all that's needed to test for existence.
    probe.add_extra({'a': 1}, None, None, None, None, None)
    probe.select = []
    probe.select_fields = []
    probe.default_cols = False
    probe.select_related = False
    probe.set_extra_mask(('a',))
    probe.set_aggregate_mask(())
    probe.clear_ordering(True)
    probe.set_limits(high=1)
    return bool(probe.get_compiler(using=using).execute_sql(SINGLE))
def combine(self, rhs, connector):
    """
    Merge the 'rhs' query into the current one (with any 'rhs' effects
    being applied *after* (that is, "to the right of") anything in the
    current query. 'rhs' is not modified during a call to this function.

    The 'connector' parameter describes how to connect filters from the
    'rhs' query.
    """
    assert self.model == rhs.model, \
            "Cannot combine queries on two different base models."
    assert self.can_filter(), \
            "Cannot combine queries once a slice has been taken."
    assert self.distinct == rhs.distinct, \
        "Cannot combine a unique query with a non-unique query."

    self.remove_inherited_models()
    # Work out how to relabel the rhs aliases, if necessary.
    change_map = {}
    used = set()
    conjunction = (connector == AND)
    first = True
    for alias in rhs.tables:
        if not rhs.alias_refcount[alias]:
            # An unused alias.
            continue
        promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
        new_alias = self.join(rhs.rev_join_map[alias],
                (conjunction and not first), used, promote, not conjunction)
        used.add(new_alias)
        change_map[alias] = new_alias
        first = False

    # So that we don't exclude valid results in an "or" query combination,
    # all joins exclusive to either the lhs or the rhs must be converted
    # to an outer join.
    if not conjunction:
        l_tables = set(self.tables)
        r_tables = set(rhs.tables)
        # Update r_tables aliases.
        for alias in change_map:
            if alias in r_tables:
                # r_tables may contain entries that have a refcount of 0
                # if the query has references to a table that can be
                # trimmed because only the foreign key is used.
                # We only need to fix the aliases for the tables that
                # actually have aliases.
                if rhs.alias_refcount[alias]:
                    r_tables.remove(alias)
                    r_tables.add(change_map[alias])
        # Find aliases that are exclusive to rhs or lhs.
        # These are promoted to outer joins.
        outer_tables = (l_tables | r_tables) - (l_tables & r_tables)
        for alias in outer_tables:
            # Again, some of the tables won't have aliases due to
            # the trimming of unnecessary tables.
            if self.alias_refcount.get(alias) or rhs.alias_refcount.get(alias):
                self.promote_alias(alias, True)

    # Now relabel a copy of the rhs where-clause and add it to the current
    # one.
    if rhs.where:
        w = deepcopy(rhs.where)
        w.relabel_aliases(change_map)
        if not self.where:
            # Since 'self' matches everything, add an explicit "include
            # everything" where-constraint so that connections between the
            # where clauses won't exclude valid results.
            self.where.add(EverythingNode(), AND)
    elif self.where:
        # rhs has an empty where clause.
        w = self.where_class()
        w.add(EverythingNode(), AND)
    else:
        w = self.where_class()
    self.where.add(w, connector)

    # Selection columns and extra extensions are those provided by 'rhs'.
    self.select = []
    for col in rhs.select:
        if isinstance(col, (list, tuple)):
            # Plain (alias, column) pairs just need the alias remapped.
            self.select.append((change_map.get(col[0], col[0]), col[1]))
        else:
            # Column expression objects carry their own alias references.
            item = deepcopy(col)
            item.relabel_aliases(change_map)
            self.select.append(item)
    self.select_fields = rhs.select_fields[:]

    if connector == OR:
        # It would be nice to be able to handle this, but the queries don't
        # really make sense (or return consistent value sets). Not worth
        # the extra complexity when you can write a real query instead.
        if self.extra and rhs.extra:
            raise ValueError("When merging querysets using 'or', you "
                    "cannot have extra(select=...) on both sides.")
    self.extra.update(rhs.extra)
    extra_select_mask = set()
    if self.extra_select_mask is not None:
        extra_select_mask.update(self.extra_select_mask)
    if rhs.extra_select_mask is not None:
        extra_select_mask.update(rhs.extra_select_mask)
    if extra_select_mask:
        self.set_extra_mask(extra_select_mask)
    self.extra_tables += rhs.extra_tables

    # Ordering uses the 'rhs' ordering, unless it has none, in which case
    # the current ordering is used.
    self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
    self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
    """
    Converts the self.deferred_loading data structure to an alternate data
    structure, describing the field that *will* be loaded. This is used to
    compute the columns to select from the database and also by the
    QuerySet class to work out which fields are being initialised on each
    model. Models that have all their fields included aren't mentioned in
    the result, only those that have field restrictions in place.

    The "target" parameter is the instance that is populated (in place).
    The "callback" is a function that is called whenever a (model, field)
    pair need to be added to "target". It accepts three parameters:
    "target", and the model and list of fields being added for that model.
    """
    field_names, defer = self.deferred_loading
    if not field_names:
        return

    orig_opts = self.model._meta
    seen = {}
    must_include = {self.model: set([orig_opts.pk])}
    for field_name in field_names:
        parts = field_name.split(LOOKUP_SEP)
        cur_model = self.model
        opts = orig_opts
        for name in parts[:-1]:
            old_model = cur_model
            source = opts.get_field_by_name(name)[0]
            cur_model = opts.get_field_by_name(name)[0].rel.to
            opts = cur_model._meta
            # Even if we're "just passing through" this model, we must add
            # both the current model's pk and the related reference field
            # to the things we select.
            must_include[old_model].add(source)
            add_to_dict(must_include, cur_model, opts.pk)
        field, model, _, _ = opts.get_field_by_name(parts[-1])
        if model is None:
            model = cur_model
        add_to_dict(seen, model, field)

    if defer:
        # We need to load all fields for each model, except those that
        # appear in "seen" (for all models that appear in "seen"). The only
        # slight complexity here is handling fields that exist on parent
        # models.
        workset = {}
        for model, values in seen.iteritems():
            for field, m in model._meta.get_fields_with_model():
                if field in values:
                    continue
                add_to_dict(workset, m or model, field)
        for model, values in must_include.iteritems():
            # If we haven't included a model in workset, we don't add the
            # corresponding must_include fields for that model, since an
            # empty set means "include all fields". That's why there's no
            # "else" branch here.
            if model in workset:
                workset[model].update(values)
        for model, values in workset.iteritems():
            callback(target, model, values)
    else:
        for model, values in must_include.iteritems():
            if model in seen:
                seen[model].update(values)
            else:
                # As we've passed through this model, but not explicitly
                # included any fields, we have to make sure it's mentioned
                # so that only the "must include" fields are pulled in.
                seen[model] = values
        # Now ensure that every model in the inheritance chain is mentioned
        # in the parent list. Again, it must be mentioned to ensure that
        # only "must include" fields are pulled in.
        for model in orig_opts.get_parent_list():
            if model not in seen:
                seen[model] = set()
        for model, values in seen.iteritems():
            callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
    """
    Callback used by deferred_to_columns(). The "target" parameter should
    be a set instance.
    """
    # Collect the column names for this model's table, creating the
    # per-table set on first use.
    columns = target.setdefault(model._meta.db_table, set())
    for field in fields:
        columns.add(field.column)
def table_alias(self, table_name, create=False):
    """
    Returns a table alias for the given table_name and whether this is a
    new alias or not.

    If 'create' is true, a new alias is always created. Otherwise, the
    most recently created alias for the table (if one exists) is reused.
    """
    current = self.table_map.get(table_name)
    if not create and current:
        # Reuse the existing alias and bump its reference count.
        alias = current[0]
        self.alias_refcount[alias] += 1
        return alias, False

    # Create a new alias for this table.
    if current:
        alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
        current.append(alias)
    else:
        # The first occurrence of a table uses the table name directly.
        alias = table_name
        self.table_map[alias] = [alias]
    self.alias_refcount[alias] = 1
    self.tables.append(alias)
    return alias, True
def ref_alias(self, alias):
    """Increases the reference count for this alias."""
    self.alias_refcount[alias] += 1
def unref_alias(self, alias):
    """Decreases the reference count for this alias."""
    self.alias_refcount[alias] -= 1
def promote_alias(self, alias, unconditional=False):
    """
    Promotes the join type of an alias to an outer join if it's possible
    for the join to contain NULL values on the left. If 'unconditional' is
    False, the join is only promoted if it is nullable, otherwise it is
    always promoted.

    Returns True if the join was promoted by this call.
    """
    join_info = self.alias_map[alias]
    # Nothing to do when the join is already a LEFT OUTER JOIN.
    if join_info[JOIN_TYPE] == self.LOUTER:
        return False
    # Only nullable joins are promoted unless promotion is forced.
    if not (unconditional or join_info[NULLABLE]):
        return False
    data = list(join_info)
    data[JOIN_TYPE] = self.LOUTER
    self.alias_map[alias] = tuple(data)
    return True
def promote_alias_chain(self, chain, must_promote=False):
    """
    Walks along a chain of aliases, promoting the first nullable join and
    any joins following that. If 'must_promote' is True, all the aliases in
    the chain are promoted.
    """
    promote_rest = must_promote
    for alias in chain:
        # Once any alias in the chain is promoted, every later one is too.
        if self.promote_alias(alias, promote_rest):
            promote_rest = True
def promote_unused_aliases(self, initial_refcounts, used_aliases):
    """
    Given a "before" copy of the alias_refcounts dictionary (as
    'initial_refcounts') and a collection of aliases that may have been
    changed or created, works out which aliases have been created since
    then and which ones haven't been used and promotes all of those
    aliases, plus any children of theirs in the alias tree, to outer joins.
    """
    # FIXME: There's some (a lot of!) overlap with the similar OR promotion
    # in add_filter(). It's not quite identical, but is very similar. So
    # pulling out the common bits is something for later.
    considered = {}
    for alias in self.tables:
        if alias not in used_aliases:
            continue
        # An alias is "new or unused" when it is absent from the initial
        # snapshot or its refcount has not changed since then.
        if (alias not in initial_refcounts or
                self.alias_refcount[alias] == initial_refcounts[alias]):
            parent = self.alias_map[alias][LHS_ALIAS]
            must_promote = considered.get(parent, False)
            promoted = self.promote_alias(alias, must_promote)
            considered[alias] = must_promote or promoted
def change_aliases(self, change_map):
    """
    Changes the aliases in change_map (which maps old-alias -> new-alias),
    relabelling any references to them in select columns and the where
    clause.
    """
    # Old and new alias names must be disjoint, otherwise a rename could
    # clobber another entry mid-loop.
    assert set(change_map.keys()).intersection(set(change_map.values())) == set()

    # 1. Update references in "select" (normal columns plus aliases),
    # "group by", "where" and "having".
    self.where.relabel_aliases(change_map)
    self.having.relabel_aliases(change_map)
    for columns in [self.select, self.group_by or []]:
        for pos, col in enumerate(columns):
            if isinstance(col, (list, tuple)):
                old_alias = col[0]
                columns[pos] = (change_map.get(old_alias, old_alias), col[1])
            else:
                col.relabel_aliases(change_map)
    for mapping in [self.aggregates]:
        for key, col in mapping.items():
            if isinstance(col, (list, tuple)):
                old_alias = col[0]
                mapping[key] = (change_map.get(old_alias, old_alias), col[1])
            else:
                col.relabel_aliases(change_map)

    # 2. Rename the alias in the internal table/alias datastructures.
    for old_alias, new_alias in change_map.iteritems():
        alias_data = list(self.alias_map[old_alias])
        alias_data[RHS_ALIAS] = new_alias

        t = self.rev_join_map[old_alias]
        data = list(self.join_map[t])
        data[data.index(old_alias)] = new_alias
        self.join_map[t] = tuple(data)
        self.rev_join_map[new_alias] = t
        del self.rev_join_map[old_alias]
        self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
        del self.alias_refcount[old_alias]
        self.alias_map[new_alias] = tuple(alias_data)
        del self.alias_map[old_alias]

        table_aliases = self.table_map[alias_data[TABLE_NAME]]
        for pos, alias in enumerate(table_aliases):
            if alias == old_alias:
                table_aliases[pos] = new_alias
                break
        for pos, alias in enumerate(self.tables):
            if alias == old_alias:
                self.tables[pos] = new_alias
                break
    for key, alias in self.included_inherited_models.items():
        if alias in change_map:
            self.included_inherited_models[key] = change_map[alias]

    # 3. Update any joins that refer to the old alias.
    for alias, data in self.alias_map.iteritems():
        lhs = data[LHS_ALIAS]
        if lhs in change_map:
            data = list(data)
            data[LHS_ALIAS] = change_map[lhs]
            self.alias_map[alias] = tuple(data)
def bump_prefix(self, exceptions=()):
    """
    Changes the alias prefix to the next letter in the alphabet and
    relabels all the aliases. Even tables that previously had no alias will
    get an alias after this call (it's mostly used for nested queries and
    the outer query will already be using the non-aliased table name).

    Subclasses who create their own prefix should override this method to
    produce a similar result (a new prefix and relabelled aliases).

    The 'exceptions' parameter is a container that holds alias names which
    should not be changed.
    """
    old_ord = ord(self.alias_prefix)
    # Running past 'Z' would produce non-letter prefixes.
    assert old_ord < ord('Z')
    new_prefix = chr(old_ord + 1)
    self.alias_prefix = new_prefix
    renames = {}
    for idx, old_alias in enumerate(self.tables):
        if old_alias not in exceptions:
            fresh = '%s%d' % (new_prefix, idx)
            renames[old_alias] = fresh
            self.tables[idx] = fresh
    self.change_aliases(renames)
def get_initial_alias(self):
    """
    Returns the first alias for this query, after increasing its reference
    count.
    """
    if not self.tables:
        # No tables yet: join in the model's base table with no
        # connecting columns (an unconnected, root join).
        return self.join((None, self.model._meta.db_table, None, None))
    first = self.tables[0]
    self.ref_alias(first)
    return first
def count_active_tables(self):
    """
    Returns the number of tables in this query with a non-zero reference
    count.
    """
    # Sum over a generator instead of materialising a throwaway list of
    # 1s (same result, no intermediate allocation).
    return sum(1 for count in self.alias_refcount.itervalues() if count)
def join(self, connection, always_create=False, exclusions=(),
        promote=False, outer_if_first=False, nullable=False, reuse=None):
    """
    Returns an alias for the join in 'connection', either reusing an
    existing alias for that join or creating a new one. 'connection' is a
    tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
    table alias or a table name. The join corresponds to the SQL equivalent
    of::

        lhs.lhs_col = table.col

    If 'always_create' is True and 'reuse' is None, a new alias is always
    created, regardless of whether one already exists or not. If
    'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
    matches the connection will be returned, if possible. If
    'always_create' is False, the first existing alias that matches the
    'connection' is returned, if any. Otherwise a new join is created.

    If 'exclusions' is specified, it is something satisfying the container
    protocol ("foo in exclusions" must work) and specifies a list of
    aliases that should not be returned, even if they satisfy the join.

    If 'promote' is True, the join type for the alias will be LOUTER (if
    the alias previously existed, the join type will be promoted from INNER
    to LOUTER, if necessary).

    If 'outer_if_first' is True and a new join is created, it will have the
    LOUTER join type. This is used when joining certain types of querysets
    and Q-objects together.

    If 'nullable' is True, the join can potentially involve NULL values and
    is a candidate for promotion (to "left outer") when combining querysets.
    """
    lhs, table, lhs_col, col = connection
    # Resolve 'lhs' to its underlying table name when it is an alias.
    if lhs in self.alias_map:
        lhs_table = self.alias_map[lhs][TABLE_NAME]
    else:
        lhs_table = lhs
    if reuse and always_create and table in self.table_map:
        # Convert the 'reuse' case to be "exclude everything but the
        # reusable set, minus exclusions, for this table".
        exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
        always_create = False
    # The identity of a join: which tables and columns it connects.
    t_ident = (lhs_table, table, lhs_col, col)
    if not always_create:
        # Try to reuse an existing alias recorded for this join identity.
        for alias in self.join_map.get(t_ident, ()):
            if alias not in exclusions:
                if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
                    # The LHS of this join tuple is no longer part of the
                    # query, so skip this possibility.
                    continue
                if self.alias_map[alias][LHS_ALIAS] != lhs:
                    continue
                self.ref_alias(alias)
                if promote:
                    self.promote_alias(alias)
                return alias
    # No reuse is possible, so we need a new alias.
    alias, _ = self.table_alias(table, True)
    if not lhs:
        # Not all tables need to be joined to anything. No join type
        # means the later columns are ignored.
        join_type = None
    elif promote or outer_if_first:
        join_type = self.LOUTER
    else:
        join_type = self.INNER
    # Join tuple layout: (table, alias, join_type, lhs, lhs_col, col,
    # nullable) -- read elsewhere via the TABLE_NAME/LHS_ALIAS/... indexes.
    join = (table, alias, join_type, lhs, lhs_col, col, nullable)
    self.alias_map[alias] = join
    if t_ident in self.join_map:
        self.join_map[t_ident] += (alias,)
    else:
        self.join_map[t_ident] = (alias,)
    # Keep the reverse mapping (alias -> join identity) in sync.
    self.rev_join_map[alias] = t_ident
    return alias
def setup_inherited_models(self):
    """
    If the model that is the basis for this QuerySet inherits other models,
    we need to ensure that those other models have their tables included in
    the query.

    We do this as a separate step so that subclasses know which
    tables are going to be active in the query, without needing to compute
    all the select columns (this method is called from pre_sql_setup(),
    whereas column determination is a later part, and side-effect, of
    as_sql()).
    """
    opts = self.model._meta
    root_alias = self.tables[0]
    # Maps parent model -> alias; None stands for the root model itself.
    seen = {None: root_alias}
    # Skip all proxies up to the concrete proxied model.
    proxied_model = get_proxied_model(opts)
    for _field, model in opts.get_fields_with_model():
        if model in seen:
            continue
        if model is proxied_model:
            # Proxies share the root model's table; no extra join.
            seen[model] = root_alias
        else:
            link_field = opts.get_ancestor_link(model)
            seen[model] = self.join(
                    (root_alias, model._meta.db_table,
                     link_field.column, model._meta.pk.column))
    self.included_inherited_models = seen
def remove_inherited_models(self):
    """
    Undoes the effects of setup_inherited_models(). Should be called
    whenever select columns (self.select) are set explicitly.
    """
    # The falsy (None) key marks the root alias, which keeps its reference.
    doomed = [alias for model, alias in
            self.included_inherited_models.items() if model]
    for alias in doomed:
        self.unref_alias(alias)
    self.included_inherited_models = {}
def need_force_having(self, q_object):
    """
    Returns whether or not all elements of this q_object need to be put
    together in the HAVING clause.
    """
    # A single aggregate reference anywhere in the tree (found by
    # recursing into sub-nodes) forces the whole q_object into HAVING.
    return any(
            self.need_force_having(child) if isinstance(child, Node)
            else child[0].split(LOOKUP_SEP)[0] in self.aggregates
            for child in q_object.children)
def add_aggregate(self, aggregate, model, alias, is_summary):
    """
    Adds a single aggregate expression to the Query.

    'aggregate' is the aggregate instance, 'model' the model it is resolved
    against, 'alias' the name it is published under, and 'is_summary'
    distinguishes a terminal (summary) aggregate from an annotation.
    """
    opts = model._meta
    field_list = aggregate.lookup.split(LOOKUP_SEP)
    if len(field_list) == 1 and aggregate.lookup in self.aggregates:
        # Aggregate is over an annotation
        field_name = field_list[0]
        col = field_name
        source = self.aggregates[field_name]
        if not is_summary:
            # Annotating over an existing aggregate is not supported.
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                aggregate.name, field_name, field_name))
    elif ((len(field_list) > 1) or
        (field_list[0] not in [i.name for i in opts.fields]) or
        self.group_by is None or
        not is_summary):
        # If:
        # - the field descriptor has more than one part (foo__bar), or
        # - the field descriptor is referencing an m2m/m2o field, or
        # - this is a reference to a model field (possibly inherited), or
        # - this is an annotation over a model field
        # then we need to explore the joins that are required.
        field, source, opts, join_list, last, _ = self.setup_joins(
            field_list, opts, self.get_initial_alias(), False)
        # Process the join chain to see if it can be trimmed
        col, _, join_list = self.trim_joins(source, join_list, last, False)
        # If the aggregate references a model or field that requires a join,
        # those joins must be LEFT OUTER - empty join rows must be returned
        # in order for zeros to be returned for those aggregates.
        for column_alias in join_list:
            self.promote_alias(column_alias, unconditional=True)
        # Qualify the column with the last alias in the join chain.
        col = (join_list[-1], col)
    else:
        # The simplest cases. No joins required -
        # just reference the provided column alias.
        field_name = field_list[0]
        source = opts.get_field(field_name)
        col = field_name
    # Add the aggregate to the query
    aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
        can_reuse=None, process_extras=True, force_having=False):
    """
    Add a single filter to the query. The 'filter_expr' is a pair:
    (filter_string, value). E.g. ('name__contains', 'fred')

    If 'negate' is True, this is an exclude() filter. It's important to
    note that this method does not negate anything in the where-clause
    object when inserting the filter constraints. This is because negated
    filters often require multiple calls to add_filter() and the negation
    should only happen once. So the caller is responsible for this (the
    caller will normally be add_q(), so that as an example).

    If 'trim' is True, we automatically trim the final join group (used
    internally when constructing nested queries).

    If 'can_reuse' is a set, we are processing a component of a
    multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
    will be a set of table aliases that can be reused in this filter, even
    if we would otherwise force the creation of new aliases for a join
    (needed for nested Q-filters). The set is updated by this method.

    If 'process_extras' is set, any extra filters returned from the table
    joining process will be processed. This parameter is set to False
    during the processing of extra filters to avoid infinite recursion.

    If 'force_having' is True, the constraint is placed in the HAVING
    clause regardless of whether the value references an aggregate.
    """
    arg, value = filter_expr
    parts = arg.split(LOOKUP_SEP)
    if not parts:
        raise FieldError("Cannot parse keyword query %r" % arg)
    # Work out the lookup type and remove it from 'parts', if necessary.
    if len(parts) == 1 or parts[-1] not in self.query_terms:
        lookup_type = 'exact'
    else:
        lookup_type = parts.pop()
    # By default, this is a WHERE clause. If an aggregate is referenced
    # in the value, the filter will be promoted to a HAVING
    having_clause = False
    # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
    # uses of None as a query value.
    if value is None:
        if lookup_type != 'exact':
            raise ValueError("Cannot use None as a query value")
        lookup_type = 'isnull'
        value = True
    elif callable(value):
        # Callables are evaluated at filter time.
        value = value()
    elif hasattr(value, 'evaluate'):
        # If value is a query expression, evaluate it
        value = SQLEvaluator(value, self)
        having_clause = value.contains_aggregate
    # Filters on an aggregate annotation go straight into HAVING.
    if parts[0] in self.aggregates:
        aggregate = self.aggregates[parts[0]]
        entry = self.where_class()
        entry.add((aggregate, lookup_type, value), AND)
        if negate:
            entry.negate()
        self.having.add(entry, connector)
        return
    opts = self.get_meta()
    alias = self.get_initial_alias()
    allow_many = trim or not negate
    try:
        field, target, opts, join_list, last, extra_filters = self.setup_joins(
                parts, opts, alias, True, allow_many, can_reuse=can_reuse,
                negate=negate, process_extras=process_extras)
    except MultiJoin, e:
        # Excluding across a multi-valued relation requires a subquery.
        self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                can_reuse)
        return
    table_promote = False
    join_promote = False
    if (lookup_type == 'isnull' and value is True and not negate and
            len(join_list) > 1):
        # If the comparison is against NULL, we may need to use some left
        # outer joins when creating the join chain. This is only done when
        # needed, as it's less efficient at the database level.
        self.promote_alias_chain(join_list)
        join_promote = True
    # Process the join list to see if we can remove any inner joins from
    # the far end (fewer tables in a query is better).
    col, alias, join_list = self.trim_joins(target, join_list, last, trim)
    if connector == OR:
        # Some joins may need to be promoted when adding a new filter to a
        # disjunction. We walk the list of new joins and where it diverges
        # from any previous joins (ref count is 1 in the table list), we
        # make the new additions (and any existing ones not used in the new
        # join list) an outer join.
        join_it = iter(join_list)
        table_it = iter(self.tables)
        # Skip the initial (root) alias in both sequences.
        join_it.next(), table_it.next()
        unconditional = False
        for join in join_it:
            table = table_it.next()
            # Once we hit an outer join, all subsequent joins must
            # also be promoted, regardless of whether they have been
            # promoted as a result of this pass through the tables.
            unconditional = (unconditional or
                    self.alias_map[join][JOIN_TYPE] == self.LOUTER)
            if join == table and self.alias_refcount[join] > 1:
                # We have more than one reference to this join table.
                # This means that we are dealing with two different query
                # subtrees, so we don't need to do any join promotion.
                continue
            join_promote = join_promote or self.promote_alias(join, unconditional)
            if table != join:
                table_promote = self.promote_alias(table)
                # We only get here if we have found a table that exists
                # in the join list, but isn't on the original tables list.
                # This means we've reached the point where we only have
                # new tables, so we can break out of this promotion loop.
                break
        self.promote_alias_chain(join_it, join_promote)
        self.promote_alias_chain(table_it, table_promote or join_promote)
    if having_clause or force_having:
        # A HAVING constraint also needs its column in GROUP BY.
        if (alias, col) not in self.group_by:
            self.group_by.append((alias, col))
        self.having.add((Constraint(alias, col, field), lookup_type, value),
                connector)
    else:
        self.where.add((Constraint(alias, col, field), lookup_type, value),
                connector)
    if negate:
        self.promote_alias_chain(join_list)
        if lookup_type != 'isnull':
            if len(join_list) > 1:
                for alias in join_list:
                    if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
                        j_col = self.alias_map[alias][RHS_JOIN_COL]
                        entry = self.where_class()
                        entry.add(
                            (Constraint(alias, j_col, None), 'isnull', True),
                            AND
                        )
                        entry.negate()
                        self.where.add(entry, AND)
                        break
            if not (lookup_type == 'in'
                        and not hasattr(value, 'as_sql')
                        and not hasattr(value, '_as_sql')
                        and not value) and field.null:
                # Leaky abstraction artifact: We have to specifically
                # exclude the "foo__in=[]" case from this handling, because
                # it's short-circuited in the Where class.
                # We also need to handle the case where a subquery is provided
                self.where.add((Constraint(alias, col, None), 'isnull', False), AND)
    if can_reuse is not None:
        can_reuse.update(join_list)
    if process_extras:
        for filter in extra_filters:
            self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                    process_extras=False)
def add_q(self, q_object, used_aliases=None, force_having=False):
    """
    Adds a Q-object to the current filter.

    Can also be used to add anything that has an 'add_to_query()' method.

    'used_aliases' is the set of table aliases that may be reused across
    the components of the Q-object; 'force_having' pushes all resulting
    constraints into the HAVING clause.
    """
    if used_aliases is None:
        used_aliases = self.used_aliases
    if hasattr(q_object, 'add_to_query'):
        # Complex custom objects are responsible for adding themselves.
        q_object.add_to_query(self, used_aliases)
    else:
        if self.where and q_object.connector != AND and len(q_object) > 1:
            # Wrap the existing WHERE contents in a subtree so the new
            # (non-AND) pieces combine correctly with what's already there.
            self.where.start_subtree(AND)
            subtree = True
        else:
            subtree = False
        # The first child always attaches with AND; subsequent children
        # use the q_object's own connector (set at the loop's end).
        connector = AND
        if q_object.connector == OR and not force_having:
            force_having = self.need_force_having(q_object)
        for child in q_object.children:
            if connector == OR:
                # Snapshot refcounts so aliases untouched by this disjunct
                # can be promoted afterwards.
                refcounts_before = self.alias_refcount.copy()
            if force_having:
                self.having.start_subtree(connector)
            else:
                self.where.start_subtree(connector)
            if isinstance(child, Node):
                self.add_q(child, used_aliases, force_having=force_having)
            else:
                self.add_filter(child, connector, q_object.negated,
                        can_reuse=used_aliases, force_having=force_having)
            if force_having:
                self.having.end_subtree()
            else:
                self.where.end_subtree()
            if connector == OR:
                # Aliases that were newly added or not used at all need to
                # be promoted to outer joins if they are nullable relations.
                # (they shouldn't turn the whole conditional into the empty
                # set just because they don't match anything).
                self.promote_unused_aliases(refcounts_before, used_aliases)
            connector = q_object.connector
        if q_object.negated:
            self.where.negate()
        if subtree:
            self.where.end_subtree()
    if self.filter_is_sticky:
        self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
        allow_explicit_fk=False, can_reuse=None, negate=False,
        process_extras=True):
    """
    Compute the necessary table joins for the passage through the fields
    given in 'names'. 'opts' is the Options class for the current model
    (which gives the table we are joining to), 'alias' is the alias for the
    table we are joining to. If dupe_multis is True, any many-to-many or
    many-to-one joins will always create a new alias (necessary for
    disjunctive filters). If can_reuse is not None, it's a list of aliases
    that can be reused in these joins (nothing else can be reused in this
    case). Finally, 'negate' is used in the same sense as for add_filter()
    -- it indicates an exclude() filter, or something similar. It is only
    passed in here so that it can be passed to a field's extra_filter() for
    customised behaviour.

    Returns a 6-tuple: the final field involved in the join, the target
    database column (used for any 'where' constraint), the final 'opts'
    value, the list of join aliases, the 'last' offsets list (consumed by
    trim_joins()) and any extra filters produced by the traversed fields.
    """
    joins = [alias]
    last = [0]
    dupe_set = set()
    exclusions = set()
    extra_filters = []
    int_alias = None
    for pos, name in enumerate(names):
        # Don't let this step reuse the aliases created by the previous
        # step of the walk.
        if int_alias is not None:
            exclusions.add(int_alias)
        exclusions.add(alias)
        # Record where this name component's joins start.
        last.append(len(joins))
        if name == 'pk':
            name = opts.pk.name
        try:
            field, model, direct, m2m = opts.get_field_by_name(name)
        except FieldDoesNotExist:
            for f in opts.fields:
                if allow_explicit_fk and name == f.attname:
                    # XXX: A hack to allow foo_id to work in values() for
                    # backwards compatibility purposes. If we dropped that
                    # feature, this could be removed.
                    field, model, direct, m2m = opts.get_field_by_name(f.name)
                    break
            else:
                names = opts.get_all_field_names() + self.aggregate_select.keys()
                raise FieldError("Cannot resolve keyword %r into field. "
                        "Choices are: %s" % (name, ", ".join(names)))
        if not allow_many and (m2m or not direct):
            # Back out the partially-built join chain before bailing out.
            for alias in joins:
                self.unref_alias(alias)
            raise MultiJoin(pos + 1)
        if model:
            # The field lives on a base class of the current model.
            # Skip the chain of proxy to the concrete proxied model
            proxied_model = get_proxied_model(opts)
            for int_model in opts.get_base_chain(model):
                if int_model is proxied_model:
                    opts = int_model._meta
                else:
                    lhs_col = opts.parents[int_model].column
                    dedupe = lhs_col in opts.duplicate_targets
                    if dedupe:
                        exclusions.update(self.dupe_avoidance.get(
                                (id(opts), lhs_col), ()))
                        dupe_set.add((opts, lhs_col))
                    opts = int_model._meta
                    alias = self.join((alias, opts.db_table, lhs_col,
                            opts.pk.column), exclusions=exclusions)
                    joins.append(alias)
                    exclusions.add(alias)
                    for (dupe_opts, dupe_col) in dupe_set:
                        self.update_dupe_avoidance(dupe_opts, dupe_col,
                                alias)
        # Per-model cache of join data for this field name.
        cached_data = opts._join_cache.get(name)
        orig_opts = opts
        dupe_col = direct and field.column or field.field.column
        dedupe = dupe_col in opts.duplicate_targets
        if dupe_set or dedupe:
            if dedupe:
                dupe_set.add((opts, dupe_col))
            exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
                    ()))
        if process_extras and hasattr(field, 'extra_filters'):
            extra_filters.extend(field.extra_filters(names, pos, negate))
        if direct:
            if m2m:
                # Many-to-many field defined on the current model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2,
                            to_col2, opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                            field.m2m_target_field_name())[0].column
                    to_col1 = field.m2m_column_name()
                    opts = field.rel.to._meta
                    table2 = opts.db_table
                    from_col2 = field.m2m_reverse_name()
                    to_col2 = opts.get_field_by_name(
                            field.m2m_reverse_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1,
                            to_col1, table2, from_col2, to_col2, opts,
                            target)
                int_alias = self.join((alias, table1, from_col1, to_col1),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                if int_alias == table2 and from_col2 == to_col2:
                    joins.append(int_alias)
                    alias = int_alias
                else:
                    alias = self.join(
                            (int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
            elif field.rel:
                # One-to-one or many-to-one field
                if cached_data:
                    (table, from_col, to_col, opts, target) = cached_data
                else:
                    opts = field.rel.to._meta
                    target = field.rel.get_related_field()
                    table = opts.db_table
                    from_col = field.column
                    to_col = target.column
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                            opts, target)
                alias = self.join((alias, table, from_col, to_col),
                        exclusions=exclusions, nullable=field.null)
                joins.append(alias)
            else:
                # Non-relation fields.
                target = field
                break
        else:
            # Reverse relation: 'field' here is the RelatedObject wrapper;
            # unwrap to the actual field on the related model.
            orig_field = field
            field = field.field
            if m2m:
                # Many-to-many field defined on the target model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2,
                            to_col2, opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                            field.m2m_reverse_target_field_name())[0].column
                    to_col1 = field.m2m_reverse_name()
                    opts = orig_field.opts
                    table2 = opts.db_table
                    from_col2 = field.m2m_column_name()
                    to_col2 = opts.get_field_by_name(
                            field.m2m_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1,
                            to_col1, table2, from_col2, to_col2, opts,
                            target)
                int_alias = self.join((alias, table1, from_col1, to_col1),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                alias = self.join((int_alias, table2, from_col2, to_col2),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                joins.extend([int_alias, alias])
            else:
                # One-to-many field (ForeignKey defined on the target model)
                if cached_data:
                    (table, from_col, to_col, opts, target) = cached_data
                else:
                    local_field = opts.get_field_by_name(
                            field.rel.field_name)[0]
                    opts = orig_field.opts
                    table = opts.db_table
                    from_col = local_field.column
                    to_col = field.column
                    # In case of a recursive FK, use the to_field for
                    # reverse lookups as well
                    if orig_field.model is local_field.model:
                        target = opts.get_field_by_name(
                                field.rel.field_name)[0]
                    else:
                        target = opts.pk
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                            opts, target)
                alias = self.join((alias, table, from_col, to_col),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                joins.append(alias)
        for (dupe_opts, dupe_col) in dupe_set:
            if int_alias is None:
                to_avoid = alias
            else:
                to_avoid = int_alias
            self.update_dupe_avoidance(dupe_opts, dupe_col, to_avoid)
    # If we broke out early (non-relation field), any remaining name parts
    # are invalid lookups.
    if pos != len(names) - 1:
        if pos == len(names) - 2:
            raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
        else:
            raise FieldError("Join on field %r not permitted." % name)
    return field, target, opts, joins, last, extra_filters
def trim_joins(self, target, join_list, last, trim):
    """
    Sometimes joins at the end of a multi-table sequence can be trimmed. If
    the final join is against the same column as we are comparing against,
    and is an inner join, we can go back one step in a join chain and
    compare against the LHS of the join instead (and then repeat the
    optimization). The result, potentially, involves less table joins.

    The 'target' parameter is the final field being joined to, 'join_list'
    is the full list of join aliases.

    The 'last' list contains offsets into 'join_list', corresponding to
    each component of the filter. Many-to-many relations, for example, add
    two tables to the join list and we want to deal with both tables the
    same way, so 'last' has an entry for the first of the two tables and
    then the table immediately after the second table, in that case.

    The 'trim' parameter forces the final piece of the join list to be
    trimmed before anything. See the documentation of add_filter() for
    details about this.

    Returns the final active column and table alias and the new active
    join_list.
    """
    final = len(join_list)
    penultimate = last.pop()
    if penultimate == final:
        # The final offset covers the whole list, so step back one more
        # filter component.
        penultimate = last.pop()
    if trim and len(join_list) > 1:
        # Forced trim: drop the last component's joins entirely and
        # compare against the LHS column of the first removed join.
        extra = join_list[penultimate:]
        join_list = join_list[:penultimate]
        final = penultimate
        penultimate = last.pop()
        col = self.alias_map[extra[0]][LHS_JOIN_COL]
        for alias in extra:
            self.unref_alias(alias)
    else:
        col = target.column
    alias = join_list[-1]
    while final > 1:
        join = self.alias_map[alias]
        # Only an INNER join whose RHS column equals the compared column
        # can be removed without affecting the result.
        if col != join[RHS_JOIN_COL] or join[JOIN_TYPE] != self.INNER:
            break
        self.unref_alias(alias)
        # Shift the comparison to the LHS side of the removed join.
        alias = join[LHS_ALIAS]
        col = join[LHS_JOIN_COL]
        join_list = join_list[:-1]
        final -= 1
        if final == penultimate:
            penultimate = last.pop()
    return col, alias, join_list
def update_dupe_avoidance(self, opts, col, alias):
    """
    For a column that is one of multiple pointing to the same table, update
    the internal data structures to note that this alias shouldn't be used
    for those other columns.

    'opts' is the model Options whose id() keys dupe_avoidance, 'col' the
    duplicated column and 'alias' the table alias to avoid.
    """
    ident = id(opts)
    for name in opts.duplicate_targets[col]:
        # setdefault replaces the original try/except KeyError dance:
        # create the alias set on first sight, then add to it.
        self.dupe_avoidance.setdefault((ident, name), set()).add(alias)
def split_exclude(self, filter_expr, prefix, can_reuse):
    """
    When doing an exclude against any kind of N-to-many relation, we need
    to use a subquery. This method constructs the nested query, given the
    original exclude filter (filter_expr) and the portion up to the first
    N-to-many relation field ('prefix').
    """
    query = Query(self.model)
    query.add_filter(filter_expr, can_reuse=can_reuse)
    # Give the inner query its own alias prefix so its aliases cannot
    # clash with the outer query's.
    query.bump_prefix()
    query.clear_ordering(True)
    query.set_start(prefix)
    # Adding extra check to make sure the selected field will not be null
    # since we are adding a IN <subquery> clause. This prevents the
    # database from tripping over IN (...,NULL,...) selects and returning
    # nothing
    alias, col = query.select[0]
    query.where.add((Constraint(alias, col, None), 'isnull', False), AND)
    # NOT (prefix IN subquery) expresses the original exclude().
    self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
            can_reuse=can_reuse)
    # If there's more than one join in the inner query (before any initial
    # bits were trimmed -- which means the last active table is more than
    # two places into the alias list), we need to also handle the
    # possibility that the earlier joins don't match anything by adding a
    # comparison to NULL (e.g. in
    # Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
    # would otherwise be overlooked).
    active_positions = [pos for (pos, count) in
            enumerate(query.alias_refcount.itervalues()) if count]
    if active_positions[-1] > 1:
        self.add_filter(('%s__isnull' % prefix, False), negate=True,
                trim=True, can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
    """
    Adjusts the limits on the rows retrieved. We use low/high to set these,
    as it makes it more Pythonic to read and write. When the SQL query is
    created, they are converted to the appropriate offset and limit values.

    Any limits passed in here are applied relative to the existing
    constraints. So low is added to the current low value and both will be
    clamped to any existing high value.
    """
    if high is not None:
        candidate = self.low_mark + high
        if self.high_mark is None:
            self.high_mark = candidate
        else:
            self.high_mark = min(self.high_mark, candidate)
    if low is not None:
        candidate = self.low_mark + low
        if self.high_mark is not None:
            candidate = min(self.high_mark, candidate)
        self.low_mark = candidate
def clear_limits(self):
    """
    Clears any existing limits.
    """
    # Back to "no offset, no upper bound".
    self.low_mark = 0
    self.high_mark = None
def can_filter(self):
    """
    Returns True if adding filters to this instance is still possible.

    Typically, this means no limits or offsets have been put on the results.
    """
    return self.high_mark is None and not self.low_mark
def clear_select_fields(self):
    """
    Clears the list of fields to select (but not extra_select columns).
    Some queryset types completely replace any existing list of select
    columns.
    """
    self.select, self.select_fields = [], []
def add_fields(self, field_names, allow_m2m=True):
    """
    Adds the given (model) fields to the select set. The field names are
    added in the order specified.
    """
    alias = self.get_initial_alias()
    opts = self.get_meta()
    try:
        for name in field_names:
            field, target, u2, joins, u3, u4 = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
                    True)
            final_alias = joins[-1]
            col = target.column
            if len(joins) > 1:
                join = self.alias_map[final_alias]
                if col == join[RHS_JOIN_COL]:
                    # The final join can be trimmed: select the equivalent
                    # column on the LHS side of the join instead.
                    self.unref_alias(final_alias)
                    final_alias = join[LHS_ALIAS]
                    col = join[LHS_JOIN_COL]
                    joins = joins[:-1]
            # Promote every join past the root alias to LOUTER.
            self.promote_alias_chain(joins[1:])
            self.select.append((final_alias, col))
            self.select_fields.append(field)
    # NOTE: both handlers rely on 'name' still bound to the failing
    # field name from the loop above.
    except MultiJoin:
        raise FieldError("Invalid field name: '%s'" % name)
    except FieldError:
        names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
        names.sort()
        raise FieldError("Cannot resolve keyword %r into field. "
                "Choices are: %s" % (name, ", ".join(names)))
    self.remove_inherited_models()
def add_ordering(self, *ordering):
    """
    Adds items from the 'ordering' sequence to the query's "order by"
    clause. These items are either field names (not column names) --
    possibly with a direction prefix ('-' or '?') -- or ordinals,
    corresponding to column positions in the 'select' list.

    If 'ordering' is empty, all ordering is cleared from the query.
    """
    bad_items = [item for item in ordering
            if not ORDER_PATTERN.match(item)]
    if bad_items:
        raise FieldError('Invalid order_by arguments: %s' % bad_items)
    if ordering:
        self.order_by.extend(ordering)
    else:
        # An empty call disables the model's default ordering too.
        self.default_ordering = False
def clear_ordering(self, force_empty=False):
    """
    Removes any ordering settings. If 'force_empty' is True, there will be
    no ordering in the resulting query (not even the model's default).
    """
    self.order_by, self.extra_order_by = [], ()
    if force_empty:
        self.default_ordering = False
def set_group_by(self):
    """
    Expands the GROUP BY clause required by the query.

    This will usually be the set of all non-aggregate fields in the
    return data. If the database backend supports grouping by the
    primary key, and the query would be equivalent, the optimization
    will be made automatically.
    """
    # A shallow slice copy of the select columns replaces the original
    # element-by-element append loop (identical result, one step).
    self.group_by = self.select[:]
def add_count_column(self):
    """
    Converts the query to do count(...) or count(distinct(pk)) in order to
    get its size.
    """
    Count = self.aggregates_module.Count
    if self.distinct:
        opts = self.model._meta
        if self.select:
            # Because of SQL portability issues, multi-column, distinct
            # counts need a sub-query -- see get_count() for details.
            assert len(self.select) == 1, \
                    "Cannot add count col with multiple cols in 'select'."
            count = Count(self.select[0], distinct=True)
        else:
            count = Count(
                    (self.join((None, opts.db_table, None, None)),
                     opts.pk.column),
                    is_summary=True, distinct=True)
        # Distinct handling is done in Count(), so don't do it at this
        # level.
        self.distinct = False
    else:
        if self.select:
            assert len(self.select) == 1, \
                    "Cannot add count col with multiple cols in 'select': %r" % self.select
            count = Count(self.select[0])
        else:
            count = Count('*', is_summary=True)
    # Set only aggregate to be the count column.
    # Clear out the select cache to reflect the new unmasked aggregates.
    self.aggregates = {None: count}
    self.set_aggregate_mask(None)
    self.group_by = None
def add_select_related(self, fields):
    """
    Sets up the select_related data structure so that we only select
    certain related models (as opposed to all models, when
    self.select_related=True).
    """
    # Build a nested dict from the lookup paths: 'a__b' and 'a__c'
    # become {'a': {'b': {}, 'c': {}}}.
    tree = {}
    for field in fields:
        node = tree
        for part in field.split(LOOKUP_SEP):
            node = node.setdefault(part, {})
    self.select_related = tree
    self.related_select_cols = []
    self.related_select_fields = []
def add_extra(self, select, select_params, where, params, tables, order_by):
    """
    Adds data to the various extra_* attributes for user-created additions
    to the query.
    """
    if select:
        # We need to pair any placeholder markers in the 'select'
        # dictionary with their parameters in 'select_params' so that
        # subsequent updates to the select dictionary also adjust the
        # parameters appropriately.
        select_pairs = SortedDict()
        if select_params:
            param_iter = iter(select_params)
        else:
            param_iter = iter([])
        for name, entry in select.items():
            entry = force_unicode(entry)
            entry_params = []
            # Consume one parameter for each '%s' placeholder appearing
            # in this entry, left to right.
            pos = entry.find("%s")
            while pos != -1:
                entry_params.append(param_iter.next())
                pos = entry.find("%s", pos + 2)
            select_pairs[name] = (entry, entry_params)
        # This is order preserving, since self.extra_select is a SortedDict.
        self.extra.update(select_pairs)
    if where or params:
        self.where.add(ExtraWhere(where, params), AND)
    if tables:
        self.extra_tables += tuple(tables)
    if order_by:
        self.extra_order_by = order_by
def clear_deferred_loading(self):
    """
    Remove any fields from the deferred loading set.
    """
    # An empty set with the defer flag True means "defer nothing".
    self.deferred_loading = set(), True
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL colum names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = set(field_names).difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = set(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of it's fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT,
we don't actually remove them from the Query since they might be used
later
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
    def set_start(self, start):
        """
        Sets the table from which to start joining. The start position is
        specified by the related attribute from the base model. This will
        automatically set to the select column to be the column linked from the
        previous table.
        This method is primarily for internal use and the error checking isn't
        as friendly as add_filter(). Mostly useful for querying directly
        against the join table of many-to-many relation in a subquery.
        """
        opts = self.model._meta
        alias = self.get_initial_alias()
        # Resolve the 'start' lookup path into a chain of join aliases.
        field, col, opts, joins, last, extra = self.setup_joins(
            start.split(LOOKUP_SEP), opts, alias, False)
        # alias_map entries are indexed by the LHS_JOIN_COL/JOIN_TYPE/...
        # constants; the select column starts as the LHS column of the
        # first real join.
        select_col = self.alias_map[joins[1]][LHS_JOIN_COL]
        select_alias = alias
        # The call to setup_joins added an extra reference to everything in
        # joins. Reverse that.
        for alias in joins:
            self.unref_alias(alias)
        # We might be able to trim some joins from the front of this query,
        # providing that we only traverse "always equal" connections (i.e. rhs
        # is *always* the same value as lhs).
        for alias in joins[1:]:
            join_info = self.alias_map[alias]
            # Stop trimming at the first join that is not an INNER join on
            # the current select column -- anything past it is not
            # guaranteed to be "always equal".
            if (join_info[LHS_JOIN_COL] != select_col
                    or join_info[JOIN_TYPE] != self.INNER):
                break
            self.unref_alias(select_alias)
            # Advance the selection to the right-hand side of the join.
            select_alias = join_info[RHS_ALIAS]
            select_col = join_info[RHS_JOIN_COL]
        self.select = [(select_alias, select_col)]
        self.remove_inherited_models()
def get_order_dir(field, default='ASC'):
    """
    Returns the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    # startswith() instead of field[0] avoids an IndexError when an empty
    # field name slips through; '' simply sorts the default way.
    if field.startswith('-'):
        return field[1:], dirn[1]
    return field, dirn[0]
def setup_join_cache(sender, **kwargs):
    """
    The information needed to join between model fields is something that is
    invariant over the life of the model, so we cache it in the model's Options
    class, rather than recomputing it all the time.
    This method initialises the (empty) cache when the model is created.
    """
    # 'sender' is the newly prepared model class; its _meta Options object
    # carries the per-model join cache.
    sender._meta._join_cache = {}
# Initialise the cache slot as soon as each model class is fully defined.
signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
    """
    A helper function to add "value" to the set of values for "key", whether or
    not "key" already exists.

    Args:
        data: dict mapping keys to sets of values (mutated in place).
        key: the key whose value set should receive 'value'.
        value: the (hashable) value to add.
    """
    # setdefault() creates the set on first use and returns the existing
    # set otherwise, replacing the explicit membership test.
    data.setdefault(key, set()).add(value)
def get_proxied_model(opts):
    """Follow a chain of proxy models starting at 'opts' and return the
    first non-proxy model reached, or None if 'opts' is not a proxy at all.
    """
    proxied = None
    current = opts
    while current.proxy:
        # Step through to the model this proxy stands in for.
        proxied = current.proxy_for_model
        current = proxied._meta
    return proxied
| apache-2.0 |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/unittest/util.py | 89 | 5429 | """Various utility functions."""
from collections import namedtuple, OrderedDict
from os.path import commonprefix
# NOTE: unittest inspects frame globals for '__unittest' when pruning
# tracebacks, so frames from this module are hidden in failure output.
__unittest = True
# Maximum repr() length shown in failure messages before shortening kicks in.
_MAX_LENGTH = 80
# Assumed width of one '[N chars]' placeholder inserted by _shorten().
_PLACEHOLDER_LEN = 12
# Minimum number of characters kept at the start / end of a shortened repr.
_MIN_BEGIN_LEN = 5
_MIN_END_LEN = 5
# Minimum length of a common prefix that is worth keeping visible.
_MIN_COMMON_LEN = 5
# Remaining budget for the differing middle once both placeholders and the
# kept begin/common/end sections are accounted for.
_MIN_DIFF_LEN = _MAX_LENGTH - \
               (_MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN +
                _PLACEHOLDER_LEN + _MIN_END_LEN)
assert _MIN_DIFF_LEN >= 0
def _shorten(s, prefixlen, suffixlen):
    """Elide the middle of 's', keeping 'prefixlen' leading and 'suffixlen'
    trailing characters, when the elision actually saves space."""
    elided = len(s) - prefixlen - suffixlen
    # Only worth replacing when the elided run is longer than the
    # '[N chars]' placeholder that stands in for it.
    if elided > _PLACEHOLDER_LEN:
        return '%s[%d chars]%s' % (s[:prefixlen], elided,
                                   s[len(s) - suffixlen:])
    return s
def _common_shorten_repr(*args):
    """Return the reprs of *args, shortened so the longest fits _MAX_LENGTH.

    A prefix common to all reprs is collapsed only once, keeping the
    differing tails visible in assertion failure messages.
    """
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        # Everything already fits; nothing to shorten.
        return args
    prefix = commonprefix(args)
    prefixlen = len(prefix)
    # Budget left for the shared prefix after the longest differing tail,
    # the minimum kept beginning and one placeholder are accounted for.
    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        # Tails are short enough: shortening only the shared prefix suffices.
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)
    # Otherwise shorten both the shared prefix and each differing tail.
    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args)
def safe_repr(obj, short=False):
    """Return repr(obj), falling back to the default object repr when the
    object's own __repr__ raises; truncate when 'short' is requested."""
    try:
        result = repr(obj)
    except Exception:
        # A broken __repr__ must not break test reporting.
        result = object.__repr__(obj)
    if short and len(result) >= _MAX_LENGTH:
        return result[:_MAX_LENGTH] + ' [truncated]...'
    return result
def strclass(cls):
    """Return the fully qualified 'module.ClassName' string for a class."""
    return '{}.{}'.format(cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    missing = []
    unexpected = []
    i = j = 0
    m, n = len(expected), len(actual)
    # Classic sorted-merge walk over both lists.
    while i < m and j < n:
        e = expected[i]
        a = actual[j]
        if e < a:
            # Present in 'expected' only; record once, skip duplicates.
            missing.append(e)
            i += 1
            while i < m and expected[i] == e:
                i += 1
        elif e > a:
            # Present in 'actual' only; record once, skip duplicates.
            unexpected.append(a)
            j += 1
            while j < n and actual[j] == a:
                j += 1
        else:
            # Present in both; consume the full run of equal values on
            # each side.
            i += 1
            while i < m and expected[i] == e:
                i += 1
            j += 1
            while j < n and actual[j] == a:
                j += 1
    # Whatever remains on either side has no counterpart on the other
    # (remainders are appended verbatim, matching the original behaviour).
    missing.extend(expected[i:])
    unexpected.extend(actual[j:])
    return missing, unexpected
def unorderable_list_difference(expected, actual):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    As it does a linear search per item (remove) it
    has O(n*n) performance.
    """
    missing = []
    # Consume 'expected' from the back; anything without a match in
    # 'actual' is missing.
    while expected:
        item = expected.pop()
        if item in actual:
            actual.remove(item)
        else:
            missing.append(item)
    # anything left in actual is unexpected
    return missing, actual
def three_way_cmp(x, y):
    """Return -1 if x < y, 0 if x == y and 1 if x > y"""
    # Booleans are ints, so the two comparisons combine directly.
    return int(x > y) - int(x < y)
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
# elements need not be hashable
s, t = list(actual), list(expected)
m, n = len(s), len(t)
NULL = object()
result = []
for i, elem in enumerate(s):
if elem is NULL:
continue
cnt_s = cnt_t = 0
for j in range(i, m):
if s[j] == elem:
cnt_s += 1
s[j] = NULL
for j, other_elem in enumerate(t):
if other_elem == elem:
cnt_t += 1
t[j] = NULL
if cnt_s != cnt_t:
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for i, elem in enumerate(t):
if elem is NULL:
continue
cnt_t = 0
for j in range(i, n):
if t[j] == elem:
cnt_t += 1
t[j] = NULL
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
def _ordered_count(iterable):
'Return dict of element counts, in the order they were first seen'
c = OrderedDict()
for elem in iterable:
c[elem] = c.get(elem, 0) + 1
return c
def _count_diff_hashable(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # Elements must be hashable so the counts can come from dict lookups.
    actual_counts = _ordered_count(actual)
    expected_counts = _ordered_count(expected)
    diffs = []
    for elem, act_cnt in actual_counts.items():
        exp_cnt = expected_counts.get(elem, 0)
        if act_cnt != exp_cnt:
            diffs.append(_Mismatch(act_cnt, exp_cnt, elem))
    # Elements that only ever appeared on the expected side.
    for elem, exp_cnt in expected_counts.items():
        if elem not in actual_counts:
            diffs.append(_Mismatch(0, exp_cnt, elem))
    return diffs
| gpl-2.0 |
takeshineshiro/heat | heat/tests/test_instance_group.py | 1 | 17980 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine.resources.openstack.heat import instance_group as instgrp
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.tests.autoscaling import inline_templates
from heat.tests import common
from heat.tests import utils
class TestInstanceGroup(common.HeatTestCase):
    """Unit tests for the OS::Heat::InstanceGroup resource handlers."""
    def setUp(self):
        """Build a size-2 InstanceGroup resource from the inline template."""
        super(TestInstanceGroup, self).setUp()
        t = template_format.parse(inline_templates.as_template)
        self.stack = utils.parse_stack(t, params=inline_templates.as_params)
        self.defn = rsrc_defn.ResourceDefinition(
            'asg', 'OS::Heat::InstanceGroup',
            {'Size': 2, 'AvailabilityZones': ['zoneb'],
             'LaunchConfigurationName': 'config'})
        self.instance_group = instgrp.InstanceGroup('asg',
                                                    self.defn, self.stack)
    def test_update_timeout(self):
        """_update_timeout() subtracts inter-batch pauses from the total."""
        self.stack.timeout_secs = mock.Mock(return_value=100)
        # there are 3 batches, so we need 2 pauses by 20 sec
        # result timeout should be 100 - 2 * 20 = 60
        self.assertEqual(60, self.instance_group._update_timeout(
            batch_cnt=3, pause_sec=20))
    def test_child_template(self):
        """child_template() delegates to _create_template with the size."""
        self.instance_group._create_template = mock.Mock(return_value='tpl')
        self.assertEqual('tpl', self.instance_group.child_template())
        self.instance_group._create_template.assert_called_once_with(2)
    def test_child_params(self):
        """child_params() maps the scaled resource to AWS::EC2::Instance."""
        expected = {'parameters': {},
                    'resource_registry': {
                        'OS::Heat::ScaledResource': 'AWS::EC2::Instance'}}
        self.assertEqual(expected, self.instance_group.child_params())
    def test_tags_default(self):
        """Without user tags, only the metering.groupname tag is produced."""
        expected = [{'Value': u'asg',
                     'Key': 'metering.groupname'}]
        self.assertEqual(expected, self.instance_group._tags())
    def test_tags_with_extra(self):
        """User tags are kept and the metering tag is appended after them."""
        self.instance_group.properties.data['Tags'] = [
            {'Key': 'fee', 'Value': 'foo'}]
        expected = [{'Key': 'fee', 'Value': 'foo'},
                    {'Value': u'asg',
                     'Key': 'metering.groupname'}]
        self.assertEqual(expected, self.instance_group._tags())
    def test_tags_with_metering(self):
        """A user-supplied metering.* tag suppresses the default one."""
        self.instance_group.properties.data['Tags'] = [
            {'Key': 'metering.fee', 'Value': 'foo'}]
        expected = [{'Key': 'metering.fee', 'Value': 'foo'}]
        self.assertEqual(expected, self.instance_group._tags())
    def test_validate_launch_conf(self):
        """Creation fails when LaunchConfigurationName cannot be resolved."""
        props = self.instance_group.properties.data
        props['LaunchConfigurationName'] = 'urg_i_cant_spell'
        creator = scheduler.TaskRunner(self.instance_group.create)
        error = self.assertRaises(exception.ResourceFailure, creator)
        self.assertIn('(urg_i_cant_spell) reference can not be found.',
                      six.text_type(error))
    def test_validate_launch_conf_no_ref(self):
        """Creation fails when the named config is not a proper reference."""
        props = self.instance_group.properties.data
        props['LaunchConfigurationName'] = 'JobServerConfig'
        creator = scheduler.TaskRunner(self.instance_group.create)
        error = self.assertRaises(exception.ResourceFailure, creator)
        self.assertIn('(JobServerConfig) reference can not be',
                      six.text_type(error))
    def test_handle_create(self):
        """handle_create() validates the config then builds the template."""
        self.instance_group.create_with_template = mock.Mock(return_value=None)
        self.instance_group.validate_launchconfig = mock.Mock(
            return_value=None)
        self.instance_group._create_template = mock.Mock(return_value='{}')
        self.instance_group.handle_create()
        self.instance_group.validate_launchconfig.assert_called_once_with()
        self.instance_group._create_template.assert_called_once_with(2)
        self.instance_group.create_with_template.assert_called_once_with('{}')
    def test_update_in_failed(self):
        """Updating a CREATE_FAILED group resizes it to the current size."""
        self.instance_group.state_set('CREATE', 'FAILED')
        # to update the failed instance_group
        self.instance_group.resize = mock.Mock(return_value=None)
        self.instance_group.handle_update(self.defn, None, None)
        self.instance_group.resize.assert_called_once_with(2)
    def test_handle_delete(self):
        """handle_delete() removes the nested stack."""
        self.instance_group.delete_nested = mock.Mock(return_value=None)
        self.instance_group.handle_delete()
        self.instance_group.delete_nested.assert_called_once_with()
    def test_handle_update_size(self):
        """A Size-only property diff triggers a resize to the new value."""
        self.instance_group._try_rolling_update = mock.Mock(return_value=None)
        self.instance_group.resize = mock.Mock(return_value=None)
        props = {'Size': 5}
        defn = rsrc_defn.ResourceDefinition(
            'nopayload',
            'AWS::AutoScaling::AutoScalingGroup',
            props)
        self.instance_group.handle_update(defn, None, props)
        self.instance_group.resize.assert_called_once_with(5)
    def test_attributes(self):
        """InstanceList resolves to comma-separated member IP addresses."""
        mock_members = self.patchobject(grouputils, 'get_members')
        instances = []
        for ip_ex in six.moves.range(1, 4):
            inst = mock.Mock()
            inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
            instances.append(inst)
        mock_members.return_value = instances
        res = self.instance_group._resolve_attribute('InstanceList')
        self.assertEqual('2.1.3.1,2.1.3.2,2.1.3.3', res)
class TestLaunchConfig(common.HeatTestCase):
    """Tests for the AWS::AutoScaling::LaunchConfiguration resource."""
    def create_resource(self, t, stack, resource_name):
        """Validate and create the named resource, asserting completion."""
        # subsequent resources may need to reference previous created resources
        # use the stack's resource objects instead of instantiating new ones
        rsrc = stack[resource_name]
        self.assertIsNone(rsrc.validate())
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        return rsrc
    def test_update_metadata_replace(self):
        """Updating the config's metadata causes a config replacement."""
        lc_template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Resources": {
    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : "foo",
        "InstanceType" : "m1.large",
        "KeyName" : "test",
      }
    }
  }
}
'''
        self.stub_ImageConstraint_validate()
        self.stub_FlavorConstraint_validate()
        self.stub_KeypairConstraint_validate()
        t = template_format.parse(lc_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'JobServerConfig')
        props = copy.copy(rsrc.properties.data)
        metadata = copy.copy(rsrc.metadata_get())
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
                                                      rsrc.type(),
                                                      props,
                                                      metadata)
        # Change nothing in the first update
        scheduler.TaskRunner(rsrc.update, update_snippet)()
        self.assertEqual('bar', metadata['foo'])
        metadata['foo'] = 'wibble'
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
                                                      rsrc.type(),
                                                      props,
                                                      metadata)
        # Changing metadata in the second update triggers UpdateReplace
        updater = scheduler.TaskRunner(rsrc.update, update_snippet)
        self.assertRaises(exception.UpdateReplace, updater)
class LoadbalancerReloadTest(common.HeatTestCase):
    """Tests for InstanceGroup._lb_reload() against various LB resources."""
    def test_Instances(self):
        """An AWS load balancer gets its 'Instances' property refreshed."""
        t = template_format.parse(inline_templates.as_template)
        stack = utils.parse_stack(t)
        lb = stack['ElasticLoadBalancer']
        lb.update = mock.Mock(return_value=None)
        defn = rsrc_defn.ResourceDefinition(
            'asg', 'OS::Heat::InstanceGroup',
            {'Size': 2,
             'AvailabilityZones': ['zoneb'],
             "LaunchConfigurationName": "LaunchConfig",
             "LoadBalancerNames": ["ElasticLoadBalancer"]})
        group = instgrp.InstanceGroup('asg', defn, stack)
        mock_members = self.patchobject(grouputils, 'get_member_refids')
        mock_members.return_value = ['aaaa', 'bbb']
        expected = rsrc_defn.ResourceDefinition(
            'ElasticLoadBalancer',
            'AWS::ElasticLoadBalancing::LoadBalancer',
            {'Instances': ['aaaa', 'bbb'],
             'Listeners': [{'InstancePort': u'80',
                            'LoadBalancerPort': u'80',
                            'Protocol': 'HTTP'}],
             'AvailabilityZones': ['nova']},
            metadata={},
            deletion_policy='Delete'
        )
        group._lb_reload()
        mock_members.assert_called_once_with(group, exclude=[])
        lb.update.assert_called_once_with(expected)
    def test_members(self):
        """A Neutron load balancer gets its 'members' property refreshed."""
        t = template_format.parse(inline_templates.as_template)
        t['Resources']['ElasticLoadBalancer'] = {
            'Type': 'OS::Neutron::LoadBalancer',
            'Properties': {
                'protocol_port': 8080,
            }
        }
        stack = utils.parse_stack(t)
        lb = stack['ElasticLoadBalancer']
        lb.update = mock.Mock(return_value=None)
        defn = rsrc_defn.ResourceDefinition(
            'asg', 'OS::Heat::InstanceGroup',
            {'Size': 2,
             'AvailabilityZones': ['zoneb'],
             "LaunchConfigurationName": "LaunchConfig",
             "LoadBalancerNames": ["ElasticLoadBalancer"]})
        group = instgrp.InstanceGroup('asg', defn, stack)
        mock_members = self.patchobject(grouputils, 'get_member_refids')
        mock_members.return_value = ['aaaa', 'bbb']
        expected = rsrc_defn.ResourceDefinition(
            'ElasticLoadBalancer',
            'OS::Neutron::LoadBalancer',
            {'protocol_port': 8080,
             'members': ['aaaa', 'bbb']},
            metadata={},
            deletion_policy='Delete')
        group._lb_reload()
        mock_members.assert_called_once_with(group, exclude=[])
        lb.update.assert_called_once_with(expected)
    def test_lb_reload_invalid_resource(self):
        """A non-LB resource listed in LoadBalancerNames raises Error."""
        t = template_format.parse(inline_templates.as_template)
        t['Resources']['ElasticLoadBalancer'] = {
            'Type': 'AWS::EC2::Volume',
            'Properties': {
                'AvailabilityZone': 'nova'
            }
        }
        stack = utils.parse_stack(t)
        lb = stack['ElasticLoadBalancer']
        lb.update = mock.Mock(return_value=None)
        defn = rsrc_defn.ResourceDefinition(
            'asg', 'OS::Heat::InstanceGroup',
            {'Size': 2,
             'AvailabilityZones': ['zoneb'],
             "LaunchConfigurationName": "LaunchConfig",
             "LoadBalancerNames": ["ElasticLoadBalancer"]})
        group = instgrp.InstanceGroup('asg', defn, stack)
        mock_members = self.patchobject(grouputils, 'get_member_refids')
        mock_members.return_value = ['aaaa', 'bbb']
        error = self.assertRaises(exception.Error,
                                  group._lb_reload)
        self.assertEqual(
            "Unsupported resource 'ElasticLoadBalancer' in "
            "LoadBalancerNames",
            six.text_type(error))
    def test_lb_reload_static_resolve(self):
        """Fn::GetAZs in the LB definition is resolved during the reload."""
        t = template_format.parse(inline_templates.as_template)
        properties = t['Resources']['ElasticLoadBalancer']['Properties']
        properties['AvailabilityZones'] = {'Fn::GetAZs': ''}
        self.patchobject(parser.Stack, 'get_availability_zones',
                         return_value=['abc', 'xyz'])
        mock_members = self.patchobject(grouputils, 'get_member_refids')
        mock_members.return_value = ['aaaabbbbcccc']
        # Check that the Fn::GetAZs is correctly resolved
        expected = {u'Properties': {'Instances': ['aaaabbbbcccc'],
                                    u'Listeners': [{u'InstancePort': u'80',
                                                    u'LoadBalancerPort': u'80',
                                                    u'Protocol': u'HTTP'}],
                                    u'AvailabilityZones': ['abc', 'xyz']},
                    u'DeletionPolicy': 'Delete',
                    u'Metadata': {}}
        stack = utils.parse_stack(t, params=inline_templates.as_params)
        lb = stack['ElasticLoadBalancer']
        lb.state_set(lb.CREATE, lb.COMPLETE)
        lb.handle_update = mock.Mock(return_value=None)
        group = stack['WebServerGroup']
        group._lb_reload()
        lb.handle_update.assert_called_once_with(
            mock.ANY, expected,
            {'Instances': ['aaaabbbbcccc']})
class ReplaceTest(common.HeatTestCase):
    """Scenario tests for InstanceGroup._replace() batching behaviour."""
    # Each scenario maps (min_in_service, batch_size) to the expected number
    # of template updates for a group of two members.
    scenarios = [
        ('1', dict(min_in_service=0, batch_size=1, updates=2)),
        ('2', dict(min_in_service=0, batch_size=2, updates=1)),
        ('3', dict(min_in_service=3, batch_size=1, updates=3)),
        ('4', dict(min_in_service=3, batch_size=2, updates=2))]
    def setUp(self):
        """Create a size-2 group with mocked nested stack and LB reload."""
        super(ReplaceTest, self).setUp()
        t = template_format.parse(inline_templates.as_template)
        self.stack = utils.parse_stack(t, params=inline_templates.as_params)
        lc = self.create_launch_config(t, self.stack)
        lcid = lc.FnGetRefId()
        self.defn = rsrc_defn.ResourceDefinition(
            'asg', 'OS::Heat::InstanceGroup',
            {'Size': 2, 'AvailabilityZones': ['zoneb'],
             'LaunchConfigurationName': lcid})
        self.group = instgrp.InstanceGroup('asg', self.defn, self.stack)
        self.group._lb_reload = mock.Mock()
        self.group.update_with_template = mock.Mock()
        self.group.check_update_complete = mock.Mock()
        self.group._nested = self.get_fake_nested_stack()
    def create_launch_config(self, t, stack):
        """Create and return the stack's LaunchConfig resource."""
        self.stub_ImageConstraint_validate()
        self.stub_FlavorConstraint_validate()
        self.stub_SnapshotConstraint_validate()
        rsrc = stack['LaunchConfig']
        self.assertIsNone(rsrc.validate())
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        return rsrc
    def get_fake_nested_stack(self):
        """Return a fake two-member nested stack for the group."""
        nested_t = '''
        heat_template_version: 2013-05-23
        description: AutoScaling Test
        resources:
          one:
            type: ResourceWithPropsAndAttrs
            properties:
              Foo: hello
          two:
            type: ResourceWithPropsAndAttrs
            properties:
              Foo: fee
        '''
        return utils.parse_stack(template_format.parse(nested_t))
    def test_rolling_updates(self):
        """_replace() issues the expected update and LB reload counts."""
        self.group._replace(self.min_in_service, self.batch_size, 0)
        self.assertEqual(self.updates,
                         len(self.group.update_with_template.call_args_list))
        self.assertEqual(self.updates + 1,
                         len(self.group._lb_reload.call_args_list))
class TestGetBatches(common.HeatTestCase):
    """Scenario tests for InstanceGroup._get_batches().

    Scenario names encode capacity_batchsize_minservice; each expected
    batch is a two-tuple taken straight from _get_batches()'s output.
    """
    scenarios = [
        ('4_1_0', dict(curr_cap=4, bat_size=1, min_serv=0,
                       batches=[(4, 1)] * 4)),
        ('4_1_4', dict(curr_cap=4, bat_size=1, min_serv=4,
                       batches=([(5, 1)] * 4) + [(4, 0)])),
        ('4_1_5', dict(curr_cap=4, bat_size=1, min_serv=5,
                       batches=([(5, 1)] * 4) + [(4, 0)])),
        ('4_2_0', dict(curr_cap=4, bat_size=2, min_serv=0,
                       batches=[(4, 2)] * 2)),
        ('4_2_4', dict(curr_cap=4, bat_size=2, min_serv=4,
                       batches=([(6, 2)] * 2) + [(4, 0)])),
        ('5_2_0', dict(curr_cap=5, bat_size=2, min_serv=0,
                       batches=([(5, 2)] * 2) + [(5, 1)])),
        ('5_2_4', dict(curr_cap=5, bat_size=2, min_serv=4,
                       batches=([(6, 2)] * 2) + [(5, 1)])),
        ('3_2_0', dict(curr_cap=3, bat_size=2, min_serv=0,
                       batches=[(3, 2), (3, 1)])),
        ('3_2_4', dict(curr_cap=3, bat_size=2, min_serv=4,
                       batches=[(5, 2), (4, 1), (3, 0)])),
        ('4_4_0', dict(curr_cap=4, bat_size=4, min_serv=0,
                       batches=[(4, 4)])),
        ('4_5_0', dict(curr_cap=4, bat_size=5, min_serv=0,
                       batches=[(4, 4)])),
        ('4_4_1', dict(curr_cap=4, bat_size=4, min_serv=1,
                       batches=[(5, 4), (4, 0)])),
        ('4_6_1', dict(curr_cap=4, bat_size=6, min_serv=1,
                       batches=[(5, 4), (4, 0)])),
        ('4_4_2', dict(curr_cap=4, bat_size=4, min_serv=2,
                       batches=[(6, 4), (4, 0)])),
        ('4_4_4', dict(curr_cap=4, bat_size=4, min_serv=4,
                       batches=[(8, 4), (4, 0)])),
        ('4_5_6', dict(curr_cap=4, bat_size=5, min_serv=6,
                       batches=[(8, 4), (4, 0)])),
    ]
    def test_get_batches(self):
        """The generated batch sequence matches the scenario's expectation."""
        batches = list(instgrp.InstanceGroup._get_batches(self.curr_cap,
                                                          self.bat_size,
                                                          self.min_serv))
        self.assertEqual(self.batches, batches)
| apache-2.0 |
rabimba/google-appengine-wx-launcher | launcher/platform.py | 28 | 17271 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Platform specific Python for the launcher.
The launcher is a project manager for Google App Engine. This
includes a UI for "New Project" (to create from the template), Run
(locally), and Deploy (to Google). It also includes general client
support such as auto-update of the SDK.
The Mac download of Google App Engine is the Mac Launcher with the SDK
embedded inside. This launcher will be for Windows and Linux.
The Platform class and it's derivatives provides a platform abstraction.
"""
import os
import subprocess
if os.name == 'posix':
import signal
elif os.name == 'nt':
import win32api
import _winreg
class Error(Exception):
  """Error is a base exception class for this module.

  All launcher platform exceptions derive from this class so callers can
  catch them with a single except clause.
  """
class PlatformException(Error):
  """Exceptional platform condition, such as 'unsupported platform'."""
class PlatformUnimplemented(Error):
  """An override of a platform method is not implemented.

  Raised by the Platform base class for methods a subclass must override.
  """
class Platform(object):
  """Pure virtual base class for platform-specific methods."""

  def __init__(self, briefname, exists=os.path.exists):
    """Initialize the platform object.

    Args:
      briefname: Short platform name string (e.g. 'mac', 'linux').
      exists: File-existence predicate; injectable for testing.
    """
    self._briefname = briefname  # short platform name
    self.__exists = exists

  def __call__(self):
    """We are callable and return self.

    This is an implementation detail of how we create a platform singleton.
    See the end of this file for the singleton creator.

    Returns:
      self
    """
    return self

  @staticmethod
  def _PlatformSpecificObject():
    """Create and return a platform-specific object.

    A platform-specific object is a class derived from Platform that
    overrides a few methods.

    Returns:
      A platform-specific subclass of class Platform.

    Raises:
      PlatformException: An unsupported platform.
    """
    if os.name == 'nt':
      return PlatformWin()
    if os.name == 'posix':
      sysname = os.uname()[0]
      if sysname == 'Darwin':
        return PlatformMac()
      if sysname == 'Linux':
        return PlatformLinux()
    raise PlatformException('unsupported platform')

  def _FindInPath(self, filename, extra_dirs=None):
    """Find the file in the PATH (plus extra_dirs), or return None.

    Args:
      filename: A file basename (e.g. "echo", "ls") we wish to find.
      extra_dirs: If given, a list of additional directories searched
        after the PATH entries.

    Returns:
      The first fully qualified pathname whose file exists, or None if
      the file cannot be found anywhere.
    """
    search_dirs = []
    if 'PATH' in os.environ:
      search_dirs.extend(os.environ['PATH'].split(os.pathsep))
    if extra_dirs:
      search_dirs.extend(extra_dirs)
    for directory in search_dirs:
      candidate = os.path.join(directory, filename)
      if self.__exists(candidate):
        return candidate
    return None

  def KillProcess(self, process):
    """Kill the specified process.

    Args:
      process: The subprocess.Popen process to be killed.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def PythonCommand(self):
    """Return a default path to the Python we want to use, or None.

    This method does not look at preference settings.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def AppEngineBaseDirectory(self):
    """Return a path to the base dir of the App Engine SDK, or None.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def PreferencesFile(self, make_parent_directory=True):
    """Filename of our preferences file.

    Args:
      make_parent_directory: If True, mkdir the parent directory if needed.
        Currently only relevant on Windows.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def ProjectsFile(self, make_parent_directory=True):
    """Filename of our projects file.

    Args:
      make_parent_directory: If True, mkdir the parent directory if needed.
        Currently only relevant on Windows.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def OpenCommand(self, path):
    """Command for opening a file or folder on disk.

    Args:
      path: An absolute path to the file or folder.

    Returns:
      A tuple suitable for subprocess.Popen(), or None.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def DefaultEditor(self):
    """Default executable for editing a file.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def EditCommand(self, editor, application):
    """Command for editing an application.

    Args:
      editor: the editor to use
      application: a full path to an application to edit

    Returns:
      A tuple suitable for subprocess.Popen(), or None.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()

  def BriefPlatformName(self):
    """Return the brief platform name given at construction time."""
    return self._briefname

  def IsSuccessfulCommandResultCode(self, code):
    """Whether the result code from a command is actually a success.

    Args:
      code: The numerical result code from the subprocess wait() function.

    Returns:
      True iff the result code is considered a success, especially the
      result code returned when the subprocess is intentionally killed.

    Raises:
      PlatformUnimplemented: Always; subclasses must override.
    """
    raise PlatformUnimplemented()
class PlatformPosix(Platform):
  """Shared platform behavior for the Mac and Linux launchers."""

  def KillProcess(self, process):
    """Terminate the given subprocess.Popen process with SIGTERM."""
    os.kill(process.pid, signal.SIGTERM)

  def PythonCommand(self):
    """Return a default path to the Python we want to use, or None.

    Returns:
      A default path to the Python we want to use, or None if not found.
    """
    return self._FindInPath('python', ('/usr/bin', '/usr/local/bin'))

  def AppEngineBaseDirectory(self):
    """Return a path to the base dir of the App Engine SDK, or None.

    Returns:
      A path to the base dir of the App Engine SDK, or None if not found.
    """
    sdk_dir = '/usr/local/google_appengine'
    if os.path.isdir(sdk_dir):
      return sdk_dir
    return None

  def PreferencesFile(self, make_parent_directory=True):
    """Filename of our preferences file.

    Arg make_parent_directory is ignored (unnecessary), but we retain
    it to keep the signature in sync with the Windows version.

    Returns:
      The filename of our preference file.
    """
    # ~ always exists, so there is never a parent directory to create.
    return os.path.expanduser('~/.google_appengine_launcher.ini')

  def ProjectsFile(self, make_parent_directory=True):
    """Filename of our projects file.

    Arg make_parent_directory is ignored (unnecessary), but we retain
    it to keep the signature in sync with the Windows version.

    Returns:
      The filename of our projects file.
    """
    # ~ always exists, so there is never a parent directory to create.
    return os.path.expanduser('~/.google_appengine_projects.ini')

  def IsSuccessfulCommandResultCode(self, code):
    """Is the result code from a command actually a success?

    Args:
      code: The numerical result code from the subprocess wait() function.

    Returns:
      True if the result code is considered a success, False otherwise.
    """
    # 0 is the traditional "successful exit" status. Python's subprocess
    # module reports "negative signal number" for a signal-killed child,
    # and the launcher stops apps with signal 15 (SIGTERM), so -15 also
    # counts as success.
    return code in (0, -15)
class PlatformMac(PlatformPosix):
    """Mac-specific platform object."""

    def __init__(self):
        super(PlatformMac, self).__init__('mac')

    def OpenCommand(self, path):
        """Command for opening a file or folder on disk.

        Args:
          path: An absolute path to the file or folder.

        Returns:
          A tuple suitable for subprocess.Popen(), or None.
        """
        return ('/usr/bin/open', path)

    def DefaultEditor(self):
        """Default executable for editing a file.

        Returns: the default editor
        """
        return '/Applications/TextEdit.app'

    def EditCommand(self, editor, application):
        """Edit an application.

        Args:
          editor: the editor to use (ignored on OSX for now)
          application: a full path to an application to edit

        Returns:
          A tuple suitable for subprocess.Popen(), or None.
        """
        # TODO(jrg): implement properly for OSX
        yaml_path = os.path.join(application, 'app.yaml')
        return self.OpenCommand(yaml_path)
class PlatformLinux(PlatformPosix):
    """Linux-specific platform object."""

    def __init__(self, exists=os.path.exists):
        super(PlatformLinux, self).__init__('linux', exists=exists)

    def OpenCommand(self, path):
        """Command for opening a file or folder on disk.

        First choice for open command is based on the users DESKTOP_SESSION
        environment variable.  If this environment variable is absent, or
        either of the relevant executables are absent, it defaults to
        whichever opener is available, preferring the gnome one.

        TODO(rafek): Make the browser user selectable if user wishes.

        Args:
          path: An absolute path to the file or folder.

        Returns:
          A tuple suitable for subprocess.Popen(), or None.
        """
        gnome_opener = self._FindInPath('gnome-open')
        kde_opener = self._FindInPath('konqueror')
        if not (gnome_opener or kde_opener):
            # Neither opener is installed; nothing sensible to run.
            return None
        session = os.environ.get('DESKTOP_SESSION', 'gnome')
        if session == 'kde' and kde_opener:
            opener = kde_opener
        else:
            # Just try to use the first available.
            opener = gnome_opener or kde_opener
        return (opener, path)

    def DefaultEditor(self):
        """Default executable for editing a file.

        Returns: the default editor
        """
        return self._FindInPath('emacs')

    def EditCommand(self, editor, application):
        """Command for editing an application.

        Args:
          editor: the editor to use
          application: a full path to an application to edit

        Returns:
          A tuple suitable for subprocess.Popen(), or None.
        """
        return (editor, os.path.join(application, 'app.yaml'))
class PlatformWin(Platform):
    """Win-specific platform object."""

    def __init__(self):
        super(PlatformWin, self).__init__('win')

    def KillProcess(self, process):
        """Kill the specified process, a subprocess.Popen object."""
        PROCESS_TERMINATE = 1  # same name as win32 constant
        handle = win32api.OpenProcess(PROCESS_TERMINATE, False, process.pid)
        win32api.TerminateProcess(handle, -1)
        win32api.CloseHandle(handle)

    def PythonCommand(self):
        """Return a default path to the Python we want to use.

        Note a preference for pythonw.exe instead of python.exe to avoid
        bringing up a console window when the dev_appserver is run.
        pythonw.exe and python.exe are equivalent but pythonw.exe is less
        likely to have a mapping.

        Returns:
          A default path to the Python we want to use or None.
        """
        # Look for a Python set in the registry. Try and use the highest
        # version available.
        #
        # TODO(jrg):
        # This does NOT find cygwin Python. Is that OK? The wxPython
        # packages on the net use the Windows Python (not the cygwin one),
        # so finding the cygwin Python is probably not helpful. However,
        # we're currently only using the return value to run the
        # dev_appserver, which doesn't care about wx.
        pythonpath = ''
        try:
            basepath = r'SOFTWARE\Python\PythonCore'
            reg = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, basepath)
            (subkeys_count, unused_values, unused_lastmod) = _winreg.QueryInfoKey(reg)
            names = [_winreg.EnumKey(reg, i) for i in range(subkeys_count)]
            names.sort(reverse=True)  # so the latest version is first
            _winreg.CloseKey(reg)
            reg = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                  basepath + ('\\%s\\' % names[0]))
            install_path = _winreg.QueryValue(reg, 'InstallPath')
            # Try pythonw.exe first.
            pythonpath = os.path.join(install_path, 'pythonw.exe')
            if not os.path.exists(pythonpath):
                pythonpath = os.path.join(install_path, 'python.exe')
            _winreg.CloseKey(reg)
        except WindowsError:
            # It's fine if the registry key doesn't exist -- we'll fall
            # through to _FindInPath().
            pass
        if os.path.exists(pythonpath):
            return pythonpath
        # Try pythonw.exe first.
        for exe in ('pythonw.exe', 'python.exe'):
            pythonpath = self._FindInPath(exe)
            if pythonpath and os.path.exists(pythonpath):
                return pythonpath
        return None

    def AppEngineBaseDirectory(self):
        """Return a path to the base dir of the App Engine SDK, or None.

        Returns:
          A path to the base dir of the App Engine SDK, or None if not found.
        """
        # The SDK is expected to be the parent of the current directory;
        # verify by checking for dev_appserver.py inside it.
        candidate = os.path.dirname(os.getcwd())
        for dirname in (candidate,):
            if (os.path.isdir(dirname) and
                    os.path.exists(os.path.join(dirname, 'dev_appserver.py'))):
                return dirname
        return None

    def PreferencesFile(self, make_parent_directory=True):
        """Filename of our preferences file.

        Returns:
          The filename of our preference file.
        """
        # On Windows, expanduser('~') is
        # C:\Documents and Settings\$USER\Application Data\.
        # (Question: should I use os.environ['APPDATA'] instead?
        # It's the same value but may be "more correct".)
        basedir = os.path.expanduser('~/Google')
        if not os.path.exists(basedir) and make_parent_directory:
            os.mkdir(basedir)
        return os.path.join(basedir, 'google_appengine_launcher.ini')

    def ProjectsFile(self, make_parent_directory=True):
        """Filename of our projects file.

        Returns:
          The filename of our projects file.
        """
        basedir = os.path.expanduser('~/Google')
        if not os.path.exists(basedir) and make_parent_directory:
            os.mkdir(basedir)
        return os.path.join(basedir, 'google_appengine_projects.ini')

    def OpenCommand(self, path):
        """Command for opening a file or folder on disk.

        Args:
          path: An absolute path to the file or folder.

        Returns:
          A tuple suitable for subprocess.Popen(), or None.
        """
        cmd_exe = self._FindInPath('cmd.exe')
        # The empty argument allows cmd.exe to open paths with spaces in
        # them without incident.
        return (cmd_exe, '/c', 'start', '', path)

    def DefaultEditor(self):
        """Default executable for editing a file.

        Returns: the default editor
        """
        # notepad doesn't understand Unix line endings, so try hard to
        # find wordpad (even if not in the path).
        wp = self._FindInPath('wordpad.exe')
        if not wp:
            # BUG FIX: previously the hard-coded fallback string was
            # returned unconditionally (a non-empty string is always
            # truthy), making the notepad fallback below unreachable.
            # Only use the conventional install location if it exists.
            fallback = 'c:/Program Files/Windows NT/Accessories/wordpad.exe'
            wp = fallback if os.path.exists(fallback) else None
        if wp:
            return wp
        return self._FindInPath('notepad.exe')

    def EditCommand(self, editor, application):
        """Command for editing an application.

        Args:
          editor: the editor to use
          application: a full path to an application to edit

        Returns:
          A tuple suitable for subprocess.Popen(), or None.
        """
        return (editor, os.path.join(application, 'app.yaml'))

    def IsSuccessfulCommandResultCode(self, code):
        """Is the result code from a command actually a success?

        Args:
          code: The numerical result code from the subprocess wait() function.

        Returns:
          True if the result code is considered a success, especially the
          result code returned when the subprocess is intentionally killed.
          False otherwise.
        """
        # By observation, the launcher intentionally killing a process on
        # Windows results in a return code of -1, so that is one of our
        # success values.
        return code in (0, -1)
# Make a singleton. Note it is callable and returns self.
# Rebinding the class name to the platform-specific instance means all
# later references to `Platform` go through that single object.
Platform = Platform._PlatformSpecificObject()
| apache-2.0 |
kofron/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/mslink.py | 61 | 10682 | """SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslink.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
    """Return the /PDB and /DEBUG link flags, or None.

    Emits the flags only when the first target node carries a `pdb`
    attribute (set by the emitters below); otherwise returns None so
    nothing is added to the command line.
    """
    try:
        pdb_node = target[0].attributes.pdb
    except (AttributeError, IndexError):
        return None
    return ['/PDB:%s' % pdb_node, '/DEBUG']
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
    """$_SHLINK_TARGETS generator: /out: and /implib: flags for a DLL."""
    return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
    """$_SHLINK_SOURCES generator: source args (with /def: tagging) for a DLL."""
    return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
    """Get linker target flags (/out:, /implib:) for loadable modules."""
    return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
    """Get linker source arguments for loadable modules."""
    return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
    """Common implementation of dll emitter.

    Expands the target/source lists for a DLL-style link: optionally adds
    a generated .def source, and adds manifest / PDB / import-library /
    .exp side-effect targets so SCons tracks everything the linker writes.
    Raises SCons.Errors.UserError when no DLL target can be identified.
    """
    SCons.Tool.msvc.validate_vars(env)

    extratargets = []
    extrasources = []

    dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
    no_import_lib = env.get('no_import_lib', 0)

    if not dll:
        raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))

    insert_def = env.subst("$WINDOWS_INSERT_DEF")
    if not insert_def in ['', '0', 0] and \
       not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):

        # append a def file to the list of sources
        extrasources.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))

    version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
    if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
        # MSVC 8 automatically generates .manifest files that must be installed
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))

    if 'PDB' in env and env['PDB']:
        pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
        extratargets.append(pdb)
        # Remember the PDB node on the primary target so pdbGenerator can
        # later emit the matching /PDB: flag.
        target[0].attributes.pdb = pdb

    if not no_import_lib and \
       not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
        # Append an import library to the list of targets.
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "LIBPREFIX", "LIBSUFFIX"))
        # and .exp file is created if there are exports from a DLL
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))

    return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
    """SHLIBEMITTER hook: add DLL side-effect targets/sources (see _dllEmitter)."""
    return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
    """Emitter for loadable modules.

    Loadable modules are identical to shared libraries on Windows, but
    building them is subject to different parameters (LDMODULE*).
    """
    return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
    """PROGEMITTER hook: add manifest and PDB side-effect targets for programs.

    Raises SCons.Errors.UserError when no executable target can be found.
    """
    SCons.Tool.msvc.validate_vars(env)

    extratargets = []

    exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
    if not exe:
        raise SCons.Errors.UserError("An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX"))

    version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
    if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
        # MSVC 8 automatically generates .manifest files that have to be installed
        extratargets.append(
            env.ReplaceIxes(exe,
                            "PROGPREFIX", "PROGSUFFIX",
                            "WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))

    if 'PDB' in env and env['PDB']:
        pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
        extratargets.append(pdb)
        # Remembered for pdbGenerator, as in _dllEmitter.
        target[0].attributes.pdb = pdb

    return (target+extratargets, source)
def RegServerFunc(target, source, env):
    """Register the freshly linked DLL via regsvr32 when $register is set.

    Returns the regsvr32 exit status (0 when registration was skipped) and
    raises SCons.Errors.UserError when registration fails.
    """
    if 'register' in env and env['register']:
        ret = regServerAction([target[0]], [source[0]], env)
        if ret:
            raise SCons.Errors.UserError("Unable to register %s" % target[0])
        else:
            # BUG FIX: corrected the misspelled message "sucessfully".
            # Parenthesized form keeps this valid Python 2 print syntax.
            print ("Registered %s successfully" % target[0])
        return ret
    return 0
# Action that invokes regsvr32, plus a check wrapper that only runs it
# when the 'register' construction variable is set (see RegServerFunc).
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
regServerCheck = SCons.Action.Action(RegServerFunc, None)
# Link actions use ${TEMPFILE(...)} so overlong command lines are routed
# through a response file.
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES")}')
compositeShLinkAction = shlibLinkAction + regServerCheck
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES")}')
compositeLdmodAction = ldmodLinkAction + regServerCheck
def generate(env):
    """Add Builders and construction variables for the MS linker to an Environment."""
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)

    # Shared-library link settings.
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
    env['_SHLINK_TARGETS'] = windowsShlinkTargets
    env['_SHLINK_SOURCES'] = windowsShlinkSources
    env['SHLINKCOM'] = compositeShLinkAction
    env.Append(SHLIBEMITTER = [windowsLibEmitter])

    # Program link settings.
    env['LINK'] = 'link'
    env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
    env['_PDB'] = pdbGenerator
    env['LINKCOM'] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows")}'
    env.Append(PROGEMITTER = [prog_emitter])
    env['LIBDIRPREFIX']='/LIBPATH:'
    env['LIBDIRSUFFIX']=''
    env['LIBLINKPREFIX']=''
    env['LIBLINKSUFFIX']='$LIBSUFFIX'

    # WIN32* variables are legacy names kept for backward compatibility;
    # the WINDOWS* variables forward to them.
    env['WIN32DEFPREFIX'] = ''
    env['WIN32DEFSUFFIX'] = '.def'
    env['WIN32_INSERT_DEF'] = 0
    env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
    env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
    env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'

    env['WIN32EXPPREFIX'] = ''
    env['WIN32EXPSUFFIX'] = '.exp'
    env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
    env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'

    env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
    env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
    env['WINDOWSPROGMANIFESTPREFIX'] = ''
    env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'

    # regsvr32 registration support.
    env['REGSVRACTION'] = regServerCheck
    env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
    env['REGSVRFLAGS'] = '/s '
    env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'

    # Set-up ms tools paths
    msvc_setup_env_once(env)

    # Loadable modules are on Windows the same as shared libraries, but they
    # are subject to different build parameters (LDMODULE* variables).
    # Therefore LDMODULE* variables correspond as much as possible to
    # SHLINK*/SHLIB* ones.
    SCons.Tool.createLoadableModuleBuilder(env)
    env['LDMODULE'] = '$SHLINK'
    env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
    env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
    env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
    env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
    env['_LDMODULE_SOURCES'] = _windowsLdmodSources
    env['LDMODULEEMITTER'] = [ldmodEmitter]
    env['LDMODULECOM'] = compositeLdmodAction
def exists(env):
    """Tool availability predicate: true when an MSVC installation is found."""
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rcarrillocruz/ansible | lib/ansible/modules/network/nxos/nxos_snmp_community.py | 55 | 8226 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_community
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP community configs.
description:
- Manages SNMP community configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
community:
description:
- Case-sensitive community string.
required: true
access:
description:
- Access type for community.
required: false
default: null
choices: ['ro','rw']
group:
description:
- Group to which the community belongs.
required: false
default: null
acl:
description:
- ACL name to filter snmp requests.
required: false
default: 1
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp community is configured
- nxos_snmp_community:
community: TESTING7
group: network-operator
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "network-operator"}
existing:
description: k/v pairs of existing snmp community
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of snmp community after module execution
returned: always
type: dict
sample: {"acl": "None", "group": "network-operator"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server community TESTING7 group network-operator"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command over the module's transport and return its output.

    For the CLI transport, ' | json' is appended so the device returns
    structured output (except for running-config dumps, which are plain
    text only).
    """
    transport = module.params['transport']
    if transport == 'cli':
        if 'show run' not in command:
            command += ' | json'
        body = run_commands(module, [command])
    elif transport == 'nxapi':
        body = run_commands(module, [command])
    return body
def apply_key_map(key_map, table):
    """Rename the keys of ``table`` according to ``key_map``.

    Keys absent from ``key_map`` (or mapped to a falsy name) are dropped.
    Truthy values are converted to ``str``; falsy values are kept as-is.
    """
    renamed = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        renamed[new_key] = str(value) if value else value
    return renamed
def flatten_list(command_lists):
    """Flatten a mixed sequence of commands and command lists by one level."""
    return [cmd
            for entry in command_lists
            for cmd in (entry if isinstance(entry, list) else [entry])]
def get_snmp_groups(module):
    """Return the list of SNMP group (role) names configured on the device."""
    body = execute_show_command('show snmp group', module)[0]

    groups = []
    try:
        for row in body['TABLE_role']['ROW_role']:
            groups.append(row['role_name'])
    except (KeyError, AttributeError):
        # No parseable group table in the reply; report what we collected.
        pass
    return groups
def get_snmp_community(module, find_filter=None):
    """Return SNMP community configuration from the device.

    Args:
        module: AnsibleModule instance (supplies the transport).
        find_filter: optional community name to look up.

    Returns:
        Without ``find_filter``: dict mapping community name -> settings.
        With ``find_filter``: the (whitespace-stripped) settings dict for
        that community, or {} when it is not configured.
    """
    command = 'show snmp community'
    data = execute_show_command(command, module)[0]

    community_dict = {}

    community_map = {
        'grouporaccess': 'group',
        'aclfilter': 'acl'
    }

    try:
        community_table = data['TABLE_snmp_community']['ROW_snmp_community']
        for each in community_table:
            community = apply_key_map(community_map, each)
            key = each['community_name']
            community_dict[key] = community
    except (KeyError, AttributeError):
        return community_dict

    if find_filter:
        find = community_dict.get(find_filter, None)
        if find is None:
            return {}
        # Device output pads string fields with whitespace; strip it.
        fix_find = {}
        for (key, value) in find.items():
            if isinstance(value, str):
                fix_find[key] = value.strip()
            else:
                fix_find[key] = value
        return fix_find

    # BUG FIX: previously the function fell off the end and returned None
    # when no find_filter was given; return the full table instead.
    return community_dict
def config_snmp_community(delta, community):
    """Render NX-OS config commands for each changed community setting.

    ``delta`` maps setting names ('group', 'acl') to their desired values;
    one command line is produced per entry.
    """
    templates = {
        'group': 'snmp-server community {0} group {group}',
        'acl': 'snmp-server community {0} use-acl {acl}'
    }
    commands = []
    for setting in delta:
        rendered = templates.get(setting).format(community, **delta)
        if rendered:
            commands.append(rendered)
    return commands
def main():
    """Ansible entry point: converge SNMP community state on an NX-OS device."""
    argument_spec = dict(
        community=dict(required=True, type='str'),
        access=dict(choices=['ro', 'rw']),
        group=dict(type='str'),
        acl=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )

    argument_spec.update(nxos_argument_spec)

    # 'access' and 'group' express the same thing two ways, so exactly one
    # of them must be supplied.
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['access', 'group']],
                           mutually_exclusive=[['access', 'group']],
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    access = module.params['access']
    group = module.params['group']
    community = module.params['community']
    acl = module.params['acl']
    state = module.params['state']

    if access:
        # Map the shorthand access levels onto the built-in NX-OS groups.
        if access == 'ro':
            group = 'network-operator'
        elif access == 'rw':
            group = 'network-admin'

    # group check - ensure group being configured exists on the device
    configured_groups = get_snmp_groups(module)
    if group not in configured_groups:
        module.fail_json(msg="group not on switch."
                             "please add before moving forward")

    existing = get_snmp_community(module, community)
    args = dict(group=group, acl=acl)
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    # Settings present in 'proposed' but differing from 'existing'.
    delta = dict(set(proposed.items()).difference(existing.items()))

    changed = False
    end_state = existing
    commands = []

    if state == 'absent':
        if existing:
            command = "no snmp-server community {0}".format(community)
            commands.append(command)
        cmds = flatten_list(commands)
    elif state == 'present':
        if delta:
            command = config_snmp_community(dict(delta), community)
            commands.append(command)
        cmds = flatten_list(commands)

    if cmds:
        if module.check_mode:
            # Check mode: report the would-be changes without applying them.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_snmp_community(module, community)
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
spacelis/hrnn4sim | hrnn4sim/base.py | 1 | 6693 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: training.py
Author: Wen Li
Email: spacelis@gmail.com
Github: http://github.com/spacelis
Description: Training utility functions
"""
# pylint: disable=invalid-name
from __future__ import print_function
from datetime import datetime
from os.path import join as pjoin
import pandas as pd
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from keras import backend as K
from keras.callbacks import TensorBoard, ModelCheckpoint
from .vectorization import get_fullbatch, get_minibatches
from .vectorization import dataset_tokenize
def read_data(fin, filename):
    """Resolve the file format from *filename* and load it as a DataFrame.

    Args:
        fin: an open file-like object (used for CSV input).
        filename: the file's name; its extension selects the parser.

    Returns:
        A pandas DataFrame.

    Raises:
        ValueError: if the extension is neither .csv nor .feather.
    """
    if filename.endswith('.csv'):
        return pd.read_csv(fin)
    elif filename.endswith('.feather'):
        # Feather is read straight from the path; `fin` is unused here.
        return pd.read_feather(filename)
    # BUG FIX: the f-string had no placeholder, so the message always read
    # "(unknown)"; report the offending filename instead.
    raise ValueError(f'File format not supported: {filename}')
class ModelBase(object):
    """A base model handling training, validation and prediction.

    Subclasses supply the network (``build``) and the input encoding
    (``make_vectorizer``); this class owns the train/test/predict loops,
    TensorBoard logging, checkpointing and weight (de)serialisation.
    """

    def __init__(self, log_device=False):
        super(ModelBase, self).__init__()
        self.model = None
        self.vectorizer = None
        if log_device:
            # Log which ops land on which device -- useful for GPU debugging.
            self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
        else:
            self.session = tf.Session()

    def split_examples(self, examples, ratio=0.8):  # pylint: disable=no-self-use
        ''' Split a data set into training and validation parts by ratio. '''
        total_cnt = len(examples)
        train_cnt = int(total_cnt * ratio)
        valid_cnt = total_cnt - train_cnt
        train_set = examples[:train_cnt]
        valid_set = examples[-valid_cnt:]
        return train_set, valid_set

    def make_vectorizer(self, examples, **kwargs):
        ''' Make a vectorizer for the model (override in subclass). '''
        raise NotImplementedError()

    def build(self):
        ''' Build the model (override in subclass). '''
        raise NotImplementedError()

    def save_model(self, job_dir, model_dir, model_label):
        """Save the trained weights under job_dir/model_dir.

        Writes locally first, then copies through file_io so cloud
        (e.g. gs://) destinations work.
        """
        self.model.save_weights('model.h5.tmp')
        with file_io.FileIO('model.h5.tmp', mode='rb') as fin:
            model_path = pjoin(job_dir, model_dir, 'model_{}.h5'.format(model_label))
            with file_io.FileIO(model_path, mode='wb') as fout:
                fout.write(fin.read())
        print("Saved {}".format(model_path))

    def load_model(self, job_dir, model_dir, model_label):
        """Load model weights saved by save_model."""
        model_path = pjoin(job_dir, model_dir, 'model_{}.h5'.format(model_label))
        with file_io.FileIO(model_path, mode='rb') as fin:
            with file_io.FileIO('model.h5.tmp', mode='wb') as fout:
                fout.write(fin.read())
        self.model.load_weights('model.h5.tmp')
        print("Load {}".format(model_path))

    def train(self, trainfile, model_label=None,  # pylint: disable=too-many-arguments
              epochs=30, batch_size=100,
              val_file=None, val_split=0.8,
              shuffle=False, include_eos=False,
              job_dir='.', model_dir='ckpt'):
        # pylint: disable=too-many-locals
        ''' Train the model.

        When val_file is given it supplies the validation set; otherwise
        the training data is split by val_split.  A model_label resumes
        from a previously saved checkpoint.
        '''
        with file_io.FileIO(trainfile, 'r') as fin:
            examples = read_data(fin, trainfile)
        if shuffle:
            examples = examples.sample(frac=1).reset_index(drop=True)
        else:
            examples = examples.reset_index(drop=True)

        if val_file is not None:
            # BUG FIX: this branch previously re-read `trainfile`, so the
            # validation set silently duplicated the training set.
            with file_io.FileIO(val_file, 'r') as fin:
                val_examples = read_data(fin, val_file)
            if shuffle:
                val_examples = val_examples.sample(frac=1).reset_index(drop=True)
            else:
                val_examples = val_examples.reset_index(drop=True)
            self.vectorizer = self.make_vectorizer(pd.concat([examples, val_examples]),
                                                   include_eos=include_eos)
        else:
            self.vectorizer = self.make_vectorizer(examples, include_eos=include_eos)

        self.build()
        if model_label is not None:
            self.load_model(job_dir, model_dir, model_label)

        label = '{}_{}'.format(self.__class__.__name__, datetime.now().strftime("%Y%m%d_%H%M%S"))

        # Write Summaries to Tensorboard log
        tensorboardCB = TensorBoard(
            log_dir=pjoin(job_dir, 'tfgraph', label),
            #histogram_freq=100,
            write_graph=True)

        ckpt_label = '{}_epoch_{{epoch:02d}}_acc_{{val_acc:.4f}}'.format(label)
        checkpointCB = ModelCheckpoint(ckpt_label, monitor='val_acc', save_weights_only=True)

        # BUG FIX: the branches were swapped -- without a separate
        # validation file we must split the training data; with one we use
        # it directly (the old else-branch referenced the then-undefined
        # `val_examples` and raised NameError).
        if val_file is None:
            train_set, valid_set = self.split_examples(examples, val_split)
        else:
            train_set, valid_set = examples, val_examples
        x, y = get_fullbatch(train_set, self.vectorizer, multiple=batch_size)
        vx, vy = get_fullbatch(valid_set, self.vectorizer, multiple=batch_size)

        # Training
        K.set_session(self.session)
        self.model.fit(x, y, batch_size=batch_size, epochs=epochs,
                       validation_data=(vx, vy),
                       callbacks=[tensorboardCB, checkpointCB])

        # Validation
        loss, acc = self.model.evaluate(vx, vy, batch_size=batch_size)
        model_label = '{}_loss_{:.4f}_acc_{:.4f}'.format(label, loss, acc)
        self.save_model(job_dir, model_dir, model_label)
        print()
        print('Loss =', loss)
        print('Accuracy =', acc)

    def test(self, testfile, model_label, batch_size=100,  # pylint: disable=too-many-arguments
             include_eos=False, job_dir='.', model_dir='ckpt'):
        """Evaluate the saved model (model_label) on the test data."""
        with file_io.FileIO(testfile, 'r') as fin:
            examples = read_data(fin, testfile)
        self.vectorizer = self.make_vectorizer(examples, include_eos=include_eos)
        self.build()
        K.set_session(self.session)
        self.load_model(job_dir, model_dir, model_label)
        x, y = get_fullbatch(examples, self.vectorizer, multiple=batch_size)
        loss, acc = self.model.evaluate(x, y, batch_size=batch_size)
        print()
        print('Loss =', loss)
        print('Accuracy =', acc)

    def predict(self, items, batch_size=100):
        ''' Predict the matching of the items. '''
        x = get_fullbatch(dataset_tokenize(items), self.vectorizer, with_y=False)
        K.set_session(self.session)
        pred = self.model.predict(x, batch_size=batch_size)
        return pd.DataFrame({
            'seqa': items['seqa'],
            'seqb': items['seqb'],
            'matched': pred,
        })
| mit |
wangmiao1981/PredictionIO | examples/scala-parallel-similarproduct/add-rateevent/data/import_eventserver.py | 142 | 1844 | """
Import sample data for similar product engine
"""
import predictionio
import argparse
import random
SEED = 3
def import_events(client):
    """Import deterministic sample data through the given event client.

    Creates 10 users (u1..u10), 50 items (i1..i50) each tagged with 1-4
    random categories (c1..c6), and 10 random view events per user.
    Seeding the RNG keeps the generated data set reproducible.
    """
    random.seed(SEED)
    count = 0
    print client.get_status()
    print "Importing data..."

    # generate 10 users, with user ids u1,u2,....,u10
    user_ids = ["u%s" % i for i in range(1, 11)]
    for user_id in user_ids:
        print "Set user", user_id
        client.create_event(
            event="$set",
            entity_type="user",
            entity_id=user_id
        )
        count += 1

    # generate 50 items, with item ids i1,i2,....,i50
    # random assign 1 to 4 categories among c1-c6 to items
    categories = ["c%s" % i for i in range(1, 7)]
    item_ids = ["i%s" % i for i in range(1, 51)]
    for item_id in item_ids:
        print "Set item", item_id
        client.create_event(
            event="$set",
            entity_type="item",
            entity_id=item_id,
            properties={
                "categories" : random.sample(categories, random.randint(1, 4))
            }
        )
        count += 1

    # each user randomly viewed 10 items
    for user_id in user_ids:
        for viewed_item in random.sample(item_ids, 10):
            print "User", user_id ,"views item", viewed_item
            client.create_event(
                event="view",
                entity_type="user",
                entity_id=user_id,
                target_entity_type="item",
                target_entity_id=viewed_item
            )
            count += 1

    print "%s events are imported." % count
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Import sample data for similar product engine")
    # NOTE(review): the default below looks like a misspelling of
    # 'invalid_access_key'; it is a runtime value, so it is kept as-is.
    parser.add_argument('--access_key', default='invald_access_key')
    parser.add_argument('--url', default="http://localhost:7070")

    args = parser.parse_args()
    print args

    client = predictionio.EventClient(
        access_key=args.access_key,
        url=args.url,
        threads=5,
        qsize=500)
    import_events(client)
| apache-2.0 |
xiaoluobo/Medline-Pubmed-Search-Engine | build/lib/pymetamap/MetaMap.py | 4 | 1901 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
DEFAULT_METAMAP_VERSION = '2012'


class MetaMap:
    """Abstract base class for extracting concepts from text using
    MetaMap.  To use this you will need to have downloaded the recent
    MetaMap software from NLM.  ``metamap_filename`` should point to the
    binary you intend to use.

    Subclasses need to override the ``extract_concepts`` method.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, metamap_filename, version=None):
        """Remember the binary location and the requested MetaMap release.

        metamap_filename -- path to the MetaMap binary.
        version -- MetaMap release year; falls back to
                   DEFAULT_METAMAP_VERSION when None.
        """
        self.metamap_filename = metamap_filename
        if version is None:
            version = DEFAULT_METAMAP_VERSION
        # Bug fix: the resolved version used to be computed and then thrown
        # away (never assigned to the instance), so backends could not see
        # which release the caller asked for.
        self.version = version

    @abc.abstractmethod
    def extract_concepts(self, sentences=None, ids=None,
                         filename=None, composite_phrase=4,
                         file_format='sldi', word_sense_disambiguation=True):
        """ Extract concepts from a list of sentences using MetaMap. """

    @staticmethod
    def get_instance(metamap_filename, version=None, backend='subprocess',
                     **extra_args):
        """Factory returning a concrete MetaMap backend instance.

        Raises ValueError for an unknown ``backend`` name.
        """
        extra_args.update(metamap_filename=metamap_filename, version=version)
        if backend == 'subprocess':
            # Deferred (Python 2 implicit-relative) import so merely loading
            # this module does not require the backend module to exist.
            from SubprocessBackend import SubprocessBackend
            return SubprocessBackend(**extra_args)
        raise ValueError("Unknown backend: %r (known backends: "
                         "'subprocess')" % backend)
| apache-2.0 |
ryanpetrello/pecan | pecan/tests/test_generic.py | 2 | 2900 | from json import dumps
from webtest import TestApp
from six import b as b_
from pecan import Pecan, expose, abort
from pecan.tests import PecanTestCase
class TestGeneric(PecanTestCase):
    """Functional tests for pecan generic controllers: a single routable
    name (``index``) dispatched to different handlers by HTTP method via
    ``@expose(generic=True)`` plus ``@index.when(method=...)``."""

    def test_simple_generic(self):
        # GET and POST on '/' must be served by the method-specific
        # handlers, and the handler names themselves must not be routable.
        class RootController(object):
            @expose(generic=True)
            def index(self):
                pass

            @index.when(method='POST', template='json')
            def do_post(self):
                return dict(result='POST')

            @index.when(method='GET')
            def do_get(self):
                return 'GET'

        app = TestApp(Pecan(RootController()))
        r = app.get('/')
        assert r.status_int == 200
        assert r.body == b_('GET')

        r = app.post('/')
        assert r.status_int == 200
        assert r.body == b_(dumps(dict(result='POST')))

        # The handler functions are internal; requesting them directly 404s.
        r = app.get('/do_get', status=404)
        assert r.status_int == 404

    def test_generic_allow_header(self):
        # When the generic handler aborts with 405, the response must carry
        # an Allow header listing exactly the methods that have handlers.
        class RootController(object):
            @expose(generic=True)
            def index(self):
                abort(405)

            @index.when(method='POST', template='json')
            def do_post(self):
                return dict(result='POST')

            @index.when(method='GET')
            def do_get(self):
                return 'GET'

            @index.when(method='PATCH')
            def do_patch(self):
                return 'PATCH'

        app = TestApp(Pecan(RootController()))
        r = app.delete('/', expect_errors=True)
        assert r.status_int == 405
        # NOTE(review): the assertion relies on the Allow header being
        # alphabetically sorted -- confirm that is guaranteed by pecan.
        assert r.headers['Allow'] == 'GET, PATCH, POST'

    def test_nested_generic(self):
        # Generic dispatch must also work on controllers nested two levels
        # deep, including extra positional path segments for the handler.
        class SubSubController(object):
            @expose(generic=True)
            def index(self):
                return 'GET'

            @index.when(method='DELETE', template='json')
            def do_delete(self, name, *args):
                return dict(result=name, args=', '.join(args))

        class SubController(object):
            sub = SubSubController()

        class RootController(object):
            sub = SubController()

        app = TestApp(Pecan(RootController()))
        r = app.get('/sub/sub/')
        assert r.status_int == 200
        assert r.body == b_('GET')

        # '/joe/is/cool' maps to do_delete(name='joe', args=('is', 'cool')).
        r = app.delete('/sub/sub/joe/is/cool')
        assert r.status_int == 200
        assert r.body == b_(dumps(dict(result='joe', args='is, cool')))
class TestGenericWithSpecialMethods(PecanTestCase):
    """The special routing hooks must never be marked generic."""

    def test_generics_not_allowed(self):
        # _default, _lookup and _route are pecan's reserved dispatch hooks;
        # decorating any of them with expose(generic=True) must raise.
        class C(object):

            def _default(self):
                pass

            def _lookup(self):
                pass

            def _route(self):
                pass

        special_hooks = (C._default, C._lookup, C._route)
        for hook in special_hooks:
            # Unwrap bound/unbound methods to the plain function when needed.
            target = getattr(hook, '__func__', hook)
            self.assertRaises(ValueError, expose(generic=True), target)
| bsd-3-clause |
joelddiaz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_openshift/src/lib/clusterrole.py | 64 | 1995 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-public-methods
class ClusterRole(Yedit):
    ''' Class to model an openshift ClusterRole'''
    rules_path = "rules"

    def __init__(self, name=None, content=None):
        ''' Build from raw yaml content, or synthesize a default role
            with the given name when no content is supplied. '''
        if content is None:
            content = ClusterRole.builder(name).yaml_dict
        super(ClusterRole, self).__init__(content=content)
        self.__rules = Rule.parse_rules(self.get(ClusterRole.rules_path)) or []

    @property
    def rules(self):
        ''' Parsed rule objects of this role. '''
        return self.__rules

    @rules.setter
    def rules(self, data):
        ''' Replace the rules and write them back into the yaml document. '''
        self.__rules = data
        self.put(ClusterRole.rules_path, self.__rules)

    def rule_exists(self, inc_rule):
        ''' Return True when inc_rule matches one of our rules. '''
        return any(existing == inc_rule for existing in self.rules)

    def compare(self, other, verbose=False):
        ''' Symmetric comparison: each side's rules must all appear in
            the other; optionally report the first mismatch. '''
        for candidate in other.rules:
            if candidate not in self.rules:
                if verbose:
                    print('Rule in other not found in self. [{}]'.format(candidate))
                return False

        for candidate in self.rules:
            if candidate not in other.rules:
                if verbose:
                    print('Rule in self not found in other. [{}]'.format(candidate))
                return False

        return True

    @staticmethod
    def builder(name='default_clusterrole', rules=None):
        ''' Return a ClusterRole with the given name and/or rules. '''
        if rules is None:
            # A single wide-open-but-empty rule skeleton.
            rules = [{'apiGroups': [""],
                      'attributeRestrictions': None,
                      'verbs': [],
                      'resources': []}]

        content = {'apiVersion': 'v1',
                   'kind': 'ClusterRole',
                   'metadata': {'name': '{}'.format(name)},
                   'rules': rules}
        return ClusterRole(content=content)
| apache-2.0 |
aquajach/sample_teach | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py | 241 | 63805 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
  """Builds the expansion table for the $(ARCHS_STANDARD) Xcode variable,
  optionally including $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
  expansions = {'$(ARCHS_STANDARD)': archs}
  if not archs_including_64_bit:
    return expansions
  expansions['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
  return expansions
class XcodeArchsDefault(object):
  """A class to resolve ARCHS variable from xcode_settings, resolving Xcode
  macros and implementing filtering by VALID_ARCHS. The expansion of macros
  depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
  on the version of Xcode.
  """

  # Match variable like $(ARCHS_STANDARD).
  variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')

  def __init__(self, default, mac, iphonesimulator, iphoneos):
    # |default| is the variable expanded when ARCHS is unset; the three
    # mappings give per-platform expansions for the macro variables.
    self._default = (default,)
    self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}

  def _VariableMapping(self, sdkroot):
    """Returns the dictionary of variable mapping depending on the SDKROOT."""
    sdkroot = sdkroot.lower()
    # 'iphoneos' is not a substring of 'iphonesimulator', so the order of
    # these substring checks is safe.
    if 'iphoneos' in sdkroot:
      return self._archs['ios']
    elif 'iphonesimulator' in sdkroot:
      return self._archs['iossim']
    else:
      return self._archs['mac']

  def _ExpandArchs(self, archs, sdkroot):
    """Expands variable references in ARCHS, preserving order and removing
    duplicates."""
    variable_mapping = self._VariableMapping(sdkroot)
    expanded_archs = []
    for arch in archs:
      if self.variable_pattern.match(arch):
        variable = arch
        try:
          variable_expansion = variable_mapping[variable]
          for arch in variable_expansion:
            if arch not in expanded_archs:
              expanded_archs.append(arch)
        except KeyError:
          # Fix: was a Python-2-only print statement, which made this module
          # unparseable under Python 3; a single-argument print() call emits
          # identical output under Python 2. Also dropped the unused 'as e'.
          print('Warning: Ignoring unsupported variable "%s".' % variable)
      elif arch not in expanded_archs:
        expanded_archs.append(arch)
    return expanded_archs

  def ActiveArchs(self, archs, valid_archs, sdkroot):
    """Expands variable references in ARCHS, and filters by VALID_ARCHS if it
    is defined (if not set, Xcode accepts any value in ARCHS, otherwise only
    values present in VALID_ARCHS are kept)."""
    expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
    if valid_archs:
      expanded_archs = [arch for arch in expanded_archs
                        if arch in valid_archs]
    return expanded_archs
def GetXcodeArchsDefault():
  """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
  installed version of Xcode. The default values used by Xcode for ARCHS
  and the expansion of the variables depend on the version of Xcode used.

  For all versions anterior to Xcode 5.0 or posterior to Xcode 5.1 included,
  $(ARCHS_STANDARD) is used if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
  $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added to Xcode 5.0
  and deprecated with Xcode 5.1.

  For the "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
  64-bit architecture as part of $(ARCHS_STANDARD) and default to only
  building it.

  For the "iphoneos" and "iphonesimulator" SDKROOTs, 64-bit architectures
  are part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode
  5.1, they are also part of $(ARCHS_STANDARD).

  All those rules are coded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons.
  """
  global XCODE_ARCHS_DEFAULT_CACHE
  if XCODE_ARCHS_DEFAULT_CACHE:
    return XCODE_ARCHS_DEFAULT_CACHE
  # XcodeVersion() is defined elsewhere in this module and returns the
  # version as a zero-padded string (e.g. '0500'), so the lexicographic
  # comparisons below match numeric ordering.
  xcode_version, _ = XcodeVersion()
  if xcode_version < '0500':
    # Pre-5.0: 32-bit only defaults, no 64-bit variable.
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['armv7']))
  elif xcode_version < '0510':
    # 5.0.x: ARCHS defaults to $(ARCHS_STANDARD_INCLUDING_64_BIT).
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s'],
            ['armv7', 'armv7s', 'arm64']))
  else:
    # 5.1+: 64-bit archs folded back into $(ARCHS_STANDARD).
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s', 'arm64'],
            ['armv7', 'armv7s', 'arm64']))
  return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
  """Caches the per-configuration 'xcode_settings' dicts from |spec| (a gyp
  target dict) and derives iOS-ness from them."""
  self.spec = spec

  # Set to True below if any configuration sets IPHONEOS_DEPLOYMENT_TARGET;
  # toggles iOS-specific behavior throughout this class.
  self.isIOS = False

  # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
  # This means self.xcode_settings[config] always contains all settings
  # for that config -- the per-target settings as well. Settings that are
  # the same for all configs are implicitly per-target settings.
  self.xcode_settings = {}
  configs = spec['configurations']
  for configname, config in configs.iteritems():
    self.xcode_settings[configname] = config.get('xcode_settings', {})
    self._ConvertConditionalKeys(configname)
    if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
                                           None):
      self.isIOS = True

  # This is only non-None temporarily during the execution of some methods.
  self.configname = None

  # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
  self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
  """Converts or warns on conditional keys. Xcode supports conditional keys,
  such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
  with some keys converted while the rest force a warning.

  Every conditional key is removed from the settings dict; for recognized
  conditions the value is first copied over to the unconditional key.
  """
  settings = self.xcode_settings[configname]
  conditional_keys = [key for key in settings if key.endswith(']')]
  for key in conditional_keys:
    # If you need more, speak up at http://crbug.com/122592
    if key.endswith("[sdk=iphoneos*]"):
      if configname.endswith("iphoneos"):
        new_key = key.split("[")[0]
        settings[new_key] = settings[key]
    else:
      # Fix: was a Python-2-only two-argument print statement (which breaks
      # parsing under Python 3); string concatenation reproduces the same
      # output (comma inserted a single space) on both versions.
      print('Warning: Conditional keys not implemented, ignoring: ' +
            ' '.join(conditional_keys))
    del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
  """Returns the name of the bundle binary of by this target.
  E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
  assert self._IsBundle()
  # Fix: the original tested |self.spec['type'] in ('shared_library')| --
  # the parentheses make a plain string, so that was a substring test, not
  # tuple membership. It happened to behave correctly for the type values
  # that reach here, but only by accident.
  if self.spec['type'] == 'shared_library' or self.isIOS:
    path = self.GetBundleContentsFolderPath()
  elif self.spec['type'] in ('executable', 'loadable_module'):
    path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
  else:
    # Bundles only exist for the types above; fail with a clear message
    # instead of an UnboundLocalError on |path|.
    assert False, 'Unexpected bundle type %s' % self.spec['type']
  return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
  """Returns the name of the non-bundle binary represented by this target.
  E.g. hello_world. Only valid for non-bundles."""
  assert not self._IsBundle()
  assert self.spec['type'] in (
      'executable', 'shared_library', 'static_library', 'loadable_module'), (
      'Unexpected type %s' % self.spec['type'])
  target = self.spec['target_name']
  # All library flavors drop a leading 'lib' from the target name; the
  # prefix is re-added by _GetStandaloneExecutablePrefix below. (The
  # original had two byte-identical branches for static_library vs.
  # loadable_module/shared_library; they are merged here.)
  if self.spec['type'] in ('static_library', 'loadable_module',
                           'shared_library'):
    if target[:3] == 'lib':
      target = target[3:]

  target_prefix = self._GetStandaloneExecutablePrefix()
  # An explicit product_name overrides the (possibly lib-stripped) name.
  target = self.spec.get('product_name', target)
  target_ext = self._GetStandaloneExecutableSuffix()
  return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
  """Queries xcodebuild for a single SDK info item (e.g. 'Path').

  Returns the item's value as a string, or None when xcodebuild is
  unavailable or the query fails.
  """
  # xcodebuild requires Xcode and can't run on Command Line Tools-only
  # systems from 10.7 onward.
  # Since the CLT has no SDK paths anyway, returning None is the
  # most sensible route and should still do the right thing.
  try:
    return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
  except Exception:
    # Fix: narrowed from a bare |except:| so KeyboardInterrupt/SystemExit
    # are no longer swallowed; the deliberate best-effort None return on
    # any ordinary failure is preserved (and now explicit).
    return None
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# This functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't make c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
  """Returns flags that need to be passed to the linker.

  Args:
      configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build direcotry.
      arch: If set, link for this single architecture instead of consulting
          the configuration's active archs (only one arch is supported).
  """
  self.configname = configname
  ldflags = []

  # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
  # can contain entries that depend on this. Explicitly absolutify these.
  for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
    ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))

  if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
    ldflags.append('-Wl,-dead_strip')

  if self._Test('PREBINDING', 'YES', default='NO'):
    ldflags.append('-Wl,-prebind')

  # Dylib versioning flags; appended only when the setting is present.
  self._Appendf(
      ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
  self._Appendf(
      ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')

  self._AppendPlatformVersionMinFlags(ldflags)

  if 'SDKROOT' in self._Settings() and self._SdkPath():
    ldflags.append('-isysroot ' + self._SdkPath())

  for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
    ldflags.append('-L' + gyp_to_build_path(library_path))

  if 'ORDER_FILE' in self._Settings():
    ldflags.append('-Wl,-order_file ' +
                   '-Wl,' + gyp_to_build_path(
                       self._Settings()['ORDER_FILE']))

  # Pick the architecture to link: explicit |arch| wins, otherwise the
  # configuration's ARCHS. Fat (multi-arch) binaries are unimplemented.
  if arch is not None:
    archs = [arch]
  else:
    assert self.configname
    archs = self.GetActiveArchs(self.configname)
  if len(archs) != 1:
    # TODO: Supporting fat binaries will be annoying.
    self._WarnUnimplemented('ARCHS')
    archs = ['i386']
  ldflags.append('-arch ' + archs[0])

  # Xcode adds the product directory by default.
  ldflags.append('-L' + product_dir)

  install_name = self.GetInstallName()
  if install_name and self.spec['type'] != 'loadable_module':
    # Escape spaces so the install name survives shell splitting.
    ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))

  for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
    ldflags.append('-Wl,-rpath,' + rpath)

  sdk_root = self._SdkPath()
  if not sdk_root:
    sdk_root = ''
  config = self.spec['configurations'][self.configname]
  framework_dirs = config.get('mac_framework_dirs', [])
  for directory in framework_dirs:
    ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))

  is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
  if sdk_root and is_extension:
    # Adds the link flags for extensions. These flags are common for all
    # extensions and provide loader and main function.
    # These flags reflect the compilation options used by xcode to compile
    # extensions.
    ldflags.append('-lpkstart')
    ldflags.append(sdk_root +
                   '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
    ldflags.append('-fapplication-extension')
    ldflags.append('-Xlinker -rpath '
                   '-Xlinker @executable_path/../../Frameworks')

  self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')

  self.configname = None
  return ldflags
def GetLibtoolflags(self, configname):
  """Returns flags that need to be passed to the static linker.

  Args:
      configname: The name of the configuration to get ld flags for.
  """
  self.configname = configname
  # Static archives simply receive the target's OTHER_LDFLAGS verbatim.
  libtoolflags = list(self._Settings().get('OTHER_LDFLAGS', []))
  # TODO(thakis): ARCHS?
  self.configname = None
  return libtoolflags
def GetPerTargetSettings(self):
  """Gets a list of all the per-target settings. This will only fetch keys
  whose values are the same across all configurations."""
  first_pass = True
  result = {}
  for configname in sorted(self.xcode_settings.keys()):
    if first_pass:
      # Seed with a copy of the first configuration's settings.
      result = dict(self.xcode_settings[configname])
      first_pass = False
    else:
      # Drop any key whose value differs in a later configuration.
      # NOTE(review): a key present in the first configuration but absent
      # from a later one is kept anyway — confirm this is intended.
      for key, value in self.xcode_settings[configname].iteritems():
        if key not in result:
          continue
        elif result[key] != value:
          del result[key]
  return result
def GetPerConfigSetting(self, setting, configname, default=None):
  """Looks up |setting| for |configname|; for unknown configurations falls
  back to the per-target value (which asserts cross-config consistency)."""
  if configname not in self.xcode_settings:
    return self.GetPerTargetSetting(setting, default)
  return self.xcode_settings[configname].get(setting, default)
def GetPerTargetSetting(self, setting, default=None):
  """Tries to get xcode_settings.setting from spec. Assumes that the setting
  has the same value in all configurations and throws otherwise."""
  result = None
  seen_first = False
  for configname in sorted(self.xcode_settings.keys()):
    value = self.xcode_settings[configname].get(setting, None)
    if not seen_first:
      result = value
      seen_first = True
    else:
      # Every configuration must agree, otherwise the setting is really
      # per-config and this accessor must not be used for it.
      assert result == value, (
          "Expected per-target setting for '%s', got per-config setting "
          "(target %s)" % (setting, self.spec['target_name']))
  if result is None:
    return default
  return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
  """Returns a list of shell commands that contain the shell commands
  neccessary to strip this target's binary. These should be run as postbuilds
  before the actual postbuilds run."""
  self.configname = configname

  result = []
  # Stripping only happens when both deployment postprocessing and
  # STRIP_INSTALLED_PRODUCT are enabled, mirroring Xcode.
  if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
      self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):

    # Default strip style depends on the product type.
    default_strip_style = 'debugging'
    if self.spec['type'] == 'loadable_module' and self._IsBundle():
      default_strip_style = 'non-global'
    elif self.spec['type'] == 'executable':
      default_strip_style = 'all'

    # Map the (possibly overridden) STRIP_STYLE to strip(1) flags.
    strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
    strip_flags = {
        'all': '',
        'non-global': '-x',
        'debugging': '-S',
    }[strip_style]

    # STRIPFLAGS may contain $FOO-style references; normalize them to ${FOO}.
    explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
    if explicit_strip_flags:
      strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)

    if not quiet:
      result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
    result.append('strip %s %s' % (strip_flags, output_binary))

  self.configname = None
  return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
  """Returns a list of shell commands that contain the shell commands
  neccessary to massage this target's debug information. These should be run
  as postbuilds before the actual postbuilds run."""
  self.configname = configname

  # For static libraries, no dSYMs are created.
  result = []
  if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
      self._Test(
          'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
      self.spec['type'] != 'static_library'):
    if not quiet:
      result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
    # Extract the debug info into a .dSYM bundle next to the output.
    result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))

  self.configname = None
  return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
  """Return a shell command to codesign the iOS output binary so it can
  be deployed to a device. This should be run as the very last step of the
  build."""
  # Only iOS executables are signed; everything else needs no postbuild.
  if not (self.isIOS and self.spec['type'] == 'executable'):
    return []

  settings = self.xcode_settings[configname]
  # No signing identity configured (or resolvable) -> nothing to do.
  key = self._GetIOSCodeSignIdentityKey(settings)
  if not key:
    return []

  # Warn for any unimplemented signing xcode keys.
  unimpl = ['OTHER_CODE_SIGN_FLAGS']
  unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
  if unimpl:
    # Non-fatal: the keys are simply ignored (Python 2 print statement).
    print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
        ', '.join(sorted(unimpl)))

  # Delegate the actual signing to gyp-mac-tool's code-sign-bundle command.
  return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
      os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
      settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
      settings.get('CODE_SIGN_ENTITLEMENTS', ''),
      settings.get('PROVISIONING_PROFILE', ''))
  ]
def _GetIOSCodeSignIdentityKey(self, settings):
  """Resolves CODE_SIGN_IDENTITY to a concrete signing-key fingerprint via
  `security find-identity`, caching results on the XcodeSettings class.

  Returns None when no identity is configured, '' when the identity is not
  found in the keychain."""
  identity = settings.get('CODE_SIGN_IDENTITY')
  if not identity:
    return None
  if identity not in XcodeSettings._codesigning_key_cache:
    output = subprocess.check_output(
        ['security', 'find-identity', '-p', 'codesigning', '-v'])
    for line in output.splitlines():
      if identity in line:
        # Fingerprint is the second whitespace-separated column.
        fingerprint = line.split()[1]
        cache = XcodeSettings._codesigning_key_cache
        # Refuse ambiguous identities instead of signing with an
        # arbitrary key.
        assert identity not in cache or fingerprint == cache[identity], (
            "Multiple codesigning fingerprints for identity: %s" % identity)
        XcodeSettings._codesigning_key_cache[identity] = fingerprint
  return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
                          postbuilds=None, quiet=False):
  """Returns a list of shell commands that should run before and after
  |postbuilds|.

  Args:
      configname: The name of the configuration the commands run in.
      output: Path to the output the postbuilds operate on.
      output_binary: Path to the binary inside |output|; must not be None.
      postbuilds: Optional list of explicit postbuild commands, sandwiched
          between the implicit pre-commands (dSYM/strip) and the implicit
          post-commands (iOS codesigning).
      quiet: If True, the generated commands do not echo progress lines.
  """
  # The default used to be a mutable list literal ([]); use None instead to
  # avoid the shared-mutable-default pitfall.
  if postbuilds is None:
    postbuilds = []
  assert output_binary is not None
  pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
  post = self._GetIOSPostbuilds(configname, output_binary)
  return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
return l.replace('$(SDKROOT)', sdk_root)
def AdjustLibraries(self, libraries, config_name=None):
  """Transforms entries like 'Cocoa.framework' in libraries into entries like
  '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
  """
  return [self._AdjustLibrary(entry, config_name) for entry in libraries]
def _BuildMachineOSBuild(self):
  """Returns the OS build number of the build machine (e.g. '13C64'),
  as reported by `sw_vers -buildVersion`."""
  return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
  """Returns a dictionary with extra items to insert into Info.plist."""
  # The expensive tool invocations (sw_vers, xcodebuild) are cached
  # per-configuration on the XcodeSettings class.
  if configname not in XcodeSettings._plist_cache:
    cache = {}
    cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()

    xcode, xcode_build = XcodeVersion()
    cache['DTXcode'] = xcode
    cache['DTXcodeBuild'] = xcode_build

    sdk_root = self._SdkRoot(configname)
    if not sdk_root:
      sdk_root = self._DefaultSdkRoot()
    cache['DTSDKName'] = sdk_root
    if xcode >= '0430':
      # Xcode >= 4.3 records the SDK's own build number.
      cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
          sdk_root, 'ProductBuildVersion')
    else:
      # Older Xcodes fall back to the build machine's OS build.
      cache['DTSDKBuild'] = cache['BuildMachineOSBuild']

    if self.isIOS:
      cache['DTPlatformName'] = cache['DTSDKName']
      # '-iphoneos' configurations target devices; everything else the
      # simulator.
      if configname.endswith("iphoneos"):
        cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
            sdk_root, 'ProductVersion')
        cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
      else:
        cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
    XcodeSettings._plist_cache[configname] = cache

  # Include extra plist items that are per-target, not per global
  # XcodeSettings.
  items = dict(XcodeSettings._plist_cache[configname])
  if self.isIOS:
    items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
  return items
def _DefaultSdkRoot(self):
  """Returns the default SDKROOT to use.

  Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
  project, then the environment variable was empty. Starting with this
  version, Xcode uses the name of the newest SDK installed.
  """
  xcode_version = XcodeVersion()[0]  # build number is not needed here
  if xcode_version < '0500':
    return ''
  default_sdk_path = self._XcodeSdkPath('')
  default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
  if default_sdk_root:
    return default_sdk_root
  try:
    all_sdks = GetStdout(['xcodebuild', '-showsdks'])
  except Exception:
    # Was a bare `except:`; GetStdout raises GypError on failure.
    # If xcodebuild fails, there will be no valid SDKs
    return ''
  for line in all_sdks.splitlines():
    items = line.split()
    # SDK lines end in '-sdk <sdk_root>'.
    if len(items) >= 3 and items[-2] == '-sdk':
      sdk_root = items[-1]
      sdk_path = self._XcodeSdkPath(sdk_root)
      if sdk_path == default_sdk_path:
        return sdk_root
  return ''
class MacPrefixHeader(object):
  """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.

  This feature consists of several pieces:
  * If GCC_PREFIX_HEADER is present, all compilations in that project get an
    additional |-include path_to_prefix_header| cflag.
  * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
    instead compiled, and all other compilations in the project get an
    additional |-include path_to_compiled_header| instead.
    + Compiled prefix headers have the extension gch. There is one gch file
      for every language used in the project (c, cc, m, mm), since gch files
      for different languages aren't compatible.
    + gch files themselves are built with the target's normal cflags, but
      they obviously don't get the |-include| flag. Instead, they need a -x
      flag that describes their language.
    + All o files in the target need to depend on the gch file, to make sure
      it's built before any o file is built.

  This class helps with some of these tasks, but it needs help from the build
  system for writing dependencies to the gch files, for writing build
  commands for the gch files, and for figuring out the location of the gch
  files.
  """

  # Maps a source-file extension to the precompiled-header language key.
  _EXT_TO_LANG = {
      '.c': 'c',
      '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
      '.m': 'm',
      '.mm': 'mm',
  }

  def __init__(self, xcode_settings,
               gyp_path_to_build_path, gyp_path_to_build_output):
    """If xcode_settings is None, all methods on this class are no-ops.

    Args:
        gyp_path_to_build_path: A function that takes a gyp-relative path,
            and returns a path relative to the build directory.
        gyp_path_to_build_output: A function that takes a gyp-relative path
            and a language code ('c', 'cc', 'm', or 'mm'), and that returns
            a path to where the output of precompiling that path for that
            language should be placed (without the trailing '.gch').
    """
    # This doesn't support per-configuration prefix headers. Good enough
    # for now.
    self.header = None
    self.compile_headers = False
    if xcode_settings:
      self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
      self.compile_headers = xcode_settings.GetPerTargetSetting(
          'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
    self.compiled_headers = {}
    if self.header:
      if self.compile_headers:
        # Note: outputs are derived from the gyp-relative header path,
        # before |self.header| is remapped below.
        for lang in ('c', 'cc', 'm', 'mm'):
          self.compiled_headers[lang] = gyp_path_to_build_output(
              self.header, lang)
      self.header = gyp_path_to_build_path(self.header)

  def _CompiledHeader(self, lang, arch):
    assert self.compile_headers
    compiled = self.compiled_headers[lang]
    return compiled + '.' + arch if arch else compiled

  def GetInclude(self, lang, arch=None):
    """Gets the cflags to include the prefix header for language |lang|."""
    if self.compile_headers and lang in self.compiled_headers:
      return '-include %s' % self._CompiledHeader(lang, arch)
    if self.header:
      return '-include %s' % self.header
    return ''

  def _Gch(self, lang, arch):
    """Returns the actual file name of the prefix header for language |lang|."""
    assert self.compile_headers
    return self._CompiledHeader(lang, arch) + '.gch'

  def GetObjDependencies(self, sources, objs, arch=None):
    """Given a list of source files and the corresponding object files,
    returns a list of (source, object, gch) tuples, where |gch| is the
    build-directory relative path to the gch file each object file depends
    on. |compilable[i]| has to be the source file belonging to |objs[i]|."""
    if not (self.header and self.compile_headers):
      return []
    deps = []
    for source, obj in zip(sources, objs):
      lang = self._EXT_TO_LANG.get(os.path.splitext(source)[1])
      if lang:
        deps.append((source, obj, self._Gch(lang, arch)))
    return deps

  def GetPchBuildCommands(self, arch=None):
    """Returns [(path_to_gch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory.
    """
    if not (self.header and self.compile_headers):
      return []
    return [(self._Gch(lang, arch), '-x %s' % lang_flag, lang, self.header)
            for lang, lang_flag in (('c', 'c-header'),
                                    ('cc', 'c++-header'),
                                    ('m', 'objective-c-header'),
                                    ('mm', 'objective-c++-header'))]
def XcodeVersion():
  """Returns a tuple of version and build version of installed Xcode,
  e.g. ('0463', '4H1503'); cached in the XCODE_VERSION_CACHE global."""
  # `xcodebuild -version` output looks like
  #    Xcode 4.6.3
  #    Build version 4H1503
  # or like
  #    Xcode 3.2.6
  #    Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
  #    BuildVersion: 10M2518
  # Convert that to '0463', '4H1503'.
  global XCODE_VERSION_CACHE
  if XCODE_VERSION_CACHE:
    return XCODE_VERSION_CACHE
  try:
    version_list = GetStdout(['xcodebuild', '-version']).splitlines()
    # In some circumstances xcodebuild exits 0 but doesn't return
    # the right results; for example, a user on 10.7 or 10.8 with
    # a bogus path set via xcode-select
    # In that case this may be a CLT-only install so fall back to
    # checking that version.
    if len(version_list) < 2:
      raise GypError("xcodebuild returned unexpected results")
  except Exception:
    # Was a bare `except:`; catches both the GypError raised above and
    # failures from GetStdout itself.
    version = CLTVersion()
    if version:
      version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
    else:
      raise GypError("No Xcode or CLT version detected!")
    # The CLT has no build information, so we return an empty string.
    version_list = [version, '']
  version = version_list[0]
  build = version_list[-1]
  # Be careful to convert "4.2" to "0420":
  version = version.split()[-1].replace('.', '')
  version = (version + '0' * (3 - len(version))).zfill(4)
  if build:
    # "Build version 4H1503" -> "4H1503".
    build = build.split()[-1]
  XCODE_VERSION_CACHE = (version, build)
  return XCODE_VERSION_CACHE
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
  """Returns the version of command-line tools from pkgutil.

  Returns None when no known CLT package is installed."""
  # pkgutil output looks like
  #   package-id: com.apple.pkg.CLTools_Executables
  #   version: 5.0.1.0.1.1382131676
  #   volume: /
  #   location: /
  #   install-time: 1382544035
  #   groups: com.apple.FindSystemFiles.pkg-group ...
  STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
  FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
  MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"

  regex = re.compile('version: (?P<version>.+)')
  for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
    try:
      output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
      return re.search(regex, output).groupdict()['version']
    except Exception:
      # Was a bare `except:`; GetStdout raises GypError when the package
      # is absent, and re.search returns None (-> AttributeError) when
      # the version line is missing. Try the next package id.
      continue
  return None  # explicit: no known CLT package found
def GetStdout(cmdlist):
  """Returns the content of standard output returned by invoking |cmdlist|.

  Raises |GypError| if the command return with a non-zero return code."""
  job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
  # NOTE(review): on Python 2 communicate() returns str; on Python 3 this
  # would be bytes and the concatenations below would need a decode.
  out = job.communicate()[0]
  if job.returncode != 0:
    sys.stderr.write(out + '\n')
    raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
  return out.rstrip('\n')  # drop the trailing newline most tools emit
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of the
  target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precendence.
  """
  # The xcode generator special-cases global xcode_settings and does something
  # that amounts to merging in the global xcode_settings into each local
  # xcode_settings dict.
  global_settings = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' not in config:
      continue
    merged = dict(global_settings)
    merged.update(config['xcode_settings'])
    config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
  """Returns if |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  # Evaluate mac_bundle first so a malformed value raises even off-mac.
  is_mac_bundle = int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac'
  if is_mac_bundle:
    # A bundle has to wrap an actual product; 'none' targets build nothing.
    assert spec['type'] != 'none', (
        'mac_bundle targets cannot have type none (target "%s")' %
        spec['target_name'])
  return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
  """
  dest = os.path.join(product_dir, xcode_settings.GetBundleResourceFolder())
  for res in resources:
    # The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangable.
    assert ' ' not in res, (
        "Spaces in resource filenames not supported (%s)" % res)

    res_dir, res_file = os.path.split(res)
    # Localized resources live in a '<lang>.lproj' directory; mirror that
    # directory under the destination.
    lproj = os.path.split(res_dir)[1]
    if lproj.endswith('.lproj'):
      output = os.path.join(dest, lproj, res_file)
    else:
      output = os.path.join(dest, res_file)

    # Compiled XIB files are referred to by .nib.
    if output.endswith('.xib'):
      output = os.path.splitext(output)[0] + '.nib'
    # Compiled storyboard files are referred to by .storyboardc.
    if output.endswith('.storyboard'):
      output = os.path.splitext(output)[0] + '.storyboardc'

    yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
  """Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
    build directory,
  * |dest_plist| is the destination plist path, relative to the
    build directory,
  * |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed,
  * |extra_env| is a dict of env variables that should be exported when
    invoking |mac_tool copy-info-plist|.

  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build direcotry.
  """
  info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
  if not info_plist:
    return None, None, [], {}

  # The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangable.
  assert ' ' not in info_plist, (
      "Spaces in Info.plist filenames not supported (%s)" % info_plist)

  info_plist = gyp_path_to_build_path(info_plist)

  # If explicitly set to preprocess the plist, invoke the C preprocessor and
  # specify any defines as -D flags.
  defines = []
  if xcode_settings.GetPerTargetSetting(
      'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    # Create an intermediate file based on the path.
    defines = shlex.split(xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))

  dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
  extra_env = xcode_settings.GetPerTargetSettings()

  return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
  """Return the environment variables that Xcode would set. See
  http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
  for a full list.

  Args:
      xcode_settings: An XcodeSettings object. If this is None, this function
          returns an empty dict.
      built_products_dir: Absolute path to the built products dir.
      srcroot: Absolute path to the source root.
      configuration: The build configuration name.
      additional_settings: An optional dict with more values to add to the
          result. NOTE(review): this dict is mutated in place and returned;
          the computed env values override caller-supplied keys.
  """
  if not xcode_settings: return {}

  # This function is considered a friend of XcodeSettings, so let it reach into
  # its implementation details.
  spec = xcode_settings.spec

  # These are filled in on a as-needed basis.
  env = {
    'BUILT_PRODUCTS_DIR' : built_products_dir,
    'CONFIGURATION' : configuration,
    'PRODUCT_NAME' : xcode_settings.GetProductName(),
    # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
    'SRCROOT' : srcroot,
    'SOURCE_ROOT': '${SRCROOT}',
    # This is not true for static libraries, but currently the env is only
    # written for bundles:
    'TARGET_BUILD_DIR' : built_products_dir,
    'TEMP_DIR' : '${TMPDIR}',
  }
  # SDKROOT: explicit per-config setting wins; otherwise empty (may be
  # filled in below for Xcode >= 5).
  if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
    env['SDKROOT'] = xcode_settings._SdkPath(configuration)
  else:
    env['SDKROOT'] = ''

  if spec['type'] in (
      'executable', 'static_library', 'shared_library', 'loadable_module'):
    env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
    env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
    env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
    mach_o_type = xcode_settings.GetMachOType()
    if mach_o_type:
      env['MACH_O_TYPE'] = mach_o_type
    env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
  if xcode_settings._IsBundle():
    # Bundle-layout variables (Contents/, Resources/, Info.plist, ...).
    env['CONTENTS_FOLDER_PATH'] = \
      xcode_settings.GetBundleContentsFolderPath()
    env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
        xcode_settings.GetBundleResourceFolder()
    env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
    env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

  install_name = xcode_settings.GetInstallName()
  if install_name:
    env['LD_DYLIB_INSTALL_NAME'] = install_name
  install_name_base = xcode_settings.GetInstallNameBase()
  if install_name_base:
    env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
  # Xcode >= 5 defaults SDKROOT to the newest installed SDK when unset.
  if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
    sdk_root = xcode_settings._SdkRoot(configuration)
    if not sdk_root:
      sdk_root = xcode_settings._XcodeSdkPath('')
    if sdk_root is None:
      sdk_root = ''
    env['SDKROOT'] = sdk_root

  if not additional_settings:
    additional_settings = {}
  else:
    # Flatten lists to strings.
    for k in additional_settings:
      if not isinstance(additional_settings[k], str):
        additional_settings[k] = ' '.join(additional_settings[k])
  additional_settings.update(env)

  # Normalize $FOO / $(FOO) references to ${FOO} across all values.
  for k in additional_settings:
    additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

  return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
  """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
  expansions list. If the variable expands to something that references
  another variable, this variable is expanded as well if it's in env --
  until no variables present in env are left."""
  # Walk the list back-to-front: later (dependent) variables are substituted
  # first, so references they introduce to earlier variables get expanded
  # by the remaining iterations.
  for key, value in reversed(expansions):
    for pattern in ('${%s}' % key, '$(%s)' % key, '$%s' % key):
      string = string.replace(pattern, value)
  return string
def _TopologicallySortedEnvVarKeys(env):
  """Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
  env such that key2 is after key1 in L if env[key2] refers to env[key1].

  Throws an Exception in case of dependency cycles.
  """
  # Since environment variables can refer to other variables, the evaluation
  # order is important. Below is the logic to compute the dependency graph
  # and sort it.
  regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_varible.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    # We can then reverse the result of the topological sort at the end.
    # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
    matches = set([v for v in regex.findall(env[node]) if v in env])
    for dependee in matches:
      assert '${' not in dependee, 'Nested variables not supported: ' + dependee
    return matches

  try:
    # Topologically sort, and then reverse, because we used an edge definition
    # that's inverted from the expected result of this function (see comment
    # above).
    order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
    order.reverse()
    return order
  # Python 2 except syntax; CycleError carries the offending nodes.
  except gyp.common.CycleError, e:
    raise GypError(
        'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
  """Returns the Xcode environment as an ordered list of (key, value) pairs,
  sorted so that every variable appears after any variables it references."""
  env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot,
                     configuration, additional_settings)
  order = _TopologicallySortedEnvVarKeys(env)
  return [(key, env[key]) for key in order]
def GetSpecPostbuildCommands(spec, quiet=False):
  """Returns the list of postbuilds explicitly defined on |spec|, in a form
  executable by a shell."""
  commands = []
  for postbuild in spec.get('postbuilds', []):
    if not quiet:
      # Echo a progress line naming the target and the postbuild.
      commands.append('echo POSTBUILD\\(%s\\) %s' % (
          spec['target_name'], postbuild['postbuild_name']))
    commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
  return commands
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
  """Clone all targets and append -iphoneos to the name. Configure these targets
  to build for iOS devices and use correct architectures for those builds."""
  for target_dict in targets.itervalues():
    toolset = target_dict['toolset']
    configs = target_dict['configurations']
    # Iterate over a snapshot of the dict since new configurations are
    # inserted while looping.
    for config_name, config_dict in dict(configs).iteritems():
      # '-iphoneos' gets a deep copy; '-iphonesimulator' aliases the
      # original configuration dict (no copy).
      iphoneos_config_dict = copy.deepcopy(config_dict)
      configs[config_name + '-iphoneos'] = iphoneos_config_dict
      configs[config_name + '-iphonesimulator'] = config_dict
      if toolset == 'target':
        # Only target-toolset device configs build against the iphoneos SDK.
        iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
  return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
  """If |target_dicts| contains any iOS targets, automatically create -iphoneos
  targets for iOS device builds."""
  if not _HasIOSTarget(target_dicts):
    return target_dicts
  return _AddIOSDeviceConfigurations(target_dicts)
| mit |
tianweizhang/nova | nova/api/openstack/compute/contrib/server_usage.py | 21 | 4148 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'server_usage')
class ServerUsageController(wsgi.Controller):
    """Controller extension that adds OS-SRV-USG:launched_at and
    OS-SRV-USG:terminated_at fields to server show/detail responses."""

    def __init__(self, *args, **kwargs):
        super(ServerUsageController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    def _extend_server(self, server, instance):
        # Copy the usage timestamps onto the response dict, prefixing each
        # key with the extension alias (e.g. 'OS-SRV-USG:launched_at').
        for k in ['launched_at', 'terminated_at']:
            key = "%s:%s" % (Server_usage.alias, k)
            # NOTE(danms): Historically, this timestamp has been generated
            # merely by grabbing str(datetime) of a TZ-naive object. The
            # only way we can keep that with instance objects is to strip
            # the tzinfo from the stamp and str() it.
            server[key] = (instance[k].replace(tzinfo=None)
                           if instance[k] else None)

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend a single-server 'show' response with usage timestamps."""
        context = req.environ['nova.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ServerUsageTemplate())
            server = resp_obj.obj['server']
            db_instance = req.get_db_instance(server['id'])
            # server['id'] is guaranteed to be in the cache due to
            # the core API adding it in its 'show' method.
            self._extend_server(server, db_instance)

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend every server in a 'detail' response with usage timestamps."""
        context = req.environ['nova.context']
        if authorize(context):
            # Attach our slave template to the response object
            resp_obj.attach(xml=ServerUsagesTemplate())
            servers = list(resp_obj.obj['servers'])
            for server in servers:
                db_instance = req.get_db_instance(server['id'])
                # server['id'] is guaranteed to be in the cache due to
                # the core API adding it in its 'detail' method.
                self._extend_server(server, db_instance)
class Server_usage(extensions.ExtensionDescriptor):
    """Adds launched_at and terminated_at on Servers."""

    # Extension metadata consumed by the nova extension framework; `alias`
    # is also the prefix used for the injected server attributes
    # (e.g. "OS-SRV-USG:launched_at").
    name = "ServerUsage"
    alias = "OS-SRV-USG"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "server_usage/api/v1.1")
    updated = "2013-04-29T00:00:00Z"

    def get_controller_extensions(self):
        # Hook the usage controller onto the core 'servers' resource so its
        # @wsgi.extends methods run after the core show/detail handlers.
        controller = ServerUsageController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
def make_server(elem):
    """Declare the usage-timestamp XML attributes on a server template element."""
    for attr in ('launched_at', 'terminated_at'):
        elem.set('{%s}%s' % (Server_usage.namespace, attr),
                 '%s:%s' % (Server_usage.alias, attr))
class ServerUsageTemplate(xmlutil.TemplateBuilder):
    """Slave template extending the single-server ('show') XML response."""

    def construct(self):
        server_elem = xmlutil.TemplateElement('server', selector='server')
        make_server(server_elem)
        nsmap = {Server_usage.alias: Server_usage.namespace}
        return xmlutil.SlaveTemplate(server_elem, 1, nsmap=nsmap)
class ServerUsagesTemplate(xmlutil.TemplateBuilder):
    """Slave template extending the server-list ('detail') XML response."""

    def construct(self):
        servers_elem = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(servers_elem, 'server',
                                                 selector='servers')
        make_server(server_elem)
        nsmap = {Server_usage.alias: Server_usage.namespace}
        return xmlutil.SlaveTemplate(servers_elem, 1, nsmap=nsmap)
| apache-2.0 |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_filterpolicy_binding.py | 1 | 14739 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_filterpolicy_binding(base_resource) :
    """ Binding class showing the filterpolicy that can be bound to lbvserver.

    Each instance represents one filter policy bound to one load-balancing
    virtual server. The classmethods below add, delete, fetch and count such
    bindings through the NITRO REST API. Code in this module follows the
    auto-generated NITRO SDK conventions (property-per-field, broad
    try/except re-raise blocks).
    """
    def __init__(self) :
        # Backing fields for the properties declared below.
        self._policyname = ""
        self._priority = 0
        self._sc = ""
        self._name = ""
        self._gotopriorityexpression = ""
        self._bindpoint = ""
        self._invoke = False
        self._labeltype = ""
        self._labelname = ""
        # Server-side resource count returned by count/count_filtered queries.
        self.___count = 0

    @property
    def priority(self) :
        """Priority.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @priority.setter
    def priority(self, priority) :
        """Priority.
        """
        try :
            self._priority = priority
        except Exception as e:
            raise e

    @property
    def gotopriorityexpression(self) :
        """Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
        * NEXT - Evaluate the policy with the next higher priority number.
        * END - End policy evaluation.
        * USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
        * A default syntax expression that evaluates to a number.
        If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
        * If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
        * If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
        * If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
        An UNDEF event is triggered if:
        * The expression is invalid.
        * The expression evaluates to a priority number that is numerically lower than the current policy's priority.
        * The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
        """
        try :
            return self._gotopriorityexpression
        except Exception as e:
            raise e

    @gotopriorityexpression.setter
    def gotopriorityexpression(self, gotopriorityexpression) :
        """Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
        * NEXT - Evaluate the policy with the next higher priority number.
        * END - End policy evaluation.
        * USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
        * A default syntax expression that evaluates to a number.
        If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
        * If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
        * If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
        * If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
        An UNDEF event is triggered if:
        * The expression is invalid.
        * The expression evaluates to a priority number that is numerically lower than the current policy's priority.
        * The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
        """
        try :
            self._gotopriorityexpression = gotopriorityexpression
        except Exception as e:
            raise e

    @property
    def policyname(self) :
        """Name of the policy bound to the LB vserver.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @policyname.setter
    def policyname(self, policyname) :
        """Name of the policy bound to the LB vserver.
        """
        try :
            self._policyname = policyname
        except Exception as e:
            raise e

    @property
    def name(self) :
        """Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
        CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
        CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def bindpoint(self) :
        """Bind point to which to bind the policy. Applicable only to compression, rewrite, and cache policies.
        """
        try :
            return self._bindpoint
        except Exception as e:
            raise e

    @bindpoint.setter
    def bindpoint(self, bindpoint) :
        """Bind point to which to bind the policy. Applicable only to compression, rewrite, and cache policies.
        """
        try :
            self._bindpoint = bindpoint
        except Exception as e:
            raise e

    @property
    def labeltype(self) :
        """Type of policy label to invoke. Applicable only to rewrite and cache policies. Available settings function as follows:
        * reqvserver - Evaluate the request against the request-based policies bound to the specified virtual server.
        * resvserver - Evaluate the response against the response-based policies bound to the specified virtual server.
        * policylabel - invoke the request or response against the specified user-defined policy label.
        """
        try :
            return self._labeltype
        except Exception as e:
            raise e

    @labeltype.setter
    def labeltype(self, labeltype) :
        """Type of policy label to invoke. Applicable only to rewrite and cache policies. Available settings function as follows:
        * reqvserver - Evaluate the request against the request-based policies bound to the specified virtual server.
        * resvserver - Evaluate the response against the response-based policies bound to the specified virtual server.
        * policylabel - invoke the request or response against the specified user-defined policy label.
        """
        try :
            self._labeltype = labeltype
        except Exception as e:
            raise e

    @property
    def labelname(self) :
        """Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
        """
        try :
            return self._labelname
        except Exception as e:
            raise e

    @labelname.setter
    def labelname(self, labelname) :
        """Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
        """
        try :
            self._labelname = labelname
        except Exception as e:
            raise e

    @property
    def invoke(self) :
        """Invoke policies bound to a virtual server or policy label.
        """
        try :
            return self._invoke
        except Exception as e:
            raise e

    @invoke.setter
    def invoke(self, invoke) :
        """Invoke policies bound to a virtual server or policy label.
        """
        try :
            self._invoke = invoke
        except Exception as e:
            raise e

    @property
    def sc(self) :
        """Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
        """
        # Read-only property: the NITRO API reports this flag but does not
        # accept it on this binding, hence no setter.
        try :
            return self._sc
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(lbvserver_filterpolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 indicates the session is no longer valid;
                # clear it so the next request re-authenticates.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.lbvserver_filterpolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        """Bind one filterpolicy (or a list of them) to an LB vserver.

        NOTE(review): bindings appear to be created via update_resource /
        update_bulk_request rather than an add call — presumably a NITRO SDK
        convention; verify against the SDK base_resource implementation.
        """
        try :
            if resource and type(resource) is not list :
                updateresource = lbvserver_filterpolicy_binding()
                updateresource.name = resource.name
                updateresource.policyname = resource.policyname
                updateresource.gotopriorityexpression = resource.gotopriorityexpression
                updateresource.bindpoint = resource.bindpoint
                updateresource.invoke = resource.invoke
                updateresource.labeltype = resource.labeltype
                updateresource.labelname = resource.labelname
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [lbvserver_filterpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].policyname = resource[i].policyname
                        updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
                        updateresources[i].bindpoint = resource[i].bindpoint
                        updateresources[i].invoke = resource[i].invoke
                        updateresources[i].labeltype = resource[i].labeltype
                        updateresources[i].labelname = resource[i].labelname
                    return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        """Unbind one filterpolicy (or a list of them) from an LB vserver.

        Only the identifying fields (name, policyname, bindpoint) are copied;
        the other binding attributes are not needed to address the binding.
        """
        try :
            if resource and type(resource) is not list :
                deleteresource = lbvserver_filterpolicy_binding()
                deleteresource.name = resource.name
                deleteresource.policyname = resource.policyname
                deleteresource.bindpoint = resource.bindpoint
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [lbvserver_filterpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].name = resource[i].name
                        deleteresources[i].policyname = resource[i].policyname
                        deleteresources[i].bindpoint = resource[i].bindpoint
                    return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name) :
        """ Use this API to fetch lbvserver_filterpolicy_binding resources.
        """
        try :
            obj = lbvserver_filterpolicy_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """ Use this API to fetch filtered set of lbvserver_filterpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = lbvserver_filterpolicy_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name) :
        """ Use this API to count lbvserver_filterpolicy_binding resources configured on NetScaler.
        """
        try :
            obj = lbvserver_filterpolicy_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count query returns a single resource whose ___count field
            # carries the server-side total.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """ Use this API to count the filtered set of lbvserver_filterpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = lbvserver_filterpolicy_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    # Enumerations of the legal string values for the corresponding fields.
    class Sc:
        ON = "ON"
        OFF = "OFF"

    class Bindpoint:
        REQUEST = "REQUEST"
        RESPONSE = "RESPONSE"

    class Labeltype:
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class lbvserver_filterpolicy_binding_response(base_response) :
    """Response envelope for lbvserver_filterpolicy_binding API calls.

    Holds the standard NITRO status fields plus a pre-sized list of
    binding objects that the payload formatter fills in.
    """
    def __init__(self, length=1) :
        # NITRO status fields populated from the wire response.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the result list with `length` empty bindings. (The
        # original code assigned this attribute twice — first to [] and then
        # immediately to this list; the dead first assignment is removed.)
        self.lbvserver_filterpolicy_binding = [lbvserver_filterpolicy_binding() for _ in range(length)]
| apache-2.0 |
T-002/pycast | pycast/tests/errormeasuretest.py | 1 | 9111 | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# required external modules
import unittest
# required modules from pycast
from pycast.errors.baseerrormeasure import BaseErrorMeasure
from pycast.common.timeseries import TimeSeries
from pycast.common.pycastobject import PyCastObject
from pycast.errors.meansquarederror import MeanSquaredError
class BaseErrorMeasureTest(unittest.TestCase):
    """Test class for the BaseErrorMeasure interface."""

    def initialization_test(self):
        """Test the BaseErrorMeasure initialization."""
        BaseErrorMeasure()
        # Percentages outside [0, 100] must raise ValueError.
        for percentage in [-1.2, -0.1, 100.1, 123.9]:
            try:
                BaseErrorMeasure(percentage)
            except ValueError:
                pass
            else:
                assert False # pragma: no cover
        # Valid percentages must be accepted without raising.
        for percentage in [0.0, 12.3, 53.4, 100.0]:
            try:
                BaseErrorMeasure(percentage)
            except ValueError: # pragma: no cover
                assert False # pragma: no cover

    def get_error_initialization_test(self):
        """Test the get_error of BaseErrorMeasure for the initialization exception."""
        # get_error() before initialize() must raise.
        bem = BaseErrorMeasure()
        try:
            bem.get_error()
        except StandardError:
            pass
        else:
            assert False # pragma: no cover

    def double_initialize_test(self):
        """Test for the error occurring when the same error measure is initialized twice."""
        data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]
        tsOrg = TimeSeries.from_twodim_list(data)
        tsCalc = TimeSeries.from_twodim_list(data)
        bem = BaseErrorMeasure()
        # Save the abstract methods so they can be restored afterwards.
        bem_calculate = bem._calculate
        bem_local_error = bem.local_error

        def return_zero(ignoreMe, ignoreMeToo):
            return 0

        # remove the NotImplementedErrors for initialization
        bem.local_error = return_zero
        bem._calculate = return_zero
        # correct initialize call
        bem.initialize(tsOrg, tsCalc)
        # incorrect initialize call
        for cnt in xrange(10):
            try:
                bem.initialize(tsOrg, tsCalc)
            except StandardError:
                pass
            else:
                assert False # pragma: no cover
        # NOTE(review): the restore below looks swapped — local_error receives
        # the saved _calculate and vice versa. Harmless here because `bem` is
        # discarded, but verify before reusing the instance.
        bem.local_error = bem_calculate
        bem._calculate = bem_local_error

    def initialize_test(self):
        """Test if calculate throws an error as expected."""
        data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]
        tsOrg = TimeSeries.from_twodim_list(data)
        tsCalc = TimeSeries.from_twodim_list(data)
        bem = BaseErrorMeasure()
        # The abstract base class must refuse to initialize.
        try:
            bem.initialize(tsOrg, tsCalc)
        except NotImplementedError:
            pass
        else:
            assert False # pragma: no cover
        # Initialization against an empty TimeSeries must fail gracefully.
        assert not bem.initialize(tsOrg, TimeSeries())

    def get_error_parameter_test(self):
        """Test for the parameter validity of get_error()."""
        data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]
        tsOrg = TimeSeries.from_twodim_list(data)
        tsCalc = TimeSeries.from_twodim_list(data)
        bem = BaseErrorMeasure()
        bem_calculate = bem._calculate
        bem_local_error = bem.local_error

        def return_zero(ignoreMe, ignoreMeToo, andMe=None, andMeToo=None):
            return 0

        # remove the NotImplementedErrors for initialization
        bem.local_error = return_zero
        bem._calculate = return_zero
        bem.initialize(tsOrg, tsCalc)
        # Restore the original (abstract) methods before exercising get_error.
        bem.local_error = bem_local_error
        bem._calculate = bem_calculate
        try:
            bem.get_error(10.0, 90.0)
        except NotImplementedError:
            pass
        else:
            assert False # pragma: no cover
        # Out-of-range start/end percentages must raise ValueError.
        for start in [-1.0, 80.0, 103.0]:
            for end in [-5.0, 10.0, 105.0]:
                try:
                    bem.get_error(start, end)
                except ValueError:
                    pass
                else:
                    assert False # pragma: no cover

    def local_error_test(self):
        """Test local_error of BaseErrorMeasure."""
        data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]
        tsOrg = TimeSeries.from_twodim_list(data)
        tsCalc = TimeSeries.from_twodim_list(data)
        bem = BaseErrorMeasure()
        # The abstract local_error must raise for every value pair.
        for idx in xrange(len(tsOrg)):
            try:
                bem.local_error([tsOrg[idx][1]], [tsCalc[idx][1]])
            except NotImplementedError:
                pass
            else:
                assert False # pragma: no cover

    def optimized_test(self):
        """Check if all tests are passed, using optimized implementations."""
        PyCastObject.enable_global_optimization()
        self.get_error_initialization_test()
        self.initialization_test()
        # self.initialize_test()
        self.double_initialize_test()
        PyCastObject.disable_global_optimization()

    def confidence_interval_test(self):
        # confidence_interval must validate its level and compute the
        # symmetric interval from the stored error values (None entries
        # are tolerated).
        bem = BaseErrorMeasure()
        bem._errorValues = [10, -5, 3, -4, None, 0, 2, -3]
        self.assertRaises(ValueError, bem.confidence_interval, -0.5)
        self.assertRaises(ValueError, bem.confidence_interval, 2)
        self.assertEquals(bem.confidence_interval(0.5), (-3.0, 2.0))
        self.assertEquals(bem.confidence_interval(0.1), (0.0, 0.0))

    def get_error_values_test(self):
        # _get_error_values must slice the stored errors by the optional
        # start/end dates and reject an end date before the first entry.
        bem = BaseErrorMeasure()
        bem._errorValues = [1, -1, 3, -5, 8]
        bem._errorDates = [1,2,3,4,5]
        self.assertEquals(bem._get_error_values(0,100, None, None), [1,-1,3,-5,8])
        self.assertEquals(bem._get_error_values(0,100, 2, None), [-1,3,-5,8])
        self.assertEquals(bem._get_error_values(0,100, None, 4), [1,-1,3,-5])
        self.assertEquals(bem._get_error_values(0,100, 2, 4), [-1,3,-5])
        self.assertRaises(ValueError, bem._get_error_values, 0, 100, None, 0)

    def number_of_comparisons_test(self):
        """ Test BaseErrorMeasure.initialize for behaviour if not enough dates match."""
        dataOrg = [[0,0],[1,1],[2,2],[3,3],[4,4],[5,5],[6,6],[7,7],[8,8],[9,9]]
        dataCalc = [[0,0],[1,1],[2,2],[3,3],[4,4],[5.1,5],[6.1,6],[7.1,7],[8.1,8],[9.1,9]]
        tsOrg = TimeSeries.from_twodim_list(dataOrg)
        tsCalc = TimeSeries.from_twodim_list(dataCalc)
        bem = BaseErrorMeasure(60.0)
        #prevent NotImplementedError
        bem.local_error = lambda a,b: 1
        mse = MeanSquaredError(80.0)
        # only 50% of the original TimeSeries have a corresponding partner
        if mse.initialize(tsOrg, tsCalc):
            assert False # pragma: no cover
        # A series compared against itself matches 100% and must initialize.
        if not mse.initialize(tsOrg, tsOrg):
            assert False # pragma: no cover

    def error_calculation_test(self):
        """Test for a valid error calculation."""
        dataOrg = [[0,0], [1,1], [2,2], [3,3], [4,4], [5,5], [6, 6], [7,7], [8,8], [9,9]]
        dataCalc = [[0,1], [1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [8.1,8], [9,8]]
        # difference: 1 2 3 3 1 0 NA 4 NA 1
        # local errors: 1 4 9 9 1 0 NA 16 NA 1
        tsOrg = TimeSeries.from_twodim_list(dataOrg)
        tsCalc = TimeSeries.from_twodim_list(dataCalc)
        # NOTE(review): the two constructions below duplicate the ones above
        # and are redundant; kept to preserve the original statements.
        tsOrg = TimeSeries.from_twodim_list(dataOrg)
        tsCalc = TimeSeries.from_twodim_list(dataCalc)
        mse = MeanSquaredError(80.0)
        mse.initialize(tsOrg, tsCalc)
        # mean of the 8 comparable local errors: (1+4+9+9+1+0+16+1)/8 = 5.125
        assert str(mse.get_error()) == "5.125"

    # def start_and_enddate_test(self):
    # """Testing for startDate, endDate exceptions."""
    # data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]
    # tsOrg = TimeSeries.from_twodim_list(data)
    # tsCalc = TimeSeries.from_twodim_list(data)
    # bem = MeanSquaredError()
    # self.assertEquals(bem.initialize(tsOrg, tsCalc), False)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.