repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
makiftasova/hangoutsbot | hangupsbot/plugins/botaliases.py | 4 | 2560 | """aliases for the bot"""
import logging
import plugins
logger = logging.getLogger(__name__)
def _initialise(bot):
    """Load bot aliases from memory, creating defaults if none exist.

    Defaults are "/bot", "/<first name fragment>" and "/<chat_id>".
    The resulting list is stored on bot._handlers.bot_command and the
    "botalias" user command is registered.
    """
    if bot.memory.exists(["bot.command_aliases"]):
        bot_command_aliases = bot.memory.get("bot.command_aliases")
    else:
        myself = bot.user_self()
        # basic
        bot_command_aliases = ["/bot"]
        # /<first name fragment>
        first_fragment = myself["full_name"].split()[0].lower()
        if first_fragment and first_fragment != "unknown":
            bot_command_aliases.append("/" + first_fragment)
        # /<chat_id>
        bot_command_aliases.append("/" + myself["chat_id"])
        bot.memory.set_by_path(["bot.command_aliases"], bot_command_aliases)
        bot.memory.save()

    # Defend against corrupted memory contents.
    if not isinstance(bot_command_aliases, list):
        bot_command_aliases = []
    if not bot_command_aliases:
        # BUG FIX: was "bot.append('/bot')", which would raise
        # AttributeError - the fallback alias must go into the list.
        bot_command_aliases.append("/bot")

    bot._handlers.bot_command = bot_command_aliases
    logger.info("aliases: {}".format(bot_command_aliases))

    plugins.register_user_command(["botalias"])
    return []
def botalias(bot, event, *args):
    """Show, add and remove bot command aliases.

    No arguments: list the current aliases.  One argument: add it as an
    alias.  "remove <alias> [<alias> ...]": remove the listed aliases.
    Changes are restricted to configured admins; the alias list is never
    allowed to become empty.
    """
    if not args:
        yield from bot.coro_send_message(
            event.conv,
            _("<i>bot alias: {}</i>").format(
                ", ".join(bot._handlers.bot_command)))
    else:
        # get_config_suboption may return None when no admins are set;
        # treat that the same as "not an admin".
        admins_list = bot.get_config_suboption(event.conv_id, 'admins') or []
        if event.user_id.chat_id in admins_list:
            _aliases = list(bot._handlers.bot_command)
            if len(args) == 1:
                # add a single alias (ignored if already present)
                if args[0].lower() not in _aliases:
                    _aliases.append(args[0].lower())
            elif args[0].lower() == "remove":
                # remove aliases, supply list to remove more than one;
                # skip unknown aliases instead of raising ValueError
                for _alias in args[1:]:
                    if _alias.lower() in _aliases:
                        _aliases.remove(_alias.lower())
            if _aliases != bot._handlers.bot_command:
                if not _aliases:
                    # never leave the bot without at least one alias
                    _aliases = ["/bot"]
                bot.memory.set_by_path(["bot.command_aliases"], _aliases)
                bot.memory.save()
                bot._handlers.bot_command = _aliases
                # BUG FIX: this is a generator function, so the recursive
                # call must be delegated to with "yield from"; a bare call
                # only created a generator and the listing was never sent.
                yield from botalias(bot, event)  # run with no arguments
        else:
            yield from bot.coro_send_message(
                event.conv,
                _("<i>not authorised to change bot alias</i>"))
| agpl-3.0 |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/site-packages/pip/_vendor/html5lib/treewalkers/_base.py | 310 | 6919 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, string_types
import gettext
_ = gettext.gettext
from xml.dom import Node
# Node-type constants mirroring xml.dom.Node's numeric node-type codes.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
# Marker for node types with no DOM equivalent (handled by unknown()).
UNKNOWN = "<#UNKNOWN#>"

from ..constants import voidElements, spaceCharacters
# Collapse the iterable of space characters into one str so it can be
# passed to str.lstrip()/str.rstrip() in TreeWalker.text().
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
    """Coerce *s* to text; None becomes "" (or stays None when
    blank_if_none is false)."""
    if s is None:
        return "" if blank_if_none else None
    if isinstance(s, text_type):
        return s
    return text_type(s)
def is_text_or_none(string):
    """Return True when *string* is either None or a string instance."""
    return isinstance(string, string_types) or string is None
class TreeWalker(object):
    """Base class for tree walkers.

    A walker iterates a parsed tree and yields serializer token dicts.
    Subclasses implement __iter__; the helper methods here build the
    individual tokens and assert that names/values are text.
    """

    def __init__(self, tree):
        # The tree to walk over; interpretation is subclass-specific.
        self.tree = tree

    def __iter__(self):
        raise NotImplementedError

    def error(self, msg):
        """Build a SerializeError token carrying *msg*."""
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Yield an EmptyTag token (plus an error token if the void
        element unexpectedly has children)."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())

        yield {"type": "EmptyTag", "name": to_text(name, False),
               "namespace": to_text(namespace),
               "data": attrs}
        if hasChildren:
            yield self.error(_("Void element has children"))

    def startTag(self, namespace, name, attrs):
        """Build a StartTag token with attribute keys/values coerced to
        text."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())

        return {"type": "StartTag",
                "name": text_type(name),
                "namespace": to_text(namespace),
                "data": dict(((to_text(namespace, False), to_text(name)),
                              to_text(value, False))
                             for (namespace, name), value in attrs.items())}

    def endTag(self, namespace, name):
        """Build an EndTag token."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        # BUG FIX: the assertion message previously reported
        # type(namespace) instead of type(name).
        assert isinstance(name, string_types), type(name)

        return {"type": "EndTag",
                "name": to_text(name, False),
                "namespace": to_text(namespace),
                "data": {}}

    def text(self, data):
        """Split *data* into SpaceCharacters / Characters tokens so that
        leading and trailing whitespace are reported separately."""
        assert isinstance(data, string_types), type(data)

        data = to_text(data)
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}

    def comment(self, data):
        """Build a Comment token."""
        assert isinstance(data, string_types), type(data)
        return {"type": "Comment", "data": text_type(data)}

    def doctype(self, name, publicId=None, systemId=None, correct=True):
        """Build a Doctype token; all fields are coerced to text."""
        assert is_text_or_none(name), type(name)
        assert is_text_or_none(publicId), type(publicId)
        assert is_text_or_none(systemId), type(systemId)

        return {"type": "Doctype",
                "name": to_text(name),
                "publicId": to_text(publicId),
                "systemId": to_text(systemId),
                "correct": to_text(correct)}

    def entity(self, name):
        """Build an Entity token."""
        assert isinstance(name, string_types), type(name)
        return {"type": "Entity", "name": text_type(name)}

    def unknown(self, nodeType):
        """Build an error token for an unrecognised node type."""
        return self.error(_("Unknown node type: ") + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker that traverses iteratively via subclass-supplied
    child/sibling/parent accessors, so arbitrarily deep trees are safe
    from recursion limits."""

    def getNodeDetails(self, node):
        # Return a tuple (node_type, *type_specific_details).
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        # Depth-first, pre-order traversal starting at the tree root.
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False

            if type == DOCTYPE:
                yield self.doctype(*details)

            elif type == TEXT:
                for token in self.text(*details):
                    yield token

            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if name in voidElements:
                    # Void elements are emitted as EmptyTag and never
                    # descended into (stray children produce an error token).
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)

            elif type == COMMENT:
                yield self.comment(details[0])

            elif type == ENTITY:
                yield self.entity(details[0])

            elif type == DOCUMENT:
                hasChildren = True

            else:
                yield self.unknown(details[0])

            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None

            if firstChild is not None:
                currentNode = firstChild
            else:
                # No children: emit EndTags while climbing back up until a
                # next sibling is found, or stop at the root.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
| mit |
ojake/django | tests/m2m_through_regress/models.py | 166 | 2643 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Forward declared intermediate model
@python_2_unicode_compatible
class Membership(models.Model):
    # Explicit "through" model linking Person to Group (see Group.members).
    person = models.ForeignKey('Person')
    group = models.ForeignKey('Group')
    price = models.IntegerField(default=100)

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)
# using custom id column to test ticket #11107
@python_2_unicode_compatible
class UserMembership(models.Model):
    # Non-default primary-key column name exercises the #11107 regression.
    id = models.AutoField(db_column='usermembership_id', primary_key=True)
    user = models.ForeignKey(User)
    group = models.ForeignKey('Group')
    price = models.IntegerField(default=100)

    def __str__(self):
        return "%s is a user and member of %s" % (self.user.username, self.group.name)
@python_2_unicode_compatible
class Person(models.Model):
    # Target of Group.members via the Membership through-model.
    name = models.CharField(max_length=128)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    name = models.CharField(max_length=128)
    # Membership object defined as a class
    members = models.ManyToManyField(Person, through=Membership)
    # Through model referenced by string name (forward declaration form).
    user_members = models.ManyToManyField(User, through='UserMembership')

    def __str__(self):
        return self.name
# A set of models that use an non-abstract inherited model as the 'through' model.
class A(models.Model):
    a_text = models.CharField(max_length=20)
class ThroughBase(models.Model):
    # Concrete (non-abstract) base holding both foreign keys.
    a = models.ForeignKey(A)
    b = models.ForeignKey('B')
class Through(ThroughBase):
    # Inherits both FKs from the non-abstract ThroughBase.
    extra = models.CharField(max_length=20)
class B(models.Model):
    b_text = models.CharField(max_length=20)
    a_list = models.ManyToManyField(A, through=Through)
# Using to_field on the through model
@python_2_unicode_compatible
class Car(models.Model):
    # Unique+nullable so CarDriver can target it via to_field='make'.
    make = models.CharField(max_length=20, unique=True, null=True)
    drivers = models.ManyToManyField('Driver', through='CarDriver')

    def __str__(self):
        return "%s" % self.make
@python_2_unicode_compatible
class Driver(models.Model):
    # Unique+nullable so CarDriver can target it via to_field='name'.
    name = models.CharField(max_length=20, unique=True, null=True)

    def __str__(self):
        return "%s" % self.name

    class Meta:
        ordering = ('name',)
@python_2_unicode_compatible
class CarDriver(models.Model):
    # Through model whose FKs point at non-pk unique columns (to_field).
    car = models.ForeignKey('Car', to_field='make')
    driver = models.ForeignKey('Driver', to_field='name')

    def __str__(self):
        return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
| bsd-3-clause |
lowitty/server | libsLinux/twisted/logger/_format.py | 12 | 8176 | # -*- test-case-name: twisted.logger.test.test_format -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tools for formatting logging events.
"""
from datetime import datetime as DateTime
from twisted.python.compat import unicode
from twisted.python.failure import Failure
from twisted.python.reflect import safe_repr
from twisted.python._tzhelper import FixedOffsetTimeZone
from ._flatten import flatFormat, aFormatter
# RFC 3339 timestamp layout, e.g. u"2013-10-22T14:19:11-0700".
timeFormatRFC3339 = "%Y-%m-%dT%H:%M:%S%z"
def formatEvent(event):
    """
    Format an event as a L{unicode}, using the format in
    C{event["log_format"]}.

    This implementation should never raise an exception; if the formatting
    cannot be done, the returned string will describe the event generically
    so that a useful message is emitted regardless.

    @param event: A logging event.
    @type event: L{dict}

    @return: A formatted string.
    @rtype: L{unicode}
    """
    try:
        if "log_flattened" in event:
            return flatFormat(event)

        fmt = event.get("log_format", None)
        if fmt is None:
            return u""

        # Normalize the format to unicode before interpolating.
        if isinstance(fmt, bytes):
            # Bytes are assumed to be UTF-8 encoded.
            fmt = fmt.decode("utf-8")
        elif not isinstance(fmt, unicode):
            raise TypeError(
                "Log format must be unicode or bytes, not {0!r}".format(fmt)
            )

        return formatWithCall(fmt, event)
    except Exception as error:
        return formatUnformattableEvent(event, error)
def formatUnformattableEvent(event, error):
    """
    Produce a generic L{unicode} description of an event that could not be
    formatted, including the error that prevented formatting.

    @param event: A logging event.
    @type event: L{dict}

    @param error: The formatting error.
    @type error: L{Exception}

    @return: A formatted string.
    @rtype: L{unicode}
    """
    try:
        return (
            u"Unable to format event {event!r}: {error}"
            .format(event=event, error=error)
        )
    except BaseException:
        # Even repr()ing the event failed; salvage whatever key/value text
        # can still be extracted so the offending logger can be located.
        failure = Failure()

        pairs = [
            u" = ".join((safe_repr(key), safe_repr(value)))
            for key, value in event.items()
        ]
        text = u", ".join(pairs)

        return (
            u"MESSAGE LOST: unformattable object logged: {error}\n"
            u"Recoverable data: {text}\n"
            u"Exception during formatting:\n{failure}"
            .format(error=safe_repr(error), failure=failure, text=text)
        )
def formatTime(when, timeFormat=timeFormatRFC3339, default=u"-"):
    """
    Format a POSIX timestamp as text, in the local (fixed-offset) timezone.

    @param when: A timestamp, or C{None}.
    @type when: L{float}

    @param timeFormat: A C{strftime}-style format, or C{None}.
    @type timeFormat: L{unicode} or C{None}

    @param default: Text returned when C{when} or C{timeFormat} is C{None}.
    @type default: L{unicode}

    @return: A formatted time.
    @rtype: L{unicode}
    """
    if when is None or timeFormat is None:
        return default

    tz = FixedOffsetTimeZone.fromLocalTimeStamp(when)
    return unicode(DateTime.fromtimestamp(when, tz).strftime(timeFormat))
def formatEventAsClassicLogText(event, formatTime=formatTime):
    """
    Format an event as one line of traditional log file text:
    C{u"{timeStamp} [{system}] {event}\\n"}.

    C{timeStamp} is computed by calling C{formatTime} on the event's
    C{"log_time"} value.  C{system} is the event's C{"log_system"} value if
    set; otherwise the C{"log_namespace"} and C{"log_level"} joined by a
    C{u"#"}, each defaulting to C{u"-"} when unset.  The event text itself
    comes from L{formatEvent}, with embedded newlines indented so
    continuation lines stay visually grouped.

    @param event: an event.
    @type event: L{dict}

    @param formatTime: A time formatter.
    @type formatTime: L{callable} that takes an C{event} argument and
        returns a L{unicode}

    @return: A formatted event, or C{None} if no output is appropriate.
    @rtype: L{unicode} or C{None}
    """
    eventText = formatEvent(event)
    if not eventText:
        return None

    eventText = eventText.replace(u"\n", u"\n\t")
    timeStamp = formatTime(event.get("log_time", None))

    system = event.get("log_system", None)
    if system is None:
        level = event.get("log_level", None)
        levelName = u"-" if level is None else level.name
        system = u"{namespace}#{level}".format(
            namespace=event.get("log_namespace", u"-"),
            level=levelName,
        )
    else:
        try:
            system = unicode(system)
        except Exception:
            system = u"UNFORMATTABLE"

    return u"{timeStamp} [{system}] {event}\n".format(
        timeStamp=timeStamp,
        system=system,
        event=eventText,
    )
class CallMapping(object):
    """
    Read-only mapping in which a key ending with C{"()"} invokes the value
    looked up under the key minus that suffix, rather than returning it
    directly.

    Implementation support for L{formatWithCall}.
    """
    def __init__(self, submapping):
        """
        @param submapping: Another read-only mapping used to look up items.
        """
        self._submapping = submapping

    def __getitem__(self, key):
        """
        Look up an item in the submapping; if C{key} ends with C{"()"},
        strip the suffix, look up the remainder, and call the result.
        """
        if key.endswith(u"()"):
            return self._submapping[key[:-2]]()
        return self._submapping[key]
def formatWithCall(formatString, mapping):
    """
    Format a string like L{unicode.format}, but:

        - taking only a name mapping; no positional arguments

        - with the additional syntax that an empty set of parentheses
          correspond to a formatting item that should be called, and its
          result C{str}'d, rather than calling C{str} on the element
          directly as normal.

    For example::

        >>> formatWithCall("{string}, {function()}.",
        ...                dict(string="just a string",
        ...                     function=lambda: "a function"))
        'just a string, a function.'

    @param formatString: A PEP-3101 format string.
    @type formatString: L{unicode}

    @param mapping: A L{dict}-like object to format.

    @return: The string with formatted values interpolated.
    @rtype: L{unicode}
    """
    formatted = aFormatter.vformat(formatString, (), CallMapping(mapping))
    return unicode(formatted)
| mit |
zstackorg/zstack-woodpecker | integrationtest/vm/simulator/multi_iso/test_2_vm_multi_iso.py | 2 | 1455 | '''
New Simulator Test for Multi-ISO.
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import time
# Module-level fixtures: state tracker, simulator test stub and the
# multi-ISO helper under test (module scope so error_cleanup() can reach them).
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
multi_iso = test_stub.MulISO()
def test():
    """Attach three ISO images to two simulator VMs, then detach them all."""
    multi_iso.add_iso_image()
    multi_iso.create_vm(vm2=True)
    test_obj_dict.add_vm(multi_iso.vm1)
    test_obj_dict.add_vm(multi_iso.vm2)
    multi_iso.get_all_iso_uuids()
    vm2_uuid = multi_iso.vm2.vm.uuid

    # Attach the first three ISOs to vm1, then the same three to vm2.
    for index in range(3):
        multi_iso.attach_iso(multi_iso.iso_uuids[index])
    for index in range(3):
        multi_iso.attach_iso(multi_iso.iso_uuids[index], vm2_uuid)

    # Detach in a deliberately interleaved order across both VMs.
    multi_iso.detach_iso(multi_iso.iso_uuids[0], vm2_uuid)
    multi_iso.detach_iso(multi_iso.iso_uuids[1])
    multi_iso.detach_iso(multi_iso.iso_uuids[2], vm2_uuid)
    multi_iso.detach_iso(multi_iso.iso_uuids[0])
    multi_iso.detach_iso(multi_iso.iso_uuids[2])
    multi_iso.detach_iso(multi_iso.iso_uuids[1], vm2_uuid)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Attach 3 ISO Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort release of VMs/resources left behind when test() raises."""
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
kontrafiktion/ansible | lib/ansible/plugins/filter/mathstuff.py | 9 | 3968 | # (c) 2014, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import math
import collections

# Hashable moved to collections.abc in Python 3.3 and the old
# collections.Hashable alias was removed in Python 3.10; import it in a
# version-tolerant way for the set-theory filters below.
try:
    from collections.abc import Hashable
except ImportError:  # Python 2 / very old Python 3
    from collections import Hashable

from ansible import errors
def unique(a):
    """Return the distinct elements of *a*.

    Hashable inputs (e.g. strings, tuples) yield a set; non-hashable
    inputs (e.g. lists) yield an order-preserving list.
    """
    # BUG FIX: collections.Hashable was removed in Python 3.10; the
    # module-level import now provides Hashable portably.
    if isinstance(a, Hashable):
        c = set(a)
    else:
        c = []
        for x in a:
            if x not in c:
                c.append(x)
    return c
def intersect(a, b):
    """Return the elements common to *a* and *b* (set for hashable
    inputs, order-preserving list otherwise)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        c = set(a) & set(b)
    else:
        # BUG FIX: on Python 3, filter() returns an iterator, which is
        # itself hashable and made unique() take the set path; a list
        # comprehension keeps the order-preserving list behaviour.
        c = unique([x for x in a if x in b])
    return c
def difference(a, b):
    """Return the elements of *a* not present in *b* (set for hashable
    inputs, order-preserving list otherwise)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        c = set(a) - set(b)
    else:
        # BUG FIX: list comprehension instead of filter() so Python 3
        # keeps the order-preserving list behaviour (see intersect()).
        c = unique([x for x in a if x not in b])
    return c
def symmetric_difference(a, b):
    """Return the elements in exactly one of *a* and *b* (set for
    hashable inputs, order-preserving list otherwise)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        c = set(a) ^ set(b)
    else:
        # Compute the intersection once instead of once per element, and
        # use a list comprehension so Python 3 keeps the list behaviour.
        isect = intersect(a, b)
        c = unique([x for x in union(a, b) if x not in isect])
    return c
def union(a, b):
    """Return all elements of *a* and *b* (set for hashable inputs,
    order-preserving list otherwise)."""
    # BUG FIX: uses the portable module-level Hashable import instead of
    # collections.Hashable (removed in Python 3.10).
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        c = set(a) | set(b)
    else:
        c = unique(a + b)
    return c
def min(a):
    """Return the smallest element of *a*.

    This filter shadows the builtin of the same name inside this module,
    so the real builtin is fetched from the builtins module explicitly.
    """
    # BUG FIX: __builtins__ is a dict only when the module is imported;
    # as __main__ it is a module and .get() raises AttributeError.
    try:
        import builtins  # Python 3
    except ImportError:
        import __builtin__ as builtins  # Python 2
    return builtins.min(a)
def max(a):
    """Return the largest element of *a*.

    This filter shadows the builtin of the same name inside this module,
    so the real builtin is fetched from the builtins module explicitly.
    """
    # BUG FIX: __builtins__ is a dict only when the module is imported;
    # as __main__ it is a module and .get() raises AttributeError.
    try:
        import builtins  # Python 3
    except ImportError:
        import __builtin__ as builtins  # Python 2
    return builtins.max(a)
def logarithm(x, base=math.e):
    """Return the logarithm of *x* in the given *base* (natural log by
    default; base 10 uses the more precise math.log10)."""
    try:
        return math.log10(x) if base == 10 else math.log(x, base)
    except TypeError as e:
        raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
def power(x, y):
    """Return *x* raised to the power *y* as a float."""
    try:
        result = math.pow(x, y)
    except TypeError as e:
        raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
    return result
def inversepower(x, base=2):
    """Return the *base*-th root of *x* (square root by default, which
    uses the more precise math.sqrt)."""
    try:
        if base == 2:
            return math.sqrt(x)
        return math.pow(x, 1.0 / float(base))
    except TypeError as e:
        raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
def human_readable(size, isbits=False, unit=None):
    """Render a byte (or bit) count as human-readable text, e.g.
    '1.00 MB'.  When *unit* is given (e.g. 'K'), the value is expressed
    in that unit instead of auto-scaling."""
    base = 'bits' if isbits else 'Bytes'
    suffix = ''
    thresholds = (
        (1 << 70, 'Z'),
        (1 << 60, 'E'),
        (1 << 50, 'P'),
        (1 << 40, 'T'),
        (1 << 30, 'G'),
        (1 << 20, 'M'),
        (1 << 10, 'K'),
        (1, base),
    )
    for limit, suffix in thresholds:
        if unit is None:
            # Auto-scale: first threshold the size reaches wins.
            if size >= limit:
                break
        elif unit.upper() == suffix:
            # Caller forced a specific unit.
            break
    if limit != 1:
        suffix += base[0]
    return '%.2f %s' % (float(size) / limit, suffix)
class FilterModule(object):
    ''' Ansible math jinja2 filters '''

    def filters(self):
        # Filter-name -> callable mapping picked up by the jinja2 environment.
        return {
            # general math
            'min' : min,
            'max' : max,

            # exponents and logarithms
            'log': logarithm,
            'pow': power,
            'root': inversepower,

            # set theory
            'unique' : unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,

            # computer theory
            'human_readable' : human_readable,
        }
| gpl-3.0 |
toidi/hadoop-yarn-api-python-client | tests/test_hadoop_conf.py | 1 | 9522 | # -*- coding: utf-8 -*-
from tempfile import NamedTemporaryFile
import mock
from mock import patch
from requests import RequestException
from tests import TestCase
import requests_mock
from yarn_api_client import hadoop_conf
import platform
import os
import sys
# Select the assertRaises-regex method name for the running Python.
if sys.version_info[0] == 2:
    _mock_exception_method = 'assertRaisesRegexp'
else:
    _mock_exception_method = 'assertRaisesRegex'

# Dotted patch targets for the HTTP client, resolved for Python 2 vs 3.
_http_request_method = ''
_http_getresponse_method = ''

try:
    from httplib import HTTPConnection, OK, NOT_FOUND  # NOQA
    _http_request_method = 'httplib.HTTPConnection.request'
    _http_getresponse_method = 'httplib.HTTPConnection.getresponse'
except ImportError:
    from http.client import HTTPConnection, OK, NOT_FOUND  # NOQA
    _http_request_method = 'http.client.HTTPConnection.request'
    _http_getresponse_method = 'http.client.HTTPConnection.getresponse'

# Expected config-file path prefix; the Windows variant ends with a
# backslash to match the path-join behaviour asserted in the tests below.
if platform.system() == 'Windows':
    hadoop_conf_path = '/etc/hadoop/conf\\'
else:
    hadoop_conf_path = '/etc/hadoop/conf/'

# XML fixtures written to temporary files by test_parse().
empty_config = '<configuration></configuration>'.encode('latin1')

yarn_site_xml = """\
<configuration>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>localhost:8022</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address</name>
<value>localhost:8024</value>
</property>
<property>
<name>yarn.http.policy</name>
<value>HTTPS_ONLY</value>
</property>
</configuration>
""".encode('latin1')
class HadoopConfTestCase(TestCase):
    """Tests for yarn_api_client.hadoop_conf endpoint-discovery helpers."""

    def test_parse(self):
        temp_filename = None
        # Populated fixture: all three properties should be readable.
        with NamedTemporaryFile(delete=False) as f:
            f.write(yarn_site_xml)
            f.flush()
            f.close()
            temp_filename = f.name
            key = 'yarn.resourcemanager.webapp.address'
            value = hadoop_conf.parse(f.name, key)
            self.assertEqual('localhost:8022', value)

            key = 'yarn.resourcemanager.webapp.https.address'
            value = hadoop_conf.parse(f.name, key)
            self.assertEqual('localhost:8024', value)

            key = 'yarn.http.policy'
            value = hadoop_conf.parse(f.name, key)
            self.assertEqual('HTTPS_ONLY', value)
        os.remove(temp_filename)

        # Empty fixture: every lookup should return None.
        with NamedTemporaryFile(delete=False) as f:
            f.write(empty_config)
            f.flush()
            f.close()
            temp_filename = f.name
            key = 'yarn.resourcemanager.webapp.address'
            value = hadoop_conf.parse(f.name, key)
            self.assertEqual(None, value)

            key = 'yarn.resourcemanager.webapp.https.address'
            value = hadoop_conf.parse(f.name, key)
            self.assertEqual(None, value)

            key = 'yarn.http.policy'
            value = hadoop_conf.parse(f.name, key)
            self.assertEqual(None, value)
        os.remove(temp_filename)

    def test_get_resource_endpoint(self):
        # Non-HA path: no RM ids, so the plain webapp.address key is used.
        with patch('yarn_api_client.hadoop_conf.parse') as parse_mock:
            with patch('yarn_api_client.hadoop_conf._get_rm_ids') as get_rm_ids_mock:
                parse_mock.return_value = 'example.com:8022'
                get_rm_ids_mock.return_value = None
                endpoint = hadoop_conf.get_resource_manager_endpoint()
                self.assertEqual('http://example.com:8022', endpoint)
                parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml',
                                              'yarn.resourcemanager.webapp.address')

                parse_mock.reset_mock()
                parse_mock.return_value = None
                endpoint = hadoop_conf.get_resource_manager_endpoint()
                self.assertIsNone(endpoint)

    @mock.patch('yarn_api_client.hadoop_conf._get_rm_ids')
    @mock.patch('yarn_api_client.hadoop_conf.parse')
    @mock.patch('yarn_api_client.hadoop_conf.check_is_active_rm')
    def test_get_resource_endpoint_with_ha(self, check_is_active_rm_mock, parse_mock, get_rm_ids_mock):
        # HA path: two RM ids configured; the first active one wins and the
        # per-id webapp.address.rm1 key is consulted.
        get_rm_ids_mock.return_value = ['rm1', 'rm2']
        parse_mock.return_value = 'example.com:8022'
        check_is_active_rm_mock.return_value = True
        endpoint = hadoop_conf.get_resource_manager_endpoint()
        self.assertEqual('http://example.com:8022', endpoint)
        parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml',
                                      'yarn.resourcemanager.webapp.address.rm1')

        parse_mock.reset_mock()
        parse_mock.return_value = None
        endpoint = hadoop_conf.get_resource_manager_endpoint()
        self.assertIsNone(endpoint)

    def test_get_rm_ids(self):
        with patch('yarn_api_client.hadoop_conf.parse') as parse_mock:
            # Comma-separated id string is split into a list.
            parse_mock.return_value = 'rm1,rm2'
            rm_list = hadoop_conf._get_rm_ids(hadoop_conf.CONF_DIR)
            self.assertEqual(['rm1', 'rm2'], rm_list)
            parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml', 'yarn.resourcemanager.ha.rm-ids')

            parse_mock.reset_mock()
            parse_mock.return_value = None
            rm_list = hadoop_conf._get_rm_ids(hadoop_conf.CONF_DIR)
            self.assertIsNone(rm_list)

    @mock.patch('yarn_api_client.hadoop_conf._is_https_only')
    def test_check_is_active_rm(self, is_https_only_mock):
        is_https_only_mock.return_value = False
        # Success scenario
        with requests_mock.mock() as requests_get_mock:
            requests_get_mock.get('https://example2:8022/cluster', status_code=200)
            self.assertTrue(hadoop_conf.check_is_active_rm('https://example2:8022'))
        # Outage scenario
        with requests_mock.mock() as requests_get_mock:
            requests_get_mock.get('https://example2:8022/cluster', status_code=500)
            self.assertFalse(hadoop_conf.check_is_active_rm('https://example2:8022'))
        # Error scenario (URL is wrong - not found)
        with requests_mock.mock() as requests_get_mock:
            requests_get_mock.get('https://example2:8022/cluster', status_code=404)
            self.assertFalse(hadoop_conf.check_is_active_rm('https://example2:8022'))
        # Error scenario (necessary Auth is not provided or invalid credentials)
        with requests_mock.mock() as requests_get_mock:
            requests_get_mock.get('https://example2:8022/cluster', status_code=401)
            self.assertFalse(hadoop_conf.check_is_active_rm('https://example2:8022'))
        # Emulate requests library exception (socket timeout, etc)
        # NOTE(review): this case uses a scheme-less URL unlike the others -
        # presumably intentional to exercise the exception path; confirm.
        with requests_mock.mock() as requests_get_mock:
            requests_get_mock.get('example2:8022/cluster', exc=RequestException)
            self.assertFalse(hadoop_conf.check_is_active_rm('example2:8022'))

    def test_get_resource_manager(self):
        with patch('yarn_api_client.hadoop_conf.parse') as parse_mock:
            parse_mock.return_value = 'example.com:8022'
            # No rm_id: plain key; with rm_id: suffixed key.
            endpoint = hadoop_conf._get_resource_manager(hadoop_conf.CONF_DIR, None)
            self.assertEqual('http://example.com:8022', endpoint)
            parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml', 'yarn.resourcemanager.webapp.address')

            endpoint = hadoop_conf._get_resource_manager(hadoop_conf.CONF_DIR, 'rm1')
            self.assertEqual(('http://example.com:8022'), endpoint)
            parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml', 'yarn.resourcemanager.webapp.address.rm1')

            parse_mock.reset_mock()
            parse_mock.return_value = None
            endpoint = hadoop_conf._get_resource_manager(hadoop_conf.CONF_DIR, 'rm1')
            self.assertIsNone(endpoint)

    def test_get_jobhistory_endpoint(self):
        with patch('yarn_api_client.hadoop_conf.parse') as parse_mock:
            parse_mock.return_value = 'example.com:8022'
            endpoint = hadoop_conf.get_jobhistory_endpoint()
            self.assertEqual('example.com:8022', endpoint)
            parse_mock.assert_called_with(hadoop_conf_path + 'mapred-site.xml',
                                          'mapreduce.jobhistory.webapp.address')

            parse_mock.reset_mock()
            parse_mock.return_value = None
            endpoint = hadoop_conf.get_jobhistory_endpoint()
            self.assertIsNone(endpoint)

    def test_get_nodemanager_endpoint(self):
        with patch('yarn_api_client.hadoop_conf.parse') as parse_mock:
            parse_mock.return_value = 'example.com:8022'
            endpoint = hadoop_conf.get_nodemanager_endpoint()
            self.assertEqual('example.com:8022', endpoint)
            parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml',
                                          'yarn.nodemanager.webapp.address')

            parse_mock.reset_mock()
            parse_mock.return_value = None
            endpoint = hadoop_conf.get_nodemanager_endpoint()
            self.assertIsNone(endpoint)

    def test_get_webproxy_endpoint(self):
        with patch('yarn_api_client.hadoop_conf.parse') as parse_mock:
            parse_mock.return_value = 'example.com:8022'
            endpoint = hadoop_conf.get_webproxy_endpoint()
            self.assertEqual('example.com:8022', endpoint)
            parse_mock.assert_called_with(hadoop_conf_path + 'yarn-site.xml',
                                          'yarn.web-proxy.address')

            parse_mock.reset_mock()
            parse_mock.return_value = None
            endpoint = hadoop_conf.get_webproxy_endpoint()
            self.assertIsNone(endpoint)
| bsd-3-clause |
hujiajie/chromium-crosswalk | tools/perf/benchmarks/repaint.py | 9 | 2833 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
import ct_benchmarks_util
from measurements import smoothness
import page_sets
from telemetry import benchmark
class _Repaint(perf_benchmark.PerfBenchmark):
    """Shared base for the repaint benchmarks below (not itself run)."""

    @classmethod
    def AddBenchmarkCommandLineArgs(cls, parser):
        # Invalidation options are forwarded to the repaint page set.
        parser.add_option('--mode', type='string',
                          default='viewport',
                          help='Invalidation mode. '
                          'Supported values: fixed_size, layer, random, viewport.')
        parser.add_option('--width', type='int',
                          default=None,
                          help='Width of invalidations for fixed_size mode.')
        parser.add_option('--height', type='int',
                          default=None,
                          help='Height of invalidations for fixed_size mode.')

    @classmethod
    def Name(cls):
        return 'repaint'

    def CreateStorySet(self, options):
        return page_sets.KeyMobileSitesRepaintPageSet(
            options.mode, options.width, options.height)

    def CreatePageTest(self, options):
        return smoothness.Repaint()
#crbug.com/499320
#@benchmark.Enabled('android')
@benchmark.Disabled('all')
class RepaintKeyMobileSites(_Repaint):
    """Measures repaint performance on the key mobile sites.

    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""

    @classmethod
    def Name(cls):
        return 'repaint.key_mobile_sites_repaint'
#crbug.com/502179
@benchmark.Enabled('android')
@benchmark.Disabled('all')
class RepaintGpuRasterizationKeyMobileSites(_Repaint):
    """Measures repaint performance on the key mobile sites with forced GPU
    rasterization.

    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
    tag = 'gpu_rasterization'

    def SetExtraBrowserOptions(self, options):
        # Force GPU rasterization via browser flags.
        silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)

    @classmethod
    def Name(cls):
        return 'repaint.gpu_rasterization.key_mobile_sites_repaint'
# Disabled because we do not plan on running CT benchmarks on the perf
# waterfall any time soon.
@benchmark.Disabled('all')
class RepaintCT(_Repaint):
    """Measures repaint performance for Cluster Telemetry."""

    @classmethod
    def Name(cls):
        return 'repaint_ct'

    @classmethod
    def AddBenchmarkCommandLineArgs(cls, parser):
        # CT adds its own args (urls list, user agent, archive) on top of
        # the base repaint invalidation options.
        _Repaint.AddBenchmarkCommandLineArgs(parser)
        ct_benchmarks_util.AddBenchmarkCommandLineArgs(parser)

    @classmethod
    def ProcessCommandLineArgs(cls, parser, args):
        ct_benchmarks_util.ValidateCommandLineArgs(parser, args)

    def CreateStorySet(self, options):
        return page_sets.CTPageSet(
            options.urls_list, options.user_agent, options.archive_data_file)
| bsd-3-clause |
informatik-mannheim/Moduro-CC3D | Simulation/ModuroModel/Spa/SpaPcdbCdiInDa.py | 1 | 1542 | # Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module authorship metadata.
__author__ = "Julian Debatin"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "juliandebatin@gmail.com"
__status__ = "Production"
from ModuroModel.Spa.SpaPcdbCdiInUa import SpaPcdbCdiInUa
class SpaPcdbCdiInDa(SpaPcdbCdiInUa):
    """Spa model variant: same setup as SpaPcdbCdiInUa but with its own
    adhesion factor and energy matrix."""

    def __init__(self, sim, simthread):
        SpaPcdbCdiInUa.__init__(self, sim, simthread)

    def _initModel(self):
        # BUG FIX: was "SpaCdbPcdiInDa", which did not match the class and
        # module name SpaPcdbCdiInDa (copy-paste letter transposition).
        self.name = "SpaPcdbCdiInDa"
        self.adhFactor = 0.25
        self.cellTypes = self._createCellTypes()
        self.energyMatrix = self._createEnergyMatrix()
        self._run()  # Must be the last statement.

    def _createEnergyMatrix(self):
        # Upper-triangular pairwise contact-energy table over the cell types
        # created in _initModel.
        energyMatrix = [[0, 14, 14, 14, 14, 4],
                        [0, -1, 1, 3, 12, 12],
                        [0, 0, 6, 4, 8, 14],
                        [0, 0, 0, 5, 8, 12],
                        [0, 0, 0, 0, 6, 4],
                        [0, 0, 0, 0, 0, 2]]
        return energyMatrix
sbaechler/feincms-elephantblog | tests/testapp/tests/factories.py | 1 | 2651 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import datetime
import pytz
import factory
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.conf import settings
from elephantblog.models import Entry, Category, CategoryTranslation
class UserFactory(factory.DjangoModelFactory):
    # Default author account used by the entry fixtures below.
    username = 'author'
    password = 'elephant'
    email = 'admin@elephantblog.ch'

    class Meta:
        model = User
class EntryFactory(factory.DjangoModelFactory):
    # Entries are active (published) but not featured by default; callers
    # supply title/slug/dates per fixture.
    is_active = True
    is_featured = False

    class Meta:
        model = Entry
def create_entries(factory):
    """Create the two baseline blog entries used throughout the tests.

    Both entries share one author and are created with language='en';
    timestamps are localized to settings.TIME_ZONE when USE_TZ is on.
    Returns the created entries as a list, oldest first.
    """
    author = UserFactory()
    first_published = datetime.datetime(2012, 8, 12, 11, 0, 0)
    second_published = datetime.datetime(2012, 10, 12, 11, 1, 0)
    edit_delay = datetime.timedelta(hours=4)

    if settings.USE_TZ:
        zone = pytz.timezone(settings.TIME_ZONE)
        first_published = zone.localize(first_published, is_dst=None)
        second_published = zone.localize(second_published, is_dst=None)

    entries = [
        factory.create(
            pk=1,
            author=author,
            title='Entry 1',
            published_on=first_published,
            last_changed=first_published + edit_delay,
            slug='entry-1',
            language='en',
        ),
        factory.create(
            pk=2,
            author=author,
            title='Eintrag 1',
            published_on=second_published,
            last_changed=second_published + edit_delay,
            slug='eintrag-1',
            language='en',
        ),
    ]
    return entries
def create_chinese_entries(factory):
    # Builds on create_entries() and adds two Chinese translations of the
    # first entry.
    # NOTE(review): the titles and language codes look swapped -- 'zh-cn' is
    # Simplified Chinese and 'zh-tw' is Traditional, yet the titles say the
    # opposite. Left as-is because other tests may assert these exact titles;
    # confirm against the consuming tests before changing.
    entries = create_entries(factory)
    author = entries[0].author
    factory.create(
        pk=3,
        author=author,
        title='Entry 2 chinese traditional',
        language='zh-cn',
        translation_of=entries[0],
        published_on=datetime.datetime(2012, 10, 12, 12, 0, 0),
        last_changed=datetime.datetime(2012, 10, 12, 16, 0, 0),
        slug='entry-2-cn'
    )
    factory.create(
        pk=4,
        author=author,
        title='Entry 2 chinese simplified',
        language='zh-tw',
        translation_of=entries[0],
        published_on=datetime.datetime(2012, 10, 12, 12, 0, 0),
        last_changed=datetime.datetime(2012, 10, 12, 16, 0, 0),
        slug='entry-2-tw'
    )
class CategoryTranslationFactory(factory.DjangoModelFactory):
    # Bare factory; callers supply parent/title/slug explicitly.
    class Meta:
        model = CategoryTranslation
class CategoryFactory(factory.DjangoModelFactory):
    # Bare factory; Category fields live on its translations.
    class Meta:
        model = Category
def create_category(title):
    """Create a Category plus one translation titled *title*.

    The translation's slug is derived from the title; the Category
    instance (not the translation) is returned.
    """
    new_category = CategoryFactory.create()
    CategoryTranslationFactory.create(
        parent=new_category,
        title=title,
        slug=slugify(title),
    )
    return new_category
| bsd-3-clause |
KonradBreitsprecher/espresso | testsuite/hat.py | 1 | 2111 | #
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests hat potential
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
@ut.skipIf(not espressomd.has_features("HAT"),"Skipped because feature is disabled")
class HatTest(ut.TestCase):
    """Compares the simulated hat pair potential against its analytic form."""

    def force(self, F_max, r_cut, r):
        # Analytic hat force: linear ramp F_max * (1 - r/r_cut), zero outside
        # the open interval (0, r_cut).
        if 0 < r < r_cut:
            return F_max * (1. - r / r_cut)
        return 0.

    def pot(self, F_max, r_cut, r):
        # Analytic hat potential; zero at and beyond the cutoff.
        if r < r_cut:
            return F_max * (r - r_cut) * ((r + r_cut) / (2. * r_cut) - 1.)
        return 0.

    def test(self):
        system = espressomd.System(box_l=[1.0, 1.0, 1.0])
        system.box_l = 3 * [10]
        system.time_step = 0.01
        system.cell_system.skin = 0.4

        # Two particles of the same type, starting at the box center.
        system.part.add(id=0, pos=0.5 * system.box_l, type=0)
        system.part.add(id=1, pos=0.5 * system.box_l, type=0)

        F_max = 3.145
        cutoff = 1.3
        system.non_bonded_inter[0, 0].hat.set_params(F_max=F_max, cutoff=cutoff)

        # Sweep particle 1 toward particle 0 in steps of cutoff/90 and compare
        # force and total energy with the analytic expressions at each step.
        step = cutoff / 90.
        start = 0.5 * system.box_l[0]
        for i in range(100):
            separation = start - i * step
            system.part[1].pos = [separation, 0.5 * system.box_l[1], 0.5 * system.box_l[2]]
            system.integrator.run(0)
            self.assertAlmostEqual(self.force(F_max, cutoff, i * step), system.part[0].f[0], places=7)
            self.assertAlmostEqual(self.pot(F_max, cutoff, i * step), system.analysis.energy()['total'], places=7)
if __name__ == "__main__":
    # Allow running this test module directly.
    ut.main()
| gpl-3.0 |
TheProjecter/google-apis-client-generator | src/googleapis/codegen/gwt_generator.py | 4 | 5843 | #!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GWT/Java library generator based on RequestFactory."""
__author__ = 'robertvawter@google.com (Bob Vawter)'
from googleapis.codegen import java_generator
from googleapis.codegen import utilities
from googleapis.codegen.import_definition import ImportDefinition
class GwtGenerator(java_generator.BaseJavaGenerator):
  """The GWT code generator."""

  def __init__(self, discovery, options=None):
    super(GwtGenerator, self).__init__(discovery, 'gwt',
                                       GwtLanguageModel(options=options),
                                       options=options)
    # Generated code is placed under "<api name>/shared".
    self.api.module.SetPath('%s/shared' % self.api.values['name'])

  def _InnerModelClassesSupported(self):
    """Gets whether or not inner model classes are supported."""
    return False

  def AnnotateApi(self, api):
    """Add GWT specific annotations to the Api dictionary."""
    super(GwtGenerator, self).AnnotateApi(api)

  def AnnotateMethod(self, api, method, unused_resource):
    """Add GWT-specific annotations and naming schemes."""
    # Methods are rendered as RequestFactory request classes: Foo -> FooRequest.
    method.SetTemplateValue('className',
                            '%sRequest' % method.values['className'])
    super(GwtGenerator, self).AnnotateMethod(api, method, None)

  def AnnotateParameter(self, method, parameter):
    """Add GWT-specific annotations to parameter declaration.

    Qualify our enum class names by method so they do not clash.

    Args:
      method: (Method) The method this parameter belongs to.
      parameter: (Parameter) The parameter to annotate.
    """
    super(GwtGenerator, self).AnnotateParameter(method, parameter)
    enum_type = parameter.values.get('enumType')
    if enum_type:
      # For generated enums, we need to qualify the parent class of the enum so
      # that two methods that take a similarly-named enum parameter don't get
      # confused.
      # TODO(user): This will fail when enums can be $refs. Get rid of all
      # special purpuse enum annotations and checks.
      name = '%s.%s' % (method.values['className'],
                        enum_type.values['className'])
      enum_type.SetTemplateValue('codeType', name)

  def AnnotateResource(self, api, resource):
    """Add GWT-specific annotations and naming schemes."""
    super(GwtGenerator, self).AnnotateResource(api, resource)
    # Build the dotted context type from this resource's ancestor chain
    # (skipping the root), ending with the resource itself.
    parent_list = resource.ancestors[1:]
    parent_list.append(resource)
    parent_classes = [p.values.get('className') for p in parent_list]
    resource.SetTemplateValue('contextCodeType', '.'.join(parent_classes))
    # for the codeName, we do not lowercase my code name
    code_name = '_'.join(parent_classes[0:-1]).lower()
    if code_name:
      code_name += '_'
    code_name += resource.codeName
    resource.SetTemplateValue('contextCodeName', code_name)
class GwtLanguageModel(java_generator.JavaLanguageModel):
  """A LanguageModel for GWT."""

  # GWT emits Java source, so the language name stays 'java'.
  language = 'java'

  # Dictionary of json type and format to its corresponding import definition.
  # The first import in the imports list is the primary import.
  TYPE_FORMAT_TO_DATATYPE_AND_IMPORTS = {
      ('boolean', None): ('Boolean', ImportDefinition(['java.lang.Boolean'])),
      # Use String instead of Object for GWT
      ('any', None): ('String', ImportDefinition(['java.lang.String'])),
      ('integer', 'int16'): ('Short', ImportDefinition(['java.lang.Short'])),
      ('integer', 'int32'): ('Integer',
                             ImportDefinition(['java.lang.Integer'])),
      # Java does not support Unsigned Integers
      ('integer', 'uint32'): ('Long', ImportDefinition(['java.lang.Long'])),
      ('number', 'double'): ('Double', ImportDefinition(['java.lang.Double'])),
      ('number', 'float'): ('Float', ImportDefinition(['java.lang.Float'])),
      # Use Splittable instead of Object for GWT
      ('object', None): ('Splittable', ImportDefinition(
          ['com.google.web.bindery.autobean.shared.Splittable'])),
      ('string', None): ('String', ImportDefinition(['java.lang.String'])),
      ('string', 'byte'): ('String', ImportDefinition(['java.lang.String'])),
      # GWT does not support date-time
      ('string', 'date-time'): ('String',
                                ImportDefinition(['java.lang.String'])),
      ('string', 'int64'): ('Long', ImportDefinition(['java.lang.Long'])),
      # Java does not support Unsigned Integers
      ('string', 'uint64'): ('BigInteger',
                             ImportDefinition(['java.math.BigInteger'])),
      }

  def __init__(self, options=None):
    super(GwtLanguageModel, self).__init__(options=options)
    # Replace the inherited Java type map with the GWT-specific one above.
    self._type_map = GwtLanguageModel.TYPE_FORMAT_TO_DATATYPE_AND_IMPORTS

  def CodeTypeForVoid(self):
    """Return a GWT style void.

    Overrides the default.

    Returns:
      (str) 'EmptyResponse'
    """
    # TODO(user): Handle empty responses in a more graceful way. Instead of
    # passing the type to use for empty responses, just signal that an empty
    # response is required, and handle that in the templates.
    return 'EmptyResponse'

  def DefaultContainerPathForOwner(self, module):
    """Overrides the default implementation."""
    return '%s/api/gwt/services' % '/'.join(
        utilities.ReversedDomainComponents(module.owner_domain))
| apache-2.0 |
jackkiej/SickRage | sickbeard/logger.py | 1 | 16593 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Custom Logger for SickRage
"""
from __future__ import unicode_literals
import io
import locale
import logging
import logging.handlers
import os
import platform
import re
import sys
import threading
import traceback
from logging import NullHandler
from urllib import quote
from github import InputFileContent
import sickbeard
from sickbeard import classes
from sickrage.helper.common import dateTimeFormat
from sickrage.helper.encoding import ek, ss
from sickrage.helper.exceptions import ex
# pylint: disable=line-too-long
# log levels
# Log level aliases, plus a custom DB level (5, below DEBUG) used for
# database query tracing.
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
DB = 5

# Maps level names (as they appear in log lines) back to numeric levels.
LOGGING_LEVELS = {
    'ERROR': ERROR,
    'WARNING': WARNING,
    'INFO': INFO,
    'DEBUG': DEBUG,
    'DB': DB,
}
# Registry of sensitive values (API keys, passwords, ...) to censor from log
# output; populated elsewhere, read by CensoredFormatter.format().
censored_items = {}  # pylint: disable=invalid-name


class CensoredFormatter(logging.Formatter, object):
    """
    Censor information such as API keys, user names, and passwords from the Log
    """
    def __init__(self, fmt=None, datefmt=None, encoding='utf-8'):
        super(CensoredFormatter, self).__init__(fmt, datefmt)
        self.encoding = encoding

    def format(self, record):
        """
        Strips censored items from string

        :param record: to censor
        """
        msg = super(CensoredFormatter, self).format(record)

        if not isinstance(msg, unicode):
            msg = msg.decode(self.encoding, 'replace')  # Convert to unicode

        # set of censored items
        censored = {item for _, item in censored_items.iteritems() if item}
        # set of censored items and urlencoded counterparts
        censored = censored | {quote(item) for item in censored}
        # convert set items to unicode and typecast to list
        censored = list({
            item.decode(self.encoding, 'replace')
            if not isinstance(item, unicode) else item
            for item in censored
        })
        # sort the list in order of descending length so that entire item is censored
        # e.g. password and password_1 both get censored instead of getting ********_1
        censored.sort(key=len, reverse=True)

        for item in censored:
            msg = msg.replace(item, len(item) * '*')

        # Needed because Newznab apikey isn't stored as key=value in a section.
        # BUG FIX: re.sub()'s fourth positional argument is `count`, not
        # `flags`; the original passed re.I there, which silently capped the
        # scrub at 2 substitutions and left the match case-sensitive.
        msg = re.sub(r'([&?]r|[&?]apikey|[&?]api_key)(?:=|%3D)[^&]*([&\w]?)',
                     r'\1=**********\2', msg, flags=re.I)
        return msg
class Logger(object):  # pylint: disable=too-many-instance-attributes
    """
    Logger to create log entries
    """
    def __init__(self):
        self.logger = logging.getLogger('sickrage')

        # All loggers managed by this class; they share handlers and levels.
        self.loggers = [
            logging.getLogger('sickrage'),
            logging.getLogger('tornado.general'),
            logging.getLogger('tornado.application'),
            # logging.getLogger('subliminal'),
            # logging.getLogger('tornado.access'),
            # logging.getLogger('tvdb_api'),
            # logging.getLogger("requests.packages.urllib3")
        ]

        # Feature toggles; real values are applied by init_logging().
        self.console_logging = False
        self.file_logging = False
        self.debug_logging = False
        self.database_logging = False

        # Path to the log file; resolved lazily in init_logging().
        self.log_file = None

        # Guard so only one issue-submitter run is active at a time.
        self.submitter_running = False

    def init_logging(self, console_logging=False, file_logging=False, debug_logging=False, database_logging=False):
        """
        Initialize logging

        :param console_logging: True if logging to console
        :param file_logging: True if logging to file
        :param debug_logging: True if debug logging is enabled
        :param database_logging: True if logging database access
        """
        self.log_file = self.log_file or ek(os.path.join, sickbeard.LOG_DIR, 'sickrage.log')

        self.debug_logging = debug_logging
        self.console_logging = console_logging
        self.file_logging = file_logging
        self.database_logging = database_logging

        logging.addLevelName(DB, 'DB')  # add a new logging level DB
        logging.getLogger().addHandler(NullHandler())  # nullify root logger

        # set custom root logger so helper loggers route through ours
        for logger in self.loggers:
            if logger is not self.logger:
                logger.root = self.logger
                logger.parent = self.logger

        log_level = DB if self.database_logging else DEBUG if self.debug_logging else INFO

        # set minimum logging level allowed for loggers
        for logger in self.loggers:
            logger.setLevel(log_level)

        logging.getLogger("tornado.general").setLevel('ERROR')

        # console log handler
        if self.console_logging:
            console = logging.StreamHandler()
            console.setFormatter(CensoredFormatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
            console.setLevel(log_level)

            for logger in self.loggers:
                logger.addHandler(console)

        # rotating log file handler
        if self.file_logging:
            rfh = logging.handlers.RotatingFileHandler(self.log_file, maxBytes=int(sickbeard.LOG_SIZE * 1048576), backupCount=sickbeard.LOG_NR, encoding='utf-8')
            rfh.setFormatter(CensoredFormatter('%(asctime)s %(levelname)-8s %(message)s', dateTimeFormat))
            rfh.setLevel(log_level)

            for logger in self.loggers:
                logger.addHandler(rfh)

    def set_level(self):
        # Re-read the debug flags from sickbeard and re-apply the resulting
        # level to every managed logger and handler.
        self.debug_logging = sickbeard.DEBUG
        self.database_logging = sickbeard.DBDEBUG

        level = DB if self.database_logging else DEBUG if self.debug_logging else INFO
        for logger in self.loggers:
            logger.setLevel(level)
            for handler in logger.handlers:
                handler.setLevel(level)

    @staticmethod
    def shutdown():
        """
        Shut down the logger
        """
        logging.shutdown()

    def log(self, msg, level=INFO, *args, **kwargs):
        """
        Create log entry

        :param msg: to log
        :param level: of log, e.g. DEBUG, INFO, etc.
        :param args: to pass to logger
        :param kwargs: to pass to logger
        """
        cur_thread = threading.currentThread().getName()

        # For errors, prefix the message with the short commit hash.
        cur_hash = ''
        if level == ERROR and sickbeard.CUR_COMMIT_HASH and len(sickbeard.CUR_COMMIT_HASH) > 6:
            cur_hash = '[{0}] '.format(
                sickbeard.CUR_COMMIT_HASH[:7]
            )

        message = '{thread} :: {hash}{message}'.format(
            thread=cur_thread, hash=cur_hash, message=msg)

        # Change the SSL error to a warning with a link to information about how to fix it.
        # Check for u'error [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:590)'
        ssl_errors = [
            r'error \[Errno \d+\] _ssl.c:\d+: error:\d+\s*:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert internal error',
            r'error \[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE\] sslv3 alert handshake failure \(_ssl\.c:\d+\)',
        ]
        for ssl_error in ssl_errors:
            check = re.sub(ssl_error, 'See: http://git.io/vuU5V', message)
            if check != message:
                message = check
                level = WARNING

        # Mirror errors/warnings into the UI viewers.
        if level == ERROR:
            classes.ErrorViewer.add(classes.UIError(message))
        elif level == WARNING:
            classes.WarningViewer.add(classes.UIError(message))

        if level == ERROR:
            self.logger.exception(message, *args, **kwargs)
        else:
            self.logger.log(level, message, *args, **kwargs)

    def log_error_and_exit(self, error_msg, *args, **kwargs):
        # Log at ERROR, then terminate; the message doubles as the exit
        # message when there is no console handler to show it.
        self.log(error_msg, ERROR, *args, **kwargs)

        if not self.console_logging:
            sys.exit(error_msg.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
        else:
            sys.exit(1)

    def submit_errors(self):  # pylint: disable=too-many-branches,too-many-locals
        """
        Submit pending errors from ErrorViewer to the GitHub issue tracker.

        Returns a (submitter_result, issue_id) tuple; issue_id is None when
        nothing was submitted.
        """
        submitter_result = ''
        issue_id = None

        # Preconditions: GitHub credentials, debug mode, a GitHub client and
        # at least one pending error.
        if not all((sickbeard.GIT_USERNAME, sickbeard.GIT_PASSWORD, sickbeard.DEBUG, sickbeard.gh, classes.ErrorViewer.errors)):
            submitter_result = 'Please set your GitHub username and password in the config and enable debug. Unable to submit issue ticket to GitHub!'
            return submitter_result, issue_id

        try:
            from sickbeard.versionChecker import CheckVersion
            checkversion = CheckVersion()
            checkversion.check_for_new_version()
            commits_behind = checkversion.updater.get_num_commits_behind()
        except Exception:  # pylint: disable=broad-except
            submitter_result = 'Could not check if your SickRage is updated, unable to submit issue ticket to GitHub!'
            return submitter_result, issue_id

        # Only accept reports from up-to-date installs.
        if commits_behind is None or commits_behind > 0:
            submitter_result = 'Please update SickRage, unable to submit issue ticket to GitHub with an outdated version!'
            return submitter_result, issue_id

        if self.submitter_running:
            submitter_result = 'Issue submitter is running, please wait for it to complete'
            return submitter_result, issue_id

        self.submitter_running = True

        try:
            # read log file
            log_data = None

            if ek(os.path.isfile, self.log_file):
                with io.open(self.log_file, encoding='utf-8') as log_f:
                    log_data = log_f.readlines()

            # Pull in rotated log files until we have at least 500 lines.
            for i in range(1, int(sickbeard.LOG_NR)):
                f_name = '{0}.{1:d}'.format(self.log_file, i)
                if ek(os.path.isfile, f_name) and (len(log_data) <= 500):
                    with io.open(f_name, encoding='utf-8') as log_f:
                        log_data += log_f.readlines()

            log_data = [line for line in reversed(log_data)]

            # parse and submit errors to issue tracker
            for cur_error in sorted(classes.ErrorViewer.errors, key=lambda error: error.time, reverse=True)[:500]:
                try:
                    title_error = ss(str(cur_error.title))
                    if not title_error or title_error == 'None':
                        # Fall back to extracting a title from the message.
                        title_error = re.match(r'^[A-Z0-9\-\[\] :]+::\s*(.*)(?: \[[\w]{7}\])$', ss(cur_error.message)).group(1)

                    if len(title_error) > 1000:
                        title_error = title_error[0:1000]
                except Exception as err_msg:  # pylint: disable=broad-except
                    self.log('Unable to get error title : {0}'.format(ex(err_msg)), ERROR)
                    title_error = 'UNKNOWN'

                # Find the error in the log and gist the surrounding 50 lines.
                gist = None
                regex = r'^({0})\s+([A-Z]+)\s+([0-9A-Z\-]+)\s*(.*)(?: \[[\w]{{7}}\])$'.format(cur_error.time)
                for i, data in enumerate(log_data):
                    match = re.match(regex, data)
                    if match:
                        level = match.group(2)
                        if LOGGING_LEVELS[level] == ERROR:
                            paste_data = ''.join(log_data[i:i + 50])
                            if paste_data:
                                gist = sickbeard.gh.get_user().create_gist(False, {'sickrage.log': InputFileContent(paste_data)})
                            break
                else:
                    gist = 'No ERROR found'

                try:
                    locale_name = locale.getdefaultlocale()[1]
                except Exception:  # pylint: disable=broad-except
                    locale_name = 'unknown'

                if gist and gist != 'No ERROR found':
                    log_link = 'Link to Log: {0}'.format(gist.html_url)
                else:
                    log_link = 'No Log available with ERRORS:'

                msg = [
                    '### INFO',
                    'Python Version: **{0}**'.format(sys.version[:120].replace('\n', '')),
                    'Operating System: **{0}**'.format(platform.platform()),
                    'Locale: {0}'.format(locale_name),
                    'Branch: **{0}**'.format(sickbeard.BRANCH),
                    'Commit: SickRage/SickRage@{0}'.format(sickbeard.CUR_COMMIT_HASH),
                    log_link,
                    '### ERROR',
                    '```',
                    cur_error.message,
                    '```',
                    '---',
                    '_STAFF NOTIFIED_: @SickRage/owners @SickRage/moderators',
                ]

                message = '\n'.join(msg)
                title_error = '[APP SUBMITTED]: {0}'.format(title_error)

                repo = sickbeard.gh.get_organization(sickbeard.GIT_ORG).get_repo(sickbeard.GIT_REPO)
                reports = repo.get_issues(state='all')

                def is_ascii_error(title):
                    # [APP SUBMITTED]: 'ascii' codec can't encode characters in position 00-00: ordinal not in range(128)
                    # [APP SUBMITTED]: 'charmap' codec can't decode byte 0x00 in position 00: character maps to <undefined>
                    return re.search(r'.* codec can\'t .*code .* in position .*:', title) is not None

                def is_malformed_error(title):
                    # [APP SUBMITTED]: not well-formed (invalid token): line 0, column 0
                    return re.search(r'.* not well-formed \(invalid token\): line .* column .*', title) is not None

                ascii_error = is_ascii_error(title_error)
                malformed_error = is_malformed_error(title_error)

                # Deduplicate against existing reports before opening a new one.
                issue_found = False
                for report in reports:
                    if title_error.rsplit(' :: ')[-1] in report.title or \
                            (malformed_error and is_malformed_error(report.title)) or \
                            (ascii_error and is_ascii_error(report.title)):

                        issue_id = report.number
                        if not report.raw_data['locked']:
                            if report.create_comment(message):
                                submitter_result = 'Commented on existing issue #{0} successfully!'.format(issue_id)
                            else:
                                submitter_result = 'Failed to comment on found issue #{0}!'.format(issue_id)
                        else:
                            submitter_result = 'Issue #{0} is locked, check GitHub to find info about the error.'.format(issue_id)

                        issue_found = True
                        break

                if not issue_found:
                    issue = repo.create_issue(title_error, message)
                    if issue:
                        issue_id = issue.number
                        submitter_result = 'Your issue ticket #{0} was submitted successfully!'.format(issue_id)
                    else:
                        submitter_result = 'Failed to create a new issue!'

                if issue_id and cur_error in classes.ErrorViewer.errors:
                    # clear error from error list
                    classes.ErrorViewer.errors.remove(cur_error)

        except Exception:  # pylint: disable=broad-except
            self.log(traceback.format_exc(), ERROR)
            submitter_result = 'Exception generated in issue submitter, please check the log'
            issue_id = None
        finally:
            self.submitter_running = False

        return submitter_result, issue_id
# pylint: disable=too-few-public-methods
class Wrapper(object):
    """Proxy around the real module: attribute lookups that the wrapped
    module cannot satisfy fall back to the shared Logger instance."""

    # Single Logger shared by everything that imports this module.
    instance = Logger()

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def __getattr__(self, name):
        # Prefer the wrapped module's own attribute; otherwise delegate to
        # the shared Logger.
        missing = object()
        value = getattr(self.wrapped, name, missing)
        if value is missing:
            return getattr(self.instance, name)
        return value
# Replace this module in sys.modules with a Wrapper so that attribute access
# on the module falls through to the shared Logger instance.
_globals = sys.modules[__name__] = Wrapper(sys.modules[__name__])  # pylint: disable=invalid-name
def log(*args, **kwargs):
    """Module-level convenience wrapper around the shared Logger's log()."""
    return Wrapper.instance.log(*args, **kwargs)
| gpl-3.0 |
blitzmann/Pyfa | eos/saveddata/price.py | 1 | 2599 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
# Copyright (C) 2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from enum import IntEnum, unique
from time import time
from logbook import Logger
# Time windows (seconds) used by Price.isValid().
VALIDITY = 24 * 60 * 60  # Price validity period, 24 hours
REREQUEST = 4 * 60 * 60  # Re-request delay for failed fetches, 4 hours
TIMEOUT = 15 * 60  # Network timeout delay for connection issues, 15 minutes

# Module-level logbook logger for this module.
pyfalog = Logger(__name__)
@unique
class PriceStatus(IntEnum):
    """Outcome of the most recent price fetch for an item type.

    Values are stable integers (persisted/compared elsewhere):
    0 = never fetched, 1 = item has no market price, 2 = fetch succeeded,
    3 = fetch failed, 4 = fetch timed out.
    """
    initialized = 0
    notSupported = 1
    fetchSuccess = 2
    fetchFail = 3
    fetchTimeout = 4
class Price(object):
    """Cached market price for a single item type.

    Tracks the price value, when it was last updated and how that update
    went (a PriceStatus), and decides when a re-fetch is due.
    """

    def __init__(self, typeID):
        self.typeID = typeID
        self.time = 0
        self.price = 0
        self.status = PriceStatus.initialized

    def isValid(self, validityOverride=None):
        """Return True while the cached value is still fresh enough.

        :param validityOverride: optional validity window (seconds) used in
            place of VALIDITY for successfully fetched prices.
        """
        status = self.status
        # Always attempt to update prices which were just initialized, and
        # prices of unsupported items (support may appear later).
        if status in (PriceStatus.initialized, PriceStatus.notSupported):
            return False
        # Pick the freshness window matching how the last fetch ended.
        if status == PriceStatus.fetchSuccess:
            window = VALIDITY if validityOverride is None else validityOverride
        elif status == PriceStatus.fetchFail:
            window = REREQUEST
        elif status == PriceStatus.fetchTimeout:
            window = TIMEOUT
        else:
            return False
        return time() <= self.time + window

    def update(self, status, price=0):
        """Record the outcome of a fetch attempt.

        Failed/timed-out fetches keep the previous price; any other
        non-success status resets it to 0.
        """
        if status in (PriceStatus.fetchFail, PriceStatus.fetchTimeout):
            price = self.price
        elif status != PriceStatus.fetchSuccess:
            price = 0
        self.time = time()
        self.price = price
        self.status = status
stsouko/CGRtools | CGRtools/files/RDFrw.py | 1 | 17147 | # -*- coding: utf-8 -*-
#
# Copyright 2014-2021 Ramil Nugmanov <nougmanoff@protonmail.com>
# Copyright 2019 Dinar Batyrshin <batyrshin-dinar@mail.ru>
# This file is part of CGRtools.
#
# CGRtools is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
from bisect import bisect_left
from collections import defaultdict
from itertools import chain
from logging import warning
from os.path import getsize
from subprocess import check_output
from time import strftime
from traceback import format_exc
from warnings import warn
from ._mdl import parse_error
from ._mdl import MDLRead, MDLWrite, MOLRead, EMOLRead, RXNRead, ERXNRead, EMDLWrite
from ..containers import ReactionContainer, MoleculeContainer
from ..containers.common import Graph
class RDFRead(MDLRead):
"""
MDL RDF files reader. works similar to opened file object. support `with` context manager.
on initialization accept opened in text mode file, string path to file,
pathlib.Path object or another buffered reader object
"""
def __init__(self, file, indexable=False, **kwargs):
"""
:param indexable: if True: supported methods seek, tell, object size and subscription, it only works when
dealing with a real file (the path to the file is specified) because the external grep utility is used,
supporting in unix-like OS the object behaves like a normal open file.
if False: works like generator converting a record into ReactionContainer and returning each object in
order, records with errors are skipped
:param ignore: Skip some checks of data or try to fix some errors.
:param remap: Remap atom numbers started from one.
:param store_log: Store parser log if exists messages to `.meta` by key `CGRtoolsParserLog`.
:param calc_cis_trans: Calculate cis/trans marks from 2d coordinates.
:param ignore_stereo: Ignore stereo data.
"""
super().__init__(file, **kwargs)
self.__file = iter(self._file.readline, '')
self._data = self.__reader()
if indexable:
if next(self._data):
self._load_cache()
else:
next(self._data)
@staticmethod
def _get_shifts(file):
shifts = [int(x.split(b':', 1)[0]) for x in
check_output(['grep', '-boE', r'^\$[RM]FMT', file]).split()]
shifts.append(getsize(file))
return shifts
def seek(self, offset):
"""
shifts on a given number of record in the original file
:param offset: number of record
"""
if self._shifts:
if 0 <= offset < len(self._shifts):
current_pos = self._file.tell()
new_pos = self._shifts[offset]
if current_pos != new_pos:
if current_pos == self._shifts[-1]: # reached the end of the file
self._file.seek(0)
self.__file = iter(self._file.readline, '')
self._data = self.__reader()
next(self._data)
if offset: # move not to the beginning of the file
self._file.seek(new_pos)
self._data.send(offset)
self.__already_seeked = True
else:
if not self.__already_seeked:
self._file.seek(new_pos)
self._data.send(offset)
self.__already_seeked = True
else:
raise BlockingIOError('File already seeked. New seek possible only after reading any data')
else:
raise IndexError('invalid offset')
else:
raise self._implement_error
def tell(self):
"""
:return: number of records processed from the original file
"""
if self._shifts:
t = self._file.tell()
if t == self._shifts[0]:
return 0
elif t == self._shifts[-1]:
return len(self._shifts) - 1
elif t in self._shifts:
return bisect_left(self._shifts, t)
else:
return bisect_left(self._shifts, t) - 1
raise self._implement_error
    def __reader(self):
        """
        Generator yielding parsed records or parse_error placeholders.

        Protocol: the first next() yields True for an RDF file (header line
        starts with $DATM) or False for a bare RXN file; each later next()
        yields a converted container or a parse_error. seek() renumbers the
        stream by calling .send(offset); after a send the generator yields
        one dummy value so the caller's send() returns immediately.
        """
        # record: dict parsed but not yet converted/yielded
        # parser: active MOL/RXN block parser, None between blocks
        # mkey:   current $DTYPE metadata key
        # pos:    byte offset of the current record (seekable files only)
        record = parser = mkey = pos = None
        failed = False
        file = self._file
        try:
            seekable = file.seekable()
        except AttributeError:
            seekable = False
        if next(self.__file).startswith('$RXN'):  # parse RXN file
            is_reaction = True
            ir = 3
            meta = defaultdict(list)
            if seekable:
                pos = 0  # $RXN line starting position
            count = 0
            yield False
        elif next(self.__file).startswith('$DATM'):  # skip header
            ir = 0
            is_reaction = meta = None
            seek = yield True
            if seek is not None:
                # seek requested before any record was read
                yield
                count = seek - 1
                self.__already_seeked = False
            else:
                count = -1
        else:
            raise ValueError('invalid file')
        for line in self.__file:
            if failed and not line.startswith(('$RFMT', '$MFMT')):
                # skip the rest of a broken record up to the next delimiter
                continue
            elif parser:
                # feed the active MOL/RXN parser; it returns True when done
                try:
                    if parser(line):
                        record = parser.getvalue()
                        parser = None
                except ValueError:
                    parser = None
                    self._info(f'line:\n{line}\nconsist errors:\n{format_exc()}')
                    seek = yield parse_error(count, pos, self._format_log(), {})
                    if seek is not None:
                        yield
                        count = seek - 1
                        self.__already_seeked = False
                    else:
                        failed = True
                    self._flush_log()
            elif line.startswith('$RFMT'):
                # new reaction record delimiter: flush the previous record first
                if record:
                    record['meta'].update(self._prepare_meta(meta))
                    if title:
                        record['title'] = title
                    try:
                        if is_reaction:
                            container = self._convert_reaction(record)
                        else:
                            container = self._convert_structure(record)
                    except ValueError:
                        self._info(f'record consist errors:\n{format_exc()}')
                        seek = yield parse_error(count, pos, self._format_log(), record['meta'])
                    else:
                        if self._store_log:
                            log = self._format_log()
                            if log:
                                container.meta['CGRtoolsParserLog'] = log
                        seek = yield container
                    self._flush_log()
                    record = None
                    if seek is not None:
                        yield
                        count = seek - 1
                        self.__already_seeked = False
                        continue
                if seekable:
                    pos = file.tell()  # $RXN line starting position
                count += 1
                is_reaction = True
                ir = 4
                failed = False
                mkey = None
                meta = defaultdict(list)
            elif line.startswith('$MFMT'):
                # new molecule record delimiter: flush the previous record first
                if record:
                    record['meta'].update(self._prepare_meta(meta))
                    if title:
                        record['title'] = title
                    try:
                        if is_reaction:
                            container = self._convert_reaction(record)
                        else:
                            container = self._convert_structure(record)
                    except ValueError:
                        self._info(f'record consist errors:\n{format_exc()}')
                        seek = yield parse_error(count, pos, self._format_log(), record['meta'])
                    else:
                        if self._store_log:
                            log = self._format_log()
                            if log:
                                container.meta['CGRtoolsParserLog'] = log
                        seek = yield container
                    self._flush_log()
                    record = None
                    if seek is not None:
                        yield
                        count = seek - 1
                        self.__already_seeked = False
                        continue
                if seekable:
                    pos = file.tell()  # MOL block line starting position
                count += 1
                ir = 3
                failed = is_reaction = False
                mkey = None
                meta = defaultdict(list)
            elif record:
                # between structure block and next delimiter: metadata lines
                if line.startswith('$DTYPE'):
                    mkey = line[7:].strip()
                    if not mkey:
                        self._info(f'invalid metadata entry: {line}')
                elif mkey:
                    data = line.lstrip("$DATUM").strip()
                    if data:
                        meta[mkey].append(data)
            elif ir:
                if ir == 3:  # parse mol or rxn title
                    title = line.strip()
                ir -= 1
            else:
                # first structural line of a record: pick the right parser
                try:
                    if is_reaction:
                        if line.startswith('M V30 COUNTS'):
                            parser = ERXNRead(line, self._ignore, self._log_buffer)
                        else:
                            parser = RXNRead(line, self._ignore, self._log_buffer)
                    else:
                        if 'V2000' in line:
                            parser = MOLRead(line, self._log_buffer)
                        elif 'V3000' in line:
                            parser = EMOLRead(self._log_buffer)
                        else:
                            raise ValueError('invalid MOL entry')
                except ValueError:
                    failed = True
                    self._info(f'line:\n{line}\nconsist errors:\n{format_exc()}')
                    seek = yield parse_error(count, pos, self._format_log(), {})
                    if seek is not None:
                        yield
                        count = seek - 1
                        self.__already_seeked = False
                    self._flush_log()
        # EOF: emit the final record, which has no trailing delimiter
        # NOTE(review): `title` may be unbound here on malformed input that
        # never hit the ir==3 branch - confirm upstream guarantees
        if record:
            record['meta'].update(self._prepare_meta(meta))
            if title:
                record['title'] = title
            try:
                if is_reaction:
                    container = self._convert_reaction(record)
                else:
                    container = self._convert_structure(record)
            except ValueError:
                self._info(f'record consist errors:\n{format_exc()}')
                log = self._format_log()
                self._flush_log()
                yield parse_error(count, pos, log, record['meta'])
            else:
                if self._store_log:
                    log = self._format_log()
                    if log:
                        container.meta['CGRtoolsParserLog'] = log
                self._flush_log()
                yield container
__already_seeked = False
class _RDFWrite:
    """Mixin handling one-time RDF header emission for RDF writers."""
    def __init__(self, file, *, append: bool = False, write3d: bool = False, mapping: bool = True):
        """
        :param append: append to existing file (True) or rewrite it (False). For buffered writer object append = False
            will write RDF header and append = True will omit the header.
        :param write3d: write for Molecules first 3D coordinates instead 2D if exists.
        :param mapping: write atom mapping.
        """
        super().__init__(file, append=append, write3d=int(write3d), mapping=mapping)
        # emit the header exactly once, before the first record: when
        # rewriting, or when appending to an empty real (non-buffer) file
        if not append or (not self._is_buffer and self._file.tell() == 0):
            self.write = self.__write
    def __write(self, data):
        """Write the RDF header, then hand off to the regular write method."""
        del self.write  # drop the instance override; later calls use the class method
        self._file.write(strftime('$RDFILE 1\n$DATM %m/%d/%y %H:%M\n'))
        self.write(data)
class RDFWrite(_RDFWrite, MDLWrite):
    """
    MDL RDF files writer. works similar to opened for writing file object. support `with` context manager.
    on initialization accept opened for writing in text mode file, string path to file,
    pathlib.Path object or another buffered writer object
    """
    def write(self, data):
        """
        Serialize one molecule or reaction as a V2000 RDF record,
        followed by its metadata as $DTYPE/$DATUM pairs.

        :param data: Graph (molecule) or ReactionContainer
        :raises TypeError: for any other object
        """
        out = self._file
        if isinstance(data, Graph):
            # convert before writing the delimiter so a conversion failure
            # leaves the file untouched
            block = self._convert_structure(data)
            out.write('$MFMT\n')
            out.write(block)
        elif isinstance(data, ReactionContainer):
            ag = f'{len(data.reagents):3d}' if data.reagents else ''
            out.write(f'$RFMT\n$RXN\n{data.name}\n\n\n{len(data.reactants):3d}{len(data.products):3d}{ag}\n')
            for part in chain(data.reactants, data.products, data.reagents):
                block = self._convert_structure(part)
                out.write('$MOL\n')
                out.write(block)
        else:
            raise TypeError('Graph or Reaction object expected')
        for k, v in data.meta.items():
            out.write(f'$DTYPE {k}\n$DATUM {v}\n')
class ERDFWrite(_RDFWrite, EMDLWrite):
    """
    MDL V3000 RDF files writer. works similar to opened for writing file object. support `with` context manager.
    on initialization accept opened for writing in text mode file, string path to file,
    pathlib.Path object or another buffered writer object
    """
    def write(self, data):
        # serialize one molecule or reaction as a V3000 RDF record, then
        # its metadata as $DTYPE/$DATUM pairs
        if isinstance(data, MoleculeContainer):
            m = self._convert_structure(data)
            self._file.write(f'$MFMT\n{data.name}\n\n\n 0 0 0 0 0 999 V3000\n')
            self._file.write(m)
            self._file.write('M END\n')
        elif isinstance(data, ReactionContainer):
            # reagents are optional and appended to the COUNTS line
            ag = f' {len(data.reagents)}' if data.reagents else ''
            self._file.write(f'$RFMT\n$RXN V3000\n{data.name}\n\n\n'
                             f'M V30 COUNTS {len(data.reactants)} {len(data.products)}{ag}\nM V30 BEGIN REACTANT\n')
            for m in data.reactants:
                self._file.write(self._convert_structure(m))
            self._file.write('M V30 END REACTANT\nM V30 BEGIN PRODUCT\n')
            for m in data.products:
                self._file.write(self._convert_structure(m))
            self._file.write('M V30 END PRODUCT\n')
            if data.reagents:
                self._file.write('M V30 BEGIN AGENT\n')
                for m in data.reagents:
                    self._file.write(self._convert_structure(m))
                self._file.write('M V30 END AGENT\n')
            self._file.write('M END\n')
        else:
            raise TypeError('Molecule or Reaction object expected')
        for k, v in data.meta.items():
            self._file.write(f'$DTYPE {k}\n$DATUM {v}\n')
class RDFread:
    """
    Deprecated alias for RDFRead, kept for backward compatibility.

    Emits a DeprecationWarning on construction and forwards every
    operation to the wrapped RDFRead instance.
    """
    def __init__(self, *args, **kwargs):
        warn('RDFread deprecated. Use RDFRead instead', DeprecationWarning)
        warning('RDFread deprecated. Use RDFRead instead')
        self.__obj = RDFRead(*args, **kwargs)

    def __getattr__(self, item):
        # anything not defined on the wrapper falls through to the reader
        return getattr(self.__obj, item)

    def __iter__(self):
        return iter(self.__obj)

    def __next__(self):
        return next(self.__obj)

    def __getitem__(self, item):
        return self.__obj[item]

    def __len__(self):
        return len(self.__obj)

    def __enter__(self):
        return self.__obj.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        return self.__obj.__exit__(exc_type, exc_value, exc_tb)
class RDFwrite:
    """
    Deprecated alias for RDFWrite, kept for backward compatibility.

    Emits a DeprecationWarning on construction and forwards every
    operation to the wrapped RDFWrite instance.
    """
    def __init__(self, *args, **kwargs):
        warn('RDFwrite deprecated. Use RDFWrite instead', DeprecationWarning)
        warning('RDFwrite deprecated. Use RDFWrite instead')
        self.__obj = RDFWrite(*args, **kwargs)

    def __getattr__(self, item):
        # anything not defined on the wrapper falls through to the writer
        return getattr(self.__obj, item)

    def __enter__(self):
        return self.__obj.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        return self.__obj.__exit__(exc_type, exc_value, exc_tb)
# public API: reader/writers plus the deprecated aliases
__all__ = ['RDFRead', 'RDFWrite', 'ERDFWrite', 'RDFread', 'RDFwrite']
| lgpl-3.0 |
vsantana/autokey | src/lib/interface.py | 46 | 44692 | # -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# public API: the two alternative X event-listening backends
__all__ = ["XRecordInterface", "AtSpiInterface"]
import os, threading, re, time, socket, select, logging, Queue, subprocess
try:
import pyatspi
HAS_ATSPI = True
except ImportError:
HAS_ATSPI = False
from Xlib import X, XK, display, error
try:
from Xlib.ext import record, xtest
HAS_RECORD = True
except ImportError:
HAS_RECORD = False
from Xlib.protocol import rq, event
import common
if common.USING_QT:
from PyQt4.QtGui import QClipboard, QApplication
else:
from gi.repository import Gtk, Gdk
logger = logging.getLogger("interface")
# pairs of (index into the X modifier mapping, corresponding event state mask),
# used to translate the server's modifier mapping into grab/event masks
MASK_INDEXES = [
               (X.ShiftMapIndex, X.ShiftMask),
               (X.ControlMapIndex, X.ControlMask),
               (X.LockMapIndex, X.LockMask),
               (X.Mod1MapIndex, X.Mod1Mask),
               (X.Mod2MapIndex, X.Mod2Mask),
               (X.Mod3MapIndex, X.Mod3Mask),
               (X.Mod4MapIndex, X.Mod4Mask),
               (X.Mod5MapIndex, X.Mod5Mask),
               ]
# bits of the keyboard LED mask reported by get_keyboard_control()
CAPSLOCK_LEDMASK = 1<<0
NUMLOCK_LEDMASK = 1<<1
class XInterfaceBase(threading.Thread):
"""
Encapsulates the common functionality for the two X interface classes.
"""
    def __init__(self, mediator, app):
        """
        Set up the X interface thread and its supporting machinery.

        :param mediator: IoMediator receiving decoded key/mouse events
        :param app: application object providing configManager and exec_in_main
        """
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.setName("XInterface-thread")
        self.mediator = mediator
        self.app = app
        self.lastChars = [] # QT4 Workaround
        self.__enableQT4Workaround = False # QT4 Workaround
        self.shutdown = False
        # Event loop
        self.eventThread = threading.Thread(target=self.__eventLoop)
        self.queue = Queue.Queue()
        # Event listener
        self.listenerThread = threading.Thread(target=self.__flushEvents)
        if common.USING_QT:
            self.clipBoard = QApplication.clipboard()
        else:
            self.clipBoard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
            self.selection = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
        self.__initMappings()
        # Set initial lock state from the keyboard LED mask
        ledMask = self.localDisplay.get_keyboard_control().led_mask
        mediator.set_modifier_state(Key.CAPSLOCK, (ledMask & CAPSLOCK_LEDMASK) != 0)
        mediator.set_modifier_state(Key.NUMLOCK, (ledMask & NUMLOCK_LEDMASK) != 0)
        # Window name atoms
        self.__NameAtom = self.localDisplay.intern_atom("_NET_WM_NAME", True)
        self.__VisibleNameAtom = self.localDisplay.intern_atom("_NET_WM_VISIBLE_NAME", True)
        if not common.USING_QT:
            # re-grab all hotkeys whenever the X keymap changes (layout switch)
            self.keyMap = Gdk.Keymap.get_default()
            self.keyMap.connect("keys-changed", self.on_keys_changed)
        self.__ignoreRemap = False
        self.eventThread.start()
        self.listenerThread.start()
def __eventLoop(self):
while True:
method, args = self.queue.get()
if method is None and args is None:
break
try:
method(*args)
except Exception, e:
logger.exception("Error in X event loop thread")
self.queue.task_done()
    def __enqueue(self, method, *args):
        # queue the callable for serial execution on the event thread
        self.queue.put_nowait((method, args))
def on_keys_changed(self, data=None):
if not self.__ignoreRemap:
logger.debug("Recorded keymap change event")
self.__ignoreRemap = True
time.sleep(0.2)
self.__enqueue(self.__ungrabAllHotkeys)
self.__enqueue(self.__delayedInitMappings)
else:
logger.debug("Ignored keymap change event")
    def __delayedInitMappings(self):
        # rebuild the keyboard mappings, then accept keymap-change events again
        self.__initMappings()
        self.__ignoreRemap = False
    def __initMappings(self):
        """
        (Re)connect to the X server and rebuild all keyboard state:
        usable shift-level offsets, modifier masks, hotkey grabs and the
        list of spare keycodes usable for remapping unknown characters.
        """
        self.localDisplay = display.Display()
        self.rootWindow = self.localDisplay.screen().root
        self.rootWindow.change_attributes(event_mask=X.SubstructureNotifyMask|X.StructureNotifyMask)
        altList = self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)
        self.__usableOffsets = (0, 1)
        for code, offset in altList:
            # keycode 108 at offset 0 signals a usable AltGr; enables levels 4/5
            if code == 108 and offset == 0:
                self.__usableOffsets += (4, 5)
                logger.debug("Enabling sending using Alt-Grid")
                break
        # Build modifier mask mapping
        self.modMasks = {}
        mapping = self.localDisplay.get_modifier_mapping()
        for keySym, ak in XK_TO_AK_MAP.iteritems():
            if ak in MODIFIERS:
                keyCodeList = self.localDisplay.keysym_to_keycodes(keySym)
                found = False
                for keyCode, lvl in keyCodeList:
                    for index, mask in MASK_INDEXES:
                        if keyCode in mapping[index]:
                            self.modMasks[ak] = mask
                            found = True
                            break
                    if found: break
        logger.debug("Modifier masks: %r", self.modMasks)
        self.__grabHotkeys()
        self.localDisplay.flush()
        # --- get list of keycodes that are unused in the current keyboard mapping
        keyCode = 8
        avail = []
        for keyCodeMapping in self.localDisplay.get_keyboard_mapping(keyCode, 200):
            codeAvail = True
            for offset in keyCodeMapping:
                if offset != 0:
                    codeAvail = False
                    break
            if codeAvail:
                avail.append(keyCode)
            keyCode += 1
        self.__availableKeycodes = avail
        self.remappedChars = {}
        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
            self.keymap_test()
    def keymap_test(self):
        # debug helper: dump the AltGr keysym name and the keycode mapping
        # of every printable character to the debug log
        code = self.localDisplay.keycode_to_keysym(108, 0)
        for attr in XK.__dict__.iteritems():
            if attr[0].startswith("XK"):
                if attr[1] == code:
                    logger.debug("Alt-Grid: %s, %s", attr[0], attr[1])
        logger.debug(repr(self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)))
        logger.debug("X Server Keymap")
        for char in "\\|`1234567890-=~!@#$%^&*()qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?":
            keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
            if len(keyCodeList) > 0:
                logger.debug("[%s] : %s", char, keyCodeList)
            else:
                logger.debug("No mapping for [%s]", char)
def __needsMutterWorkaround(self, item):
if Key.SUPER not in item.modifiers:
return False
try:
output = subprocess.check_output(["ps", "-eo", "command"])
lines = output.splitlines()
for line in lines:
if "gnome-shell" in line or "cinnamon" in line or "unity" in line:
return True
except:
pass # since this is just a nasty workaround, if anything goes wrong just disable it
return False
    def __grabHotkeys(self):
        """
        Run during startup to grab global and specific hotkeys in all open windows
        """
        c = self.app.configManager
        hotkeys = c.hotKeys + c.hotKeyFolders
        # Grab global hotkeys in root window
        for item in c.globalHotkeys:
            if item.enabled:
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
        # Grab hotkeys without a filter in root window
        for item in hotkeys:
            if item.get_applicable_regex() is None:
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
        # filtered hotkeys are grabbed per matching window, recursively
        self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys)
    def __recurseTree(self, parent, hotkeys):
        # Grab matching hotkeys in all open child windows
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            try:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                if title or klass:
                    for item in hotkeys:
                        if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
                            self.__grabHotkey(item.hotKey, item.modifiers, window)
                            self.__grabRecurse(item, window, False)
                # recurse via the queue so one huge tree can't starve other work
                self.__enqueue(self.__recurseTree, window, hotkeys)
            except:
                logger.exception("grab on window failed")
    def __ungrabAllHotkeys(self):
        """
        Ungrab all hotkeys in preparation for keymap change
        """
        c = self.app.configManager
        hotkeys = c.hotKeys + c.hotKeyFolders
        # Ungrab global hotkeys in root window, recursively
        for item in c.globalHotkeys:
            if item.enabled:
                self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__ungrabRecurse(item, self.rootWindow, False)
        # Ungrab hotkeys without a filter in root window, recursively
        for item in hotkeys:
            if item.get_applicable_regex() is None:
                self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__ungrabRecurse(item, self.rootWindow, False)
        self.__recurseTreeUngrab(self.rootWindow, hotkeys)
    def __recurseTreeUngrab(self, parent, hotkeys):
        # Ungrab matching hotkeys in all open child windows
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            try:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                if title or klass:
                    for item in hotkeys:
                        if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
                            self.__ungrabHotkey(item.hotKey, item.modifiers, window)
                            self.__ungrabRecurse(item, window, False)
                # continue the traversal asynchronously on the event thread
                self.__enqueue(self.__recurseTreeUngrab, window, hotkeys)
            except:
                logger.exception("ungrab on window failed")
    def __grabHotkeysForWindow(self, window):
        """
        Grab all hotkeys relevant to the window
        Used when a new window is created
        """
        c = self.app.configManager
        hotkeys = c.hotKeys + c.hotKeyFolders
        title = self.get_window_title(window)
        klass = self.get_window_class(window)
        for item in hotkeys:
            if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
            elif self.__needsMutterWorkaround(item):
                # Super hotkeys also need per-window grabs under Mutter-like WMs
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
    def __grabHotkey(self, key, modifiers, window):
        """
        Grab a specific hotkey in the given window
        """
        logger.debug("Grabbing hotkey: %r %r", modifiers, key)
        try:
            keycode = self.__lookupKeyCode(key)
            mask = 0
            for mod in modifiers:
                mask |= self.modMasks[mod]
            window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync)
            # also grab with NumLock/CapsLock bits set so the hotkey fires
            # regardless of lock state
            if Key.NUMLOCK in self.modMasks:
                window.grab_key(keycode, mask|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
            if Key.CAPSLOCK in self.modMasks:
                window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
            if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
                window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
        except Exception, e:
            logger.warn("Failed to grab hotkey %r %r: %s", modifiers, key, str(e))
def grab_hotkey(self, item):
"""
Grab a hotkey.
If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
If it has a filter regex, iterate over all children of the root and grab from matching windows
"""
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
else:
self.__enqueue(self.__grabRecurse, item, self.rootWindow)
    def __grabRecurse(self, item, parent, checkWinInfo=True):
        # depth-first traversal; once a window matches the item's filter all
        # of its descendants are grabbed without re-checking (checkWinInfo=False)
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            shouldTrigger = False
            if checkWinInfo:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                shouldTrigger = item._should_trigger_window_title((title, klass))
            if shouldTrigger or not checkWinInfo:
                self.__grabHotkey(item.hotKey, item.modifiers, window)
                self.__grabRecurse(item, window, False)
            else:
                self.__grabRecurse(item, window)
    def ungrab_hotkey(self, item):
        """
        Ungrab a hotkey.
        If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
        If it has a filter regex, iterate over all children of the root and ungrab from matching windows
        """
        import copy
        # copy the item so edits to it cannot change what is ungrabbed by the
        # time the queued ungrab runs (NOTE(review): presumed intent - confirm)
        newItem = copy.copy(item)
        if item.get_applicable_regex() is None:
            self.__enqueue(self.__ungrabHotkey, newItem.hotKey, newItem.modifiers, self.rootWindow)
            if self.__needsMutterWorkaround(item):
                self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow, False)
        else:
            self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow)
    def __ungrabRecurse(self, item, parent, checkWinInfo=True):
        # mirror of __grabRecurse: ungrab in matching windows and descendants
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            shouldTrigger = False
            if checkWinInfo:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                shouldTrigger = item._should_trigger_window_title((title, klass))
            if shouldTrigger or not checkWinInfo:
                self.__ungrabHotkey(item.hotKey, item.modifiers, window)
                self.__ungrabRecurse(item, window, False)
            else:
                self.__ungrabRecurse(item, window)
    def __ungrabHotkey(self, key, modifiers, window):
        """
        Ungrab a specific hotkey in the given window
        """
        logger.debug("Ungrabbing hotkey: %r %r", modifiers, key)
        try:
            keycode = self.__lookupKeyCode(key)
            mask = 0
            for mod in modifiers:
                mask |= self.modMasks[mod]
            window.ungrab_key(keycode, mask)
            # release the lock-state variants grabbed by __grabHotkey too
            if Key.NUMLOCK in self.modMasks:
                window.ungrab_key(keycode, mask|self.modMasks[Key.NUMLOCK])
            if Key.CAPSLOCK in self.modMasks:
                window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK])
            if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
                window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK])
        except Exception, e:
            logger.warn("Failed to ungrab hotkey %r %r: %s", modifiers, key, str(e))
def lookup_string(self, keyCode, shifted, numlock, altGrid):
if keyCode == 0:
return "<unknown>"
keySym = self.localDisplay.keycode_to_keysym(keyCode, 0)
if keySym in XK_TO_AK_NUMLOCKED and numlock and not (numlock and shifted):
return XK_TO_AK_NUMLOCKED[keySym]
elif keySym in XK_TO_AK_MAP:
return XK_TO_AK_MAP[keySym]
else:
try:
index = 0
if shifted: index += 1
if altGrid: index += 4
return unichr(self.localDisplay.keycode_to_keysym(keyCode, index))
except ValueError:
return "<code%d>" % keyCode
    def send_string_clipboard(self, string, pasteCommand):
        # queue a clipboard-based send; pasteCommand of None means
        # middle-click (primary selection) paste
        self.__enqueue(self.__sendStringClipboard, string, pasteCommand)
    def __sendStringClipboard(self, string, pasteCommand):
        # event-thread worker: load the string into the selection/clipboard,
        # then trigger the paste in the focused window
        logger.debug("Sending string: %r", string)
        if pasteCommand is None:
            # middle-click paste via the primary selection
            if common.USING_QT:
                self.sem = threading.Semaphore(0)
                self.app.exec_in_main(self.__fillSelection, string)
                self.sem.acquire() # wait until the UI thread filled the selection
            else:
                self.__fillSelection(string)
            focus = self.localDisplay.get_input_focus().focus
            xtest.fake_input(focus, X.ButtonPress, X.Button2)
            xtest.fake_input(focus, X.ButtonRelease, X.Button2)
        else:
            # clipboard paste via an application-specific paste hotkey
            if common.USING_QT:
                self.sem = threading.Semaphore(0)
                self.app.exec_in_main(self.__fillClipboard, string)
                self.sem.acquire()
            else:
                self.__fillClipboard(string)
            self.mediator.send_string(pasteCommand)
            if common.USING_QT:
                self.app.exec_in_main(self.__restoreClipboard)
        logger.debug("Send via clipboard done")
def __restoreClipboard(self):
if self.__savedClipboard != "":
if common.USING_QT:
self.clipBoard.setText(self.__savedClipboard, QClipboard.Clipboard)
else:
Gdk.threads_enter()
self.clipBoard.set_text(self.__savedClipboard)
Gdk.threads_leave()
    def __fillSelection(self, string):
        # set the X primary selection; in the Qt branch release the semaphore
        # so the waiting event thread can continue
        if common.USING_QT:
            self.clipBoard.setText(string, QClipboard.Selection)
            self.sem.release()
        else:
            Gdk.threads_enter()
            self.selection.set_text(string.encode("utf-8"))
            Gdk.threads_leave()
    def __fillClipboard(self, string):
        # save the current clipboard contents for later restore, then load
        # the string that is about to be pasted
        if common.USING_QT:
            self.__savedClipboard = self.clipBoard.text()
            self.clipBoard.setText(string, QClipboard.Clipboard)
            self.sem.release()
        else:
            Gdk.threads_enter()
            text = self.clipBoard.wait_for_text()
            self.__savedClipboard = ''
            if text is not None: self.__savedClipboard = text
            self.clipBoard.set_text(string.encode("utf-8"))
            Gdk.threads_leave()
    def begin_send(self):
        # grab the keyboard before sending so user keystrokes don't interleave
        self.__enqueue(self.__grab_keyboard)
    def finish_send(self):
        self.__enqueue(self.__ungrabKeyboard)
    def grab_keyboard(self):
        self.__enqueue(self.__grab_keyboard)
    def __grab_keyboard(self):
        # event-thread worker: async grab on the currently focused window
        focus = self.localDisplay.get_input_focus().focus
        focus.grab_keyboard(True, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
        self.localDisplay.flush()
    def ungrab_keyboard(self):
        self.__enqueue(self.__ungrabKeyboard)
    def __ungrabKeyboard(self):
        self.localDisplay.ungrab_keyboard(X.CurrentTime)
        self.localDisplay.flush()
def __findUsableKeycode(self, codeList):
for code, offset in codeList:
if offset in self.__usableOffsets:
return code, offset
return None, None
    def send_string(self, string):
        # queue the string send on the event thread
        self.__enqueue(self.__sendString, string)
    def __sendString(self, string):
        """
        Send a string of printable characters.

        Characters without a usable keycode are temporarily remapped onto
        spare keycodes (collected in __initMappings) before sending.
        """
        logger.debug("Sending string: %r", string)
        # Determine if workaround is needed
        if not ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND]:
            self.__checkWorkaroundNeeded()
        # First find out if any chars need remapping
        remapNeeded = False
        for char in string:
            keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
            usableCode, offset = self.__findUsableKeycode(keyCodeList)
            if usableCode is None and char not in self.remappedChars:
                remapNeeded = True
                break
        # Now we know chars need remapping, do it
        if remapNeeded:
            # suppress the keys-changed handler while we rewrite the keymap
            self.__ignoreRemap = True
            self.remappedChars = {}
            remapChars = []
            for char in string:
                keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
                usableCode, offset = self.__findUsableKeycode(keyCodeList)
                if usableCode is None:
                    remapChars.append(char)
            logger.debug("Characters requiring remapping: %r", remapChars)
            availCodes = self.__availableKeycodes
            logger.debug("Remapping with keycodes in the range: %r", availCodes)
            mapping = self.localDisplay.get_keyboard_mapping(8, 200)
            firstCode = 8
            # pack two characters per spare keycode: plain (offset 0) and
            # shifted (offset 1)
            for i in xrange(len(availCodes) - 1):
                code = availCodes[i]
                sym1 = 0
                sym2 = 0
                if len(remapChars) > 0:
                    char = remapChars.pop(0)
                    self.remappedChars[char] = (code, 0)
                    sym1 = ord(char)
                if len(remapChars) > 0:
                    char = remapChars.pop(0)
                    self.remappedChars[char] = (code, 1)
                    sym2 = ord(char)
                if sym1 != 0:
                    mapping[code - firstCode][0] = sym1
                    mapping[code - firstCode][1] = sym2
            mapping = [tuple(l) for l in mapping]
            self.localDisplay.change_keyboard_mapping(firstCode, mapping)
            self.localDisplay.flush()
        focus = self.localDisplay.get_input_focus().focus
        for char in string:
            try:
                keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
                keyCode, offset = self.__findUsableKeycode(keyCodeList)
                if keyCode is not None:
                    # offsets: 0 plain, 1 shift, 4 AltGr, 5 AltGr+shift
                    if offset == 0:
                        self.__sendKeyCode(keyCode, theWindow=focus)
                    if offset == 1:
                        self.__pressKey(Key.SHIFT)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
                        self.__releaseKey(Key.SHIFT)
                    if offset == 4:
                        self.__pressKey(Key.ALT_GR)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR], focus)
                        self.__releaseKey(Key.ALT_GR)
                    if offset == 5:
                        self.__pressKey(Key.ALT_GR)
                        self.__pressKey(Key.SHIFT)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR]|self.modMasks[Key.SHIFT], focus)
                        self.__releaseKey(Key.SHIFT)
                        self.__releaseKey(Key.ALT_GR)
                elif char in self.remappedChars:
                    keyCode, offset = self.remappedChars[char]
                    if offset == 0:
                        self.__sendKeyCode(keyCode, theWindow=focus)
                    if offset == 1:
                        self.__pressKey(Key.SHIFT)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
                        self.__releaseKey(Key.SHIFT)
                else:
                    logger.warn("Unable to send character %r", char)
            except Exception, e:
                logger.exception("Error sending char %r: %s", char, str(e))
        self.__ignoreRemap = False
    def send_key(self, keyName):
        """
        Send a specific non-printing key, eg Up, Left, etc
        """
        self.__enqueue(self.__sendKey, keyName)
    def __sendKey(self, keyName):
        # event-thread worker for send_key
        logger.debug("Send special key: [%r]", keyName)
        self.__sendKeyCode(self.__lookupKeyCode(keyName))
    def fake_keypress(self, keyName):
        # synthesize a press+release via XTEST (server-level, not a window event)
        self.__enqueue(self.__fakeKeypress, keyName)
    def __fakeKeypress(self, keyName):
        keyCode = self.__lookupKeyCode(keyName)
        xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
        xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
    def fake_keydown(self, keyName):
        # press only - pair with fake_keyup
        self.__enqueue(self.__fakeKeydown, keyName)
    def __fakeKeydown(self, keyName):
        keyCode = self.__lookupKeyCode(keyName)
        xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
    def fake_keyup(self, keyName):
        self.__enqueue(self.__fakeKeyup, keyName)
    def __fakeKeyup(self, keyName):
        keyCode = self.__lookupKeyCode(keyName)
        xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
    def send_modified_key(self, keyName, modifiers):
        """
        Send a modified key (e.g. when emulating a hotkey)
        """
        self.__enqueue(self.__sendModifiedKey, keyName, modifiers)
    def __sendModifiedKey(self, keyName, modifiers):
        # press all modifiers, send the key with the combined mask, then
        # release the modifiers (in iteration order)
        logger.debug("Send modified key: modifiers: %s key: %s", modifiers, keyName)
        try:
            mask = 0
            for mod in modifiers:
                mask |= self.modMasks[mod]
            keyCode = self.__lookupKeyCode(keyName)
            for mod in modifiers: self.__pressKey(mod)
            self.__sendKeyCode(keyCode, mask)
            for mod in modifiers: self.__releaseKey(mod)
        except Exception, e:
            logger.warn("Error sending modified key %r %r: %s", modifiers, keyName, str(e))
    def send_mouse_click(self, xCoord, yCoord, button, relative):
        # relative=True: coordinates are relative to the focused window
        self.__enqueue(self.__sendMouseClick, xCoord, yCoord, button, relative)
    def __sendMouseClick(self, xCoord, yCoord, button, relative):
        # Get current pointer position so we can return it there
        pos = self.rootWindow.query_pointer()
        if relative:
            focus = self.localDisplay.get_input_focus().focus
            focus.warp_pointer(xCoord, yCoord)
            xtest.fake_input(focus, X.ButtonPress, button, x=xCoord, y=yCoord)
            xtest.fake_input(focus, X.ButtonRelease, button, x=xCoord, y=yCoord)
        else:
            self.rootWindow.warp_pointer(xCoord, yCoord)
            xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
            xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
        # restore the original pointer position
        self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
        self.__flush()
    def send_mouse_click_relative(self, xoff, yoff, button):
        # click at an offset from the current pointer position
        self.__enqueue(self.__sendMouseClickRelative, xoff, yoff, button)
    def __sendMouseClickRelative(self, xoff, yoff, button):
        # Get current pointer position
        pos = self.rootWindow.query_pointer()
        xCoord = pos.root_x + xoff
        yCoord = pos.root_y + yoff
        self.rootWindow.warp_pointer(xCoord, yCoord)
        xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
        xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
        # restore the original pointer position
        self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
        self.__flush()
    def flush(self):
        self.__enqueue(self.__flush)
    def __flush(self):
        # push pending requests to the X server and reset the QT4-workaround
        # history of recently sent keycodes
        self.localDisplay.flush()
        self.lastChars = []
    def press_key(self, keyName):
        # press without release - pair with release_key
        self.__enqueue(self.__pressKey, keyName)
    def __pressKey(self, keyName):
        self.__sendKeyPressEvent(self.__lookupKeyCode(keyName), 0)
    def release_key(self, keyName):
        self.__enqueue(self.__releaseKey, keyName)
    def __releaseKey(self, keyName):
        self.__sendKeyReleaseEvent(self.__lookupKeyCode(keyName), 0)
def __flushEvents(self):
while True:
try:
readable, w, e = select.select([self.localDisplay], [], [], 1)
time.sleep(1)
if self.localDisplay in readable:
createdWindows = []
destroyedWindows = []
for x in xrange(self.localDisplay.pending_events()):
event = self.localDisplay.next_event()
if event.type == X.CreateNotify:
createdWindows.append(event.window)
if event.type == X.DestroyNotify:
destroyedWindows.append(event.window)
for window in createdWindows:
if window not in destroyedWindows:
self.__enqueue(self.__grabHotkeysForWindow, window)
if self.shutdown:
break
except:
pass
    def handle_keypress(self, keyCode):
        # queue keypress handling on the event thread
        self.__enqueue(self.__handleKeyPress, keyCode)
def __handleKeyPress(self, keyCode):
focus = self.localDisplay.get_input_focus().focus
modifier = self.__decodeModifier(keyCode)
if modifier is not None:
self.mediator.handle_modifier_down(modifier)
else:
self.mediator.handle_keypress(keyCode, self.get_window_title(focus), self.get_window_class(focus))
    def handle_keyrelease(self, keyCode):
        # queue key-release handling on the event thread
        self.__enqueue(self.__handleKeyrelease, keyCode)
    def __handleKeyrelease(self, keyCode):
        # only modifier releases are tracked; ordinary keys are ignored here
        modifier = self.__decodeModifier(keyCode)
        if modifier is not None:
            self.mediator.handle_modifier_up(modifier)
def handle_mouseclick(self, button, x, y):
self.__enqueue(self.__handleMouseclick, button, x, y)
def __handleMouseclick(self, button, x, y):
    """Report a mouse click to the mediator with root- and window-relative
    coordinates plus the focused window's (title, class).

    x/y of None means "query the current pointer position instead".
    """
    title = self.get_window_title()
    klass = self.get_window_class()
    info = (title, klass)
    if x is None and y is None:
        ret = self.localDisplay.get_input_focus().focus.query_pointer()
        self.mediator.handle_mouse_click(ret.root_x, ret.root_y, ret.win_x, ret.win_y, button, info)
    else:
        focus = self.localDisplay.get_input_focus().focus
        try:
            rel = focus.translate_coords(self.rootWindow, x, y)
            self.mediator.handle_mouse_click(x, y, rel.x, rel.y, button, info)
        except:
            # Best effort: if coordinate translation fails, still deliver
            # the click with (0, 0) window-relative coordinates.
            self.mediator.handle_mouse_click(x, y, 0, 0, button, info)
def __decodeModifier(self, keyCode):
    """
    Checks if the given keyCode is a modifier key. If it is, returns the
    modifier name constant as defined in the iomediator module. If not,
    returns C{None}.
    """
    name = self.lookup_string(keyCode, False, False, False)
    return name if name in MODIFIERS else None
def __sendKeyCode(self, keyCode, modifiers=0, theWindow=None):
    """Send a full press+release for keyCode, applying the QT4 workaround
    first when it is enabled globally or for the focused application."""
    if ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND] or self.__enableQT4Workaround:
        self.__doQT4Workaround(keyCode)
    self.__sendKeyPressEvent(keyCode, modifiers, theWindow)
    self.__sendKeyReleaseEvent(keyCode, modifiers, theWindow)
def __checkWorkaroundNeeded(self):
    """Enable the QT4 workaround when the focused window's title or class
    matches the configured workaround-app pattern."""
    focus = self.localDisplay.get_input_focus().focus
    windowName = self.get_window_title(focus)
    windowClass = self.get_window_class(focus)
    # workAroundApps behaves like a compiled pattern (has .match()).
    w = self.app.configManager.workAroundApps
    if w.match(windowName) or w.match(windowClass):
        self.__enableQT4Workaround = True
    else:
        self.__enableQT4Workaround = False
def __doQT4Workaround(self, keyCode):
    """Throttle repeated keycodes: if keyCode was among the last few keys
    sent, flush the display and pause briefly before sending it again.
    Keeps a rolling history of the last 10 codes."""
    if self.lastChars and keyCode in self.lastChars:
        self.localDisplay.flush()
        time.sleep(0.0125)
    self.lastChars.append(keyCode)
    if len(self.lastChars) > 10:
        del self.lastChars[0]
def __sendKeyPressEvent(self, keyCode, modifiers, theWindow=None):
    """Synthesize a KeyPress event and send it to theWindow, or to the
    current input-focus window when theWindow is None."""
    if theWindow is None:
        focus = self.localDisplay.get_input_focus().focus
    else:
        focus = theWindow
    # Coordinates are fixed dummy values (1, 1); only the keycode and
    # modifier state carry meaning here.
    keyEvent = event.KeyPress(
        detail=keyCode,
        time=X.CurrentTime,
        root=self.rootWindow,
        window=focus,
        child=X.NONE,
        root_x=1,
        root_y=1,
        event_x=1,
        event_y=1,
        state=modifiers,
        same_screen=1
    )
    focus.send_event(keyEvent)
def __sendKeyReleaseEvent(self, keyCode, modifiers, theWindow=None):
    """Synthesize a KeyRelease event and send it to theWindow, or to the
    current input-focus window when theWindow is None."""
    if theWindow is None:
        focus = self.localDisplay.get_input_focus().focus
    else:
        focus = theWindow
    # Mirrors __sendKeyPressEvent; coordinates are dummy values.
    keyEvent = event.KeyRelease(
        detail=keyCode,
        time=X.CurrentTime,
        root=self.rootWindow,
        window=focus,
        child=X.NONE,
        root_x=1,
        root_y=1,
        event_x=1,
        event_y=1,
        state=modifiers,
        same_screen=1
    )
    focus.send_event(keyEvent)
def __lookupKeyCode(self, char):
    """Translate an AutoKey key name to an X keycode.

    Accepts AutoKey names (via AK_TO_XK_MAP), literal "<codeNNN>" strings
    embedding the keycode directly, and single characters (via ord()).
    Logs and re-raises on unknown names.
    """
    if char in AK_TO_XK_MAP:
        return self.localDisplay.keysym_to_keycode(AK_TO_XK_MAP[char])
    elif char.startswith("<code"):
        # "<codeNNN>" -> NNN
        return int(char[5:-1])
    else:
        try:
            return self.localDisplay.keysym_to_keycode(ord(char))
        except Exception, e:
            logger.error("Unknown key name: %s", char)
            raise
def get_window_title(self, window=None, traverse=True):
    """Return the title of *window* (the input-focus window when None).
    Returns "" on any X error rather than raising."""
    try:
        if window is None:
            target = self.localDisplay.get_input_focus().focus
        else:
            target = window
        return self.__getWinTitle(target, traverse)
    except:
        return ""
def __getWinTitle(self, windowvar, traverse):
    """Return the window's title property, recursing up the window tree
    when the window itself has none (and traverse is True)."""
    # __VisibleNameAtom / __NameAtom are initialised outside this view —
    # presumably the EWMH visible-name and name atoms; confirm upstream.
    atom = windowvar.get_property(self.__VisibleNameAtom, 0, 0, 255)
    if atom is None:
        atom = windowvar.get_property(self.__NameAtom, 0, 0, 255)
    if atom:
        return atom.value.decode("utf-8")
    elif traverse:
        return self.__getWinTitle(windowvar.query_tree().parent, True)
    else:
        return ""
def get_window_class(self, window=None, traverse=True):
    """Return the WM_CLASS of *window* (the input-focus window when None).
    Returns "" on any X error rather than raising."""
    try:
        if window is None:
            windowvar = self.localDisplay.get_input_focus().focus
        else:
            windowvar = window
        return self.__getWinClass(windowvar, traverse)
    except:
        return ""
def __getWinClass(self, windowvar, traverse):
    """Return the window's WM_CLASS as 'instance.class', recursing up the
    window tree when the window itself has none (and traverse is True)."""
    wmclass = windowvar.get_wm_class()
    # get_wm_class() may return None when the property is unset; test
    # identity rather than the non-idiomatic "== None".
    if wmclass is None or wmclass == "":
        if traverse:
            return self.__getWinClass(windowvar.query_tree().parent, True)
        else:
            return ""
    return wmclass[0] + '.' + wmclass[1]
def cancel(self):
    """Shut the interface down: wake the worker queue with a sentinel,
    set the shutdown flag, join the helper threads, flush and close the
    display, then join this thread."""
    self.queue.put_nowait((None, None))
    self.shutdown = True
    self.listenerThread.join()
    self.eventThread.join()
    self.localDisplay.flush()
    self.localDisplay.close()
    self.join()
class XRecordInterface(XInterfaceBase):
    """Keyboard/mouse capture implemented with the X RECORD extension."""

    def initialise(self):
        # RECORD needs its own display connection, separate from the one
        # used to send synthetic events.
        self.recordDisplay = display.Display()
        self.__locksChecked = False
        # Check for record extension
        if not self.recordDisplay.has_extension("RECORD"):
            raise Exception("Your X-Server does not have the RECORD extension available/enabled.")

    def run(self):
        """Thread body: record key/button events until the context is disabled."""
        # Create a recording context; we only want key and mouse events
        self.ctx = self.recordDisplay.record_create_context(
            0,
            [record.AllClients],
            [{
                'core_requests': (0, 0),
                'core_replies': (0, 0),
                'ext_requests': (0, 0, 0, 0),
                'ext_replies': (0, 0, 0, 0),
                'delivered_events': (0, 0),
                'device_events': (X.KeyPress, X.ButtonPress), #X.KeyRelease,
                'errors': (0, 0),
                'client_started': False,
                'client_died': False,
            }])
        # Enable the context; this only returns after a call to record_disable_context,
        # while calling the callback function in the meantime
        logger.info("XRecord interface thread starting")
        self.recordDisplay.record_enable_context(self.ctx, self.__processEvent)
        # Finally free the context
        self.recordDisplay.record_free_context(self.ctx)
        self.recordDisplay.close()

    def cancel(self):
        # Disabling the context (via the main connection) unblocks run().
        self.localDisplay.record_disable_context(self.ctx)
        XInterfaceBase.cancel(self)

    def __processEvent(self, reply):
        """RECORD callback: decode the raw protocol data and forward key
        and button events to the base-class handlers."""
        if reply.category != record.FromServer:
            return
        if reply.client_swapped:
            return
        if not len(reply.data) or ord(reply.data[0]) < 2:
            # not an event
            return
        data = reply.data
        while len(data):
            event, data = rq.EventField(None).parse_binary_value(data, self.recordDisplay.display, None, None)
            if event.type == X.KeyPress:
                self.handle_keypress(event.detail)
            elif event.type == X.KeyRelease:
                self.handle_keyrelease(event.detail)
            elif event.type == X.ButtonPress:
                self.handle_mouseclick(event.detail, event.root_x, event.root_y)
class AtSpiInterface(XInterfaceBase):
    """Keyboard/mouse capture via the AT-SPI accessibility registry."""

    def initialise(self):
        self.registry = pyatspi.Registry

    def start(self):
        """Register keystroke and mouse-button listeners with AT-SPI."""
        logger.info("AT-SPI interface thread starting")
        self.registry.registerKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
        self.registry.registerEventListener(self.__processMouseEvent, 'mouse:button')

    def cancel(self):
        """Deregister listeners, stop the registry, then do base cleanup."""
        self.registry.deregisterKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
        self.registry.deregisterEventListener(self.__processMouseEvent, 'mouse:button')
        self.registry.stop()
        XInterfaceBase.cancel(self)

    def __processKeyEvent(self, event):
        if event.type == pyatspi.KEY_PRESSED_EVENT:
            self.handle_keypress(event.hw_code)
        else:
            self.handle_keyrelease(event.hw_code)

    def __processMouseEvent(self, event):
        # Event type strings end in '<button><p|r>'; trailing 'p' marks a
        # press and the digit before it is the button number.
        if event.type[-1] == 'p':
            button = int(event.type[-2])
            self.handle_mouseclick(button, event.detail1, event.detail2)

    def __pumpEvents(self):
        # GLib timeout/idle helper; returning True keeps the source installed.
        pyatspi.Registry.pumpQueuedEvents()
        return True
from iomediator import Key, MODIFIERS
from configmanager import *
# Load the xkb keysym group so XK_ISO_Level3_Shift etc. are available.
XK.load_keysym_group('xkb')

# Mapping from X keysyms to AutoKey's abstract Key constants.
XK_TO_AK_MAP = {
    XK.XK_Shift_L: Key.SHIFT,
    XK.XK_Shift_R: Key.SHIFT,
    XK.XK_Caps_Lock: Key.CAPSLOCK,
    XK.XK_Control_L: Key.CONTROL,
    XK.XK_Control_R: Key.CONTROL,
    XK.XK_Alt_L: Key.ALT,
    XK.XK_Alt_R: Key.ALT,
    XK.XK_ISO_Level3_Shift: Key.ALT_GR,
    XK.XK_Super_L: Key.SUPER,
    XK.XK_Super_R: Key.SUPER,
    XK.XK_Hyper_L: Key.HYPER,
    XK.XK_Hyper_R: Key.HYPER,
    XK.XK_Meta_L: Key.META,
    XK.XK_Meta_R: Key.META,
    XK.XK_Num_Lock: Key.NUMLOCK,
    #SPACE : Key.SPACE,
    XK.XK_Tab: Key.TAB,
    XK.XK_Left: Key.LEFT,
    XK.XK_Right: Key.RIGHT,
    XK.XK_Up: Key.UP,
    XK.XK_Down: Key.DOWN,
    XK.XK_Return: Key.ENTER,
    XK.XK_BackSpace: Key.BACKSPACE,
    XK.XK_Scroll_Lock: Key.SCROLL_LOCK,
    XK.XK_Print: Key.PRINT_SCREEN,
    XK.XK_Pause: Key.PAUSE,
    XK.XK_Menu: Key.MENU,
    XK.XK_F1: Key.F1,
    XK.XK_F2: Key.F2,
    XK.XK_F3: Key.F3,
    XK.XK_F4: Key.F4,
    XK.XK_F5: Key.F5,
    XK.XK_F6: Key.F6,
    XK.XK_F7: Key.F7,
    XK.XK_F8: Key.F8,
    XK.XK_F9: Key.F9,
    XK.XK_F10: Key.F10,
    XK.XK_F11: Key.F11,
    XK.XK_F12: Key.F12,
    XK.XK_Escape: Key.ESCAPE,
    XK.XK_Insert: Key.INSERT,
    XK.XK_Delete: Key.DELETE,
    XK.XK_Home: Key.HOME,
    XK.XK_End: Key.END,
    XK.XK_Page_Up: Key.PAGE_UP,
    XK.XK_Page_Down: Key.PAGE_DOWN,
    XK.XK_KP_Insert: Key.NP_INSERT,
    XK.XK_KP_Delete: Key.NP_DELETE,
    XK.XK_KP_End: Key.NP_END,
    XK.XK_KP_Down: Key.NP_DOWN,
    XK.XK_KP_Page_Down: Key.NP_PAGE_DOWN,
    XK.XK_KP_Left: Key.NP_LEFT,
    XK.XK_KP_Begin: Key.NP_5,
    XK.XK_KP_Right: Key.NP_RIGHT,
    XK.XK_KP_Home: Key.NP_HOME,
    XK.XK_KP_Up: Key.NP_UP,
    XK.XK_KP_Page_Up: Key.NP_PAGE_UP,
    XK.XK_KP_Divide: Key.NP_DIVIDE,
    XK.XK_KP_Multiply: Key.NP_MULTIPLY,
    XK.XK_KP_Add: Key.NP_ADD,
    XK.XK_KP_Subtract: Key.NP_SUBTRACT,
    XK.XK_KP_Enter: Key.ENTER,
    XK.XK_space: ' '
}

# Reverse map (AutoKey name -> X keysym), used when sending keys.
# (dict.iteritems(): this module is Python 2.)
AK_TO_XK_MAP = dict((v, k) for k, v in XK_TO_AK_MAP.iteritems())

# Keypad keysyms mapped to the characters they produce while Num Lock is on.
XK_TO_AK_NUMLOCKED = {
    XK.XK_KP_Insert: "0",
    XK.XK_KP_Delete: ".",
    XK.XK_KP_End: "1",
    XK.XK_KP_Down: "2",
    XK.XK_KP_Page_Down: "3",
    XK.XK_KP_Left: "4",
    XK.XK_KP_Begin: "5",
    XK.XK_KP_Right: "6",
    XK.XK_KP_Home: "7",
    XK.XK_KP_Up: "8",
    XK.XK_KP_Page_Up: "9",
    XK.XK_KP_Divide: "/",
    XK.XK_KP_Multiply: "*",
    XK.XK_KP_Add: "+",
    XK.XK_KP_Subtract: "-",
    XK.XK_KP_Enter: Key.ENTER
}
class MockMediator:
    """
    Mock IoMediator for testing purposes.

    Mirrors the callbacks the X interfaces invoke on the real mediator,
    accepting (and ignoring) the same arguments.
    """

    def handle_modifier_down(self, modifier):
        pass

    def handle_modifier_up(self, modifier):
        pass

    def handle_keypress(self, keyCode, windowName=None, windowClass=None):
        # The interface calls this with (keyCode, windowName, windowClass);
        # the old two-argument signature raised TypeError during the
        # __main__ smoke test. Defaults keep old two-arg callers working.
        pass

    def handle_mouse_click(self, rootX=None, rootY=None, relX=None, relY=None,
                           button=None, windowInfo=None):
        # The interface calls this with six positional arguments; the old
        # zero-argument signature raised TypeError.
        pass
# Manual smoke test: drive the X interface with a mock mediator, dump the
# keymap, run for ten seconds, then shut down cleanly.
# (XLibInterface is defined earlier in this module, outside this view.)
if __name__ == "__main__":
    import time
    x = XLibInterface(MockMediator(), True)
    x.start()
    x.keymap_test()
    time.sleep(10.0)
    #time.sleep(4.0)
    #x.send_unicode_key([0, 3, 9, 4])
    x.cancel()
    # Python 2 print statement — this module predates Python 3.
    print "Test completed. Thank you for your assistance in improving AutoKey!"
| gpl-3.0 |
klausman/scion | python/lib/sibra/ext/sof.py | 3 | 3855 | # Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sof` --- SIBRA Opaque Field
=================================
"""
# Stdlib
import struct
# SCION
from lib.crypto.symcrypto import mac
from lib.defines import (
SIBRA_STEADY_ID_LEN,
SIBRA_EPHEMERAL_ID_LEN,
)
from lib.sibra.ext.info import ResvInfoBase
from lib.packet.packet_base import Serializable
from lib.util import Raw, hex_str
class SibraOpaqueField(Serializable):
    """
    SIBRA Opaque Field. This is used for routing SIBRA packets. It describes the
    ingress/egress interfaces, and has a MAC to authenticate that it was issued
    for this reservation.

     0B       1        2        3        4        5        6        7
     +--------+--------+--------+--------+--------+--------+--------+--------+
     | Ingress IF      | Egress IF       | MAC(IFs, res info, pathID, prev)  |
     +--------+--------+--------+--------+--------+--------+--------+--------+
    """
    NAME = "SibraOpaqueField"
    # Wire-format sizes (bytes).
    MAC_LEN = 4
    IF_LEN = 2
    LEN = IF_LEN * 2 + MAC_LEN
    # Steady + ephemeral path:
    MAX_PATH_IDS_LEN = SIBRA_EPHEMERAL_ID_LEN + 3 * SIBRA_STEADY_ID_LEN
    # Fixed-size MAC input: IFs + reservation info + padded path IDs + prev SOF.
    MAC_DATA_LEN = IF_LEN * 2 + ResvInfoBase.LEN + MAX_PATH_IDS_LEN + LEN
    MAC_BLOCK_SIZE = 16
    MAC_BLOCK_PADDING = MAC_BLOCK_SIZE - (MAC_DATA_LEN % MAC_BLOCK_SIZE)

    def __init__(self, raw=None):  # pragma: no cover
        """Initialise empty fields; Serializable.__init__ parses *raw* if given."""
        self.ingress = None
        self.egress = None
        self.mac = bytes(self.MAC_LEN)
        super().__init__(raw)

    def _parse(self, raw):
        """Populate ingress/egress/mac from the LEN-byte wire encoding."""
        data = Raw(raw, self.NAME, self.LEN)
        self.ingress, self.egress = struct.unpack(
            "!HH", data.pop(self.IF_LEN * 2))
        self.mac = data.pop(self.MAC_LEN)

    @classmethod
    def from_values(cls, ingress, egress):  # pragma: no cover
        """Alternate constructor from interface IDs (MAC left zeroed)."""
        inst = cls()
        inst.ingress = ingress
        inst.egress = egress
        return inst

    def pack(self):
        """Serialize to the LEN-byte wire encoding (big-endian IFs + MAC)."""
        raw = []
        raw.append(struct.pack("!HH", self.ingress, self.egress))
        raw.append(self.mac)
        return b"".join(raw)

    def calc_mac(self, info, key, path_ids, prev_raw=None):
        """
        Calculate the MAC based on the reservation info, the relevant path IDs,
        and the previous SOF field if any. The algorithm is a CBC MAC, with
        constant input size.

        NOTE: the byte layout and ordering here are part of the protocol;
        every append below contributes to the authenticated data.
        """
        raw = []
        raw.append(struct.pack("!HH", self.ingress, self.egress))
        raw.append(info.pack(mac=True))
        ids_len = 0
        for id_ in path_ids:
            ids_len += len(id_)
            raw.append(id_)
        # Pad path IDs with 0's to give constant length
        raw.append(bytes(self.MAX_PATH_IDS_LEN - ids_len))
        raw.append(prev_raw or bytes(self.LEN))
        # Pad to multiple of block size
        raw.append(bytes(self.MAC_BLOCK_PADDING))
        to_mac = b"".join(raw)
        assert len(to_mac) == self.MAC_DATA_LEN + self.MAC_BLOCK_PADDING
        assert len(to_mac) % self.MAC_BLOCK_SIZE == 0
        # Truncate the CBC-MAC output to the wire MAC size.
        return mac(key, to_mac)[:self.MAC_LEN]

    def __len__(self):  # pragma: no cover
        return self.LEN

    def __str__(self):
        tmp = ["%s(%dB):" % (self.NAME, len(self))]
        tmp.append("Ingress: %s" % self.ingress)
        tmp.append("Egress: %s" % self.egress)
        tmp.append("Mac: %s" % hex_str(self.mac))
        return " ".join(tmp)
| apache-2.0 |
jwhitlock/web-platform-compat | mdn/data.py | 2 | 7543 | # coding: utf-8
"""Data lookup operations for MDN parsing."""
from __future__ import unicode_literals
from collections import namedtuple
from webplatformcompat.models import (
Browser, Feature, Section, Specification, Support, Version)
from .utils import is_new_id, normalize_name, slugify
class Data(object):
    """
    Provide data operations for MDN parsing.

    Parsing an MDN page requires loading existing data for many purposes.
    This class loads the data and, if it can, caches the data.
    """

    def __init__(self):
        # Lazily-populated caches, filled by the lookup_* methods.
        self.specifications = {}
        self.browser_data = None
        self.subfeature_data = {}

    BrowserParams = namedtuple(
        'BrowserParams', ['browser', 'browser_id', 'name', 'slug'])

    # Common MDN compatibility-table browser names mapped to canonical names.
    browser_name_fixes = {
        'Android': 'Android Browser',
        'BlackBerry': 'BlackBerry Browser',
        'Chrome': 'Chrome for Desktop',
        'Firefox (Gecko)': 'Firefox for Desktop',
        'Firefox Mobile (Gecko)': 'Firefox for Android',
        'Firefox Mobile': 'Firefox for Android',
        'Firefox OS (Gecko)': 'Firefox OS',
        'Firefox': 'Firefox for Desktop',
        'IE Mobile': 'Internet Explorer Mobile',
        'IE Phone': 'Internet Explorer Mobile',
        'IE': 'Internet Explorer for Desktop',
        'iOS Safari': 'Safari for iOS',
        'Internet Explorer': 'Internet Explorer for Desktop',
        'Opera': 'Opera for Desktop',
        'Opera (Presto)': 'Opera for Desktop',
        'Safari (WebKit)': 'Safari for Desktop',
        'Safari Mobile': 'Safari for iOS',
        'Safari': 'Safari for Desktop',
        'Windows Phone': 'Internet Explorer Mobile',
    }

    def lookup_browser_params(self, name, locale='en'):
        """Get or create the browser ID, name, and slug given a raw name.

        Return is a named tuple:
        * browser - A Browser if found, None if no existing browser
        * brower_id - The browser ID, prefixed with an underscore if new
        * name - The normalized name
        * slug - A unique slug for this browser
        """
        # Load existing browser data
        if self.browser_data is None:
            self.browser_data = {}
            for browser in Browser.objects.all():
                key = browser.name[locale]
                self.browser_data[key] = self.BrowserParams(
                    browser, browser.pk, key, browser.slug)
        # Expand to full name, handle common alternate names
        full_name = self.browser_name_fixes.get(name, name)
        # Select the Browser ID and slug
        if full_name not in self.browser_data:
            # Unknown browser: synthesize a placeholder ID ('_'-prefixed).
            browser_id = '_' + full_name
            # TODO: unique slugify instead of browser_id
            self.browser_data[full_name] = self.BrowserParams(
                None, browser_id, full_name, browser_id)
        return self.browser_data[full_name]

    FeatureParams = namedtuple(
        'FeatureParams', ['feature', 'feature_id', 'slug'])

    def lookup_feature_params(self, parent_feature, name):
        """Get or create the feature ID and slug given a name.

        Return is a named tuple:
        * feature - A Feature if found, None if no existing feature
        * feature_id - The feature ID, prefixed with an underscore if new
        * slug - A unique slug for this feature
        """
        nname = normalize_name(name)

        # Treat "Basic Support" rows as parent feature
        if nname.lower() == 'basic support':
            return self.FeatureParams(
                parent_feature, parent_feature.id, parent_feature.slug)

        # Initialize subfeature data as needed
        if parent_feature.id not in self.subfeature_data:
            subfeatures = {}
            for feature in Feature.objects.filter(parent=parent_feature):
                # Prefer the canonical ('zxx') name over the English one.
                if 'zxx' in feature.name:
                    fname = feature.name['zxx']
                else:
                    fname = feature.name['en']
                fname = normalize_name(fname)
                subfeatures[fname] = self.FeatureParams(
                    feature, feature.id, feature.slug)
            self.subfeature_data[parent_feature.id] = subfeatures

        # Select the Feature ID and slug
        subfeatures = self.subfeature_data[parent_feature.id]
        if nname not in subfeatures:
            feature_id = '_' + nname
            # Probe for a slug not already taken, appending a numeric suffix.
            attempt = 0
            feature_slug = None
            while not feature_slug:
                base_slug = parent_feature.slug + '_' + nname
                feature_slug = slugify(base_slug, suffix=attempt)
                if Feature.objects.filter(slug=feature_slug).exists():
                    attempt += 1
                    feature_slug = ''
            subfeatures[nname] = self.FeatureParams(
                None, feature_id, feature_slug)
        return self.subfeature_data[parent_feature.id][nname]

    def lookup_section_id(self, spec_id, subpath, locale='en'):
        """Retrieve a section ID given a Specification ID and subpath."""
        for section in Section.objects.filter(specification_id=spec_id):
            if section.subpath.get(locale) == subpath:
                return section.id
        return None

    def lookup_specification(self, mdn_key):
        """Retrieve a Specification by key (None if unknown); caches results."""
        if mdn_key not in self.specifications:
            try:
                spec = Specification.objects.get(mdn_key=mdn_key)
            except Specification.DoesNotExist:
                spec = None
            self.specifications[mdn_key] = spec
        return self.specifications[mdn_key]

    def lookup_support_id(self, version_id, feature_id):
        """Lookup or create a support ID for a version and feature."""
        support = None
        real_version = not is_new_id(version_id)
        real_feature = not is_new_id(feature_id)
        if real_version and real_feature:
            # Might be known version
            try:
                support = Support.objects.get(
                    version=version_id, feature=feature_id)
            except Support.DoesNotExist:
                pass
        if support:
            # Known support
            support_id = support.id
        else:
            # New support
            support_id = '_%s-%s' % (feature_id, version_id)
        return support_id

    VersionParams = namedtuple('VersionParams', ['version', 'version_id'])

    def lookup_version_params(
            self, browser_id, browser_name, version_name):
        """Get or create the version ID and normalized name by version string.

        Keyword Arguments:
        * browser_id - The ID of an existing browser, or a underscore-prefixed
          string for a new browser.
        * browser_name - The name of the browser
        * version_name - The version string, such as 1.0, 'current', or
          'nightly'

        Return is a named tuple:
        * version - A Version if found, None if no existing version
        * version_id - The version ID, prefixed with an underscore if new
        """
        version = None
        if not is_new_id(browser_id):
            # Might be known version
            try:
                version = Version.objects.get(
                    browser=browser_id, version=version_name)
            except Version.DoesNotExist:
                pass
        if version:
            # Known version
            version_id = version.id
        else:
            # New version
            version_id = '_%s-%s' % (browser_name, version_name)
        return self.VersionParams(version, version_id)
| mpl-2.0 |
GinnyN/Team-Fortress-RPG-Generators | tests/regressiontests/views/tests/defaults.py | 44 | 3687 | from __future__ import absolute_import
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from ..models import Author, Article, UrlArticle
class DefaultsTests(TestCase):
    """Test django views in django/views/defaults.py"""
    fixtures = ['testdata.json']
    # First URL is declared in urls.py, second is not; both must 404.
    non_existing_urls = ['/views/non_existing_url/',  # this is in urls.py
                         '/views/other_non_existing_url/']  # this NOT in urls.py

    def test_shortcut_with_absolute_url(self):
        "Can view a shortcut for an Author object that has a get_absolute_url method"
        for obj in Author.objects.all():
            short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
            response = self.client.get(short_url)
            self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
                                 status_code=302, target_status_code=404)

    def test_shortcut_no_absolute_url(self):
        "Shortcuts for an object that has no get_absolute_url method raises 404"
        for obj in Article.objects.all():
            short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
            response = self.client.get(short_url)
            self.assertEqual(response.status_code, 404)

    def test_wrong_type_pk(self):
        """A non-integer object pk in a shortcut URL returns 404."""
        short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_shortcut_bad_pk(self):
        """An integer pk that matches no object returns 404."""
        short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_nonint_content_type(self):
        """A non-integer content-type ID in a shortcut URL returns 404."""
        an_author = Author.objects.all()[0]
        short_url = '/views/shortcut/%s/%s/' % ('spam', an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_bad_content_type(self):
        """A content-type ID that matches no ContentType returns 404."""
        an_author = Author.objects.all()[0]
        short_url = '/views/shortcut/%s/%s/' % (42424242, an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_page_not_found(self):
        "A 404 status is returned by the page_not_found view"
        for url in self.non_existing_urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)

    def test_csrf_token_in_404(self):
        """
        The 404 page should have the csrf_token available in the context
        """
        # See ticket #14565
        for url in self.non_existing_urls:
            response = self.client.get(url)
            csrf_token = response.context['csrf_token']
            self.assertNotEqual(str(csrf_token), 'NOTPROVIDED')
            self.assertNotEqual(str(csrf_token), '')

    def test_server_error(self):
        "The server_error view raises a 500 status"
        response = self.client.get('/views/server_error/')
        self.assertEqual(response.status_code, 500)

    def test_get_absolute_url_attributes(self):
        "A model can set attributes on the get_absolute_url method"
        self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
        article = UrlArticle.objects.get(pk=1)
        self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
| bsd-3-clause |
andrewgee/gpxviewer | gpxviewer/ui.py | 1 | 19104 | #
# ui.py - GUI for GPX Viewer
#
# Copyright (C) 2009 Andrew Gee
#
# GPX Viewer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GPX Viewer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# If you're having any problems, don't hesitate to contact: andrew@andrewgee.org
#
import os
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('OsmGpsMap', '1.0')
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import OsmGpsMap
from . import stats
from gpxpy import parse
from gpxpy.gpx import GPXException
from colorsys import hsv_to_rgb
import locale
import gettext
locale.setlocale(locale.LC_ALL, '')
gettext.bindtextdomain('gpxviewer')
gettext.textdomain('gpxviewer')
_ = gettext.gettext
# Function used to defer translation until later, while still being recognised
# by build_i18n
def N_(message):
    """Mark *message* for deferred translation (recognised by build_i18n)
    and return it unchanged."""
    return message
def show_url(url):
    """Open *url* with the user's default handler via Gtk.show_uri."""
    Gtk.show_uri(None, url, Gdk.CURRENT_TIME)
ALPHA_UNSELECTED = 0.5
ALPHA_SELECTED = 0.8
LAZY_LOAD_AFTER_N_FILES = 3
class MainWindow:
# TreeStore column indices: display name, gpxpy track object, and the
# list of OsmGpsMap.MapTrack objects drawn for that track.
NAME_IDX = 0
GPX_IDX = 1
OSM_IDX = 2
def get_other_tracks(self, trace):
    """Return all OsmGpsMapTracks belonging to every loaded track except
    *trace* (used to dim the non-selected tracks)."""
    others = []
    for file_row in self.model:
        for child in file_row.iterchildren():
            if child[self.GPX_IDX] != trace:
                others.extend(child[self.OSM_IDX])
    return others
def add_track(self, parent, track, color):
    """Draw each segment of *track* on the map in *color* and append a
    child row for the track under *parent* in the tree model."""
    gpstracks = []
    for segment in track.segments:
        gpstrack = OsmGpsMap.MapTrack()
        gpstrack.set_color(color)
        gpstrack.props.alpha = 0.8
        for point in segment.points:
            gpstrack.add_point(OsmGpsMap.MapPoint.new_degrees(point.latitude, point.longitude))
        gpstracks.append(gpstrack)
        self.map.track_add(gpstrack)
    self.model.append(parent, [track.name, track, gpstracks])
def get_all_traces(self):
    """Return every loaded gpxpy track across all files."""
    return [t[self.GPX_IDX] for f in self.model for t in f.iterchildren()]
def __init__(self, ui_dir, files):
    """Build the main window from the Glade UI in *ui_dir*, create the
    map widget, sidebar and menus, wire all signal handlers, then start
    loading *files* (a list of GPX file paths)."""
    self.recent = Gtk.RecentManager.get_default()
    self.wTree = Gtk.Builder()
    self.wTree.set_translation_domain('gpxviewer')
    self.wTree.add_from_file("%sgpxviewer.ui" % ui_dir)
    # track_name, gpx, [OsmGpsMapTrack]
    self.model = Gtk.TreeStore(str, object, object)
    signals = {
        "on_windowMain_destroy": self.quit,
        "on_menuitemQuit_activate": self.quit,
        "on_menuitemOpen_activate": self.open_gpx,
        "on_menuitemZoomIn_activate": self.zoom_map_in,
        "on_buttonZoomIn_clicked": self.zoom_map_in,
        "on_menuitemZoomOut_activate": self.zoom_map_out,
        "on_buttonZoomOut_clicked": self.zoom_map_out,
        "on_menuitemAbout_activate": self.open_about_dialog,
        "on_checkmenuitemShowSidebar_toggled": self.show_sidebar_toggled,
        "on_menuitemShowStatistics_activate": self.show_statistics,
        "on_buttonTrackAdd_clicked": self.button_track_add_clicked,
        "on_buttonTrackDelete_clicked": self.button_track_delete_clicked,
        "on_buttonTrackProperties_clicked": self.button_track_properties_clicked,
        "on_buttonTrackInspect_clicked": self.button_track_inspect_clicked,
    }
    self.mainWindow = self.wTree.get_object("windowMain")
    self.mainWindow.set_icon_from_file("%sgpxviewer.svg" % ui_dir)
    self.mainWindow.set_title(_("GPX Viewer"))
    i = self.wTree.get_object("checkmenuitemCenter")
    i.connect("toggled", self.auto_center_toggled)
    self.autoCenter = i.get_active()
    self.ui_dir = ui_dir
    # Map widget with a per-user on-disk tile cache.
    self.map = OsmGpsMap.Map(
        tile_cache=os.path.join(
            GLib.get_user_cache_dir(),
            'gpxviewer', 'tiles'))
    self.map.layer_add(
        OsmGpsMap.MapOsd(
            show_dpad=False,
            show_zoom=False,
            show_scale=True,
            show_coordinates=False))
    self.wTree.get_object("hbox_map").pack_start(self.map, True, True, 0)
    sb = self.wTree.get_object("statusbar1")
    # move zoom control into apple like slider
    self.zoomSlider = MapZoomSlider(self.map)
    self.zoomSlider.show_all()
    a = Gtk.Alignment.new(1.0, 1.0, 0.0, 0.0)
    a.set_padding(0, 0, 0, 4)
    a.add(self.zoomSlider)
    a.show_all()
    overlay = self.wTree.get_object("overlay_map")
    overlay.add_overlay(a)
    # pass-through clicks to the map widget
    overlay.set_overlay_pass_through(a, True)
    # animate a spinner when downloading tiles
    try:
        self.spinner = Gtk.Spinner()
        self.spinner.props.has_tooltip = True
        self.spinner.connect("query-tooltip", self.on_spinner_tooltip)
        self.map.connect("notify::tiles-queued", self.update_tiles_queued)
        self.spinner.set_size_request(*Gtk.icon_size_lookup(Gtk.IconSize.MENU)[:2])
        sb.pack_end(self.spinner, False, False, 0)
    except AttributeError:
        # Older GTK without Gtk.Spinner: run without a download indicator.
        self.spinner = None
    self.wTree.connect_signals(signals)
    # add open with external tool submenu items and actions
    programs = {
        'josm': N_('JOSM Editor'),
        'merkaartor': N_('Merkaartor'),
    }
    submenu_open_with = Gtk.Menu()
    for prog, progname in programs.items():
        submenuitem_open_with = Gtk.MenuItem(_(progname))
        submenu_open_with.append(submenuitem_open_with)
        submenuitem_open_with.connect("activate", self.open_with_external_app, prog)
        submenuitem_open_with.show()
    self.wTree.get_object('menuitemOpenBy').set_submenu(submenu_open_with)
    self.wTree.get_object("menuitemHelp").connect("activate",
        lambda *a: show_url("https://answers.launchpad.net/gpxviewer"))
    self.wTree.get_object("menuitemTranslate").connect("activate", lambda *a: show_url(
        "https://translations.launchpad.net/gpxviewer"))
    self.wTree.get_object("menuitemReportProblem").connect("activate", lambda *a: show_url(
        "https://bugs.launchpad.net/gpxviewer/+filebug"))
    # Sidebar track list, backed by self.model.
    self.tv = Gtk.TreeView(self.model)
    self.tv.get_selection().connect("changed", self.on_selection_changed)
    self.tv.append_column(
        Gtk.TreeViewColumn(
            "Track Name",
            Gtk.CellRendererText(),
            text=self.NAME_IDX
        )
    )
    self.wTree.get_object("scrolledwindow1").add(self.tv)
    self.sb = self.wTree.get_object("vbox_sidebar")
    self.hide_spinner()
    self.hide_track_selector()
    self.lazyLoadFiles(files)
    self.map.show()
    self.mainWindow.show()
def lazyLoadFiles(self, files):
    """Load GPX *files*: small batches load immediately, larger batches
    are drained one file per 100ms GLib timeout so the UI stays live."""
    def do_lazy_load(_files):
        # Timeout callback: load one file per tick; returning False
        # removes the timeout source once the list is drained.
        try:
            self.load_gpx(_files.pop())
            self.loadingFiles -= 1
            return True
        except IndexError:
            self.loadingFiles = 0
            return False
    self.loadingFiles = 0
    if not files:
        return
    # if less than LAZY_LOAD_AFTER_N_FILES load directly, else
    # load on idle
    if len(files) < LAZY_LOAD_AFTER_N_FILES:
        i = 0
        for filename in files:
            self.loadingFiles = i
            self.load_gpx(filename)
            if i < LAZY_LOAD_AFTER_N_FILES:
                i += 1
            else:
                # NOTE(review): unreachable when len(files) < N; this
                # counter bookkeeping looks vestigial — confirm.
                self.loadingFiles = 0
                break
    else:
        self.loadingFiles = len(files)
        GObject.timeout_add(100, do_lazy_load, files)
def show_spinner(self):
    """Show and start the tile-download spinner (no-op when unavailable)."""
    if self.spinner:
        self.spinner.show()
        self.spinner.start()
def hide_spinner(self):
    """Stop and hide the tile-download spinner (no-op when unavailable)."""
    if self.spinner:
        self.spinner.stop()
        self.spinner.hide()
def on_spinner_tooltip(self, spinner, x, y, keyboard_mode, tooltip):
    """query-tooltip handler: show 'Downloading Map' while any map tiles
    are still queued; suppress the tooltip otherwise."""
    queued = self.map.props.tiles_queued
    if not queued:
        return False
    tooltip.set_text("Downloading Map")
    return True
def show_track_selector(self):
    """Show the sidebar containing the track list."""
    self.sb.show_all()
def hide_track_selector(self):
    """Hide the sidebar containing the track list."""
    self.sb.hide()
def on_selection_changed(self, selection):
    """Sidebar selection handler: show the picked row's stats, highlight
    its map tracks and dim all the others."""
    model, _iter = selection.get_selected()
    if not _iter:
        return
    trace = self.model.get_value(_iter, self.GPX_IDX)
    tracks = self.model.get_value(_iter, self.OSM_IDX)
    self.select_trace(self.model[_iter])
    # highlight current track
    self.select_tracks(tracks, ALPHA_SELECTED)
    # dim other tracks
    self.select_tracks(self.get_other_tracks(trace), ALPHA_UNSELECTED)
def update_tiles_queued(self, map_, paramspec):
    """notify::tiles-queued handler: spin while tile downloads are pending."""
    if self.map.props.tiles_queued > 0:
        self.show_spinner()
    else:
        self.hide_spinner()
def show_sidebar_toggled(self, item):
    """Menu handler: show or hide the track-selector sidebar."""
    if item.get_active():
        self.show_track_selector()
    else:
        self.hide_track_selector()
def show_statistics(self, item):
    """Open a window with weekly and average-speed charts aggregated
    over all currently loaded tracks."""
    ws = stats.WeekStats()
    ss = stats.AvgSpeedStats()
    for t in self.get_all_traces():
        ws.addTrace(t)
        ss.addTrace(t)
    w = Gtk.Window()
    w.add(stats.ChartNotebook(ws, ss))
    w.resize(500, 300)
    w.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
    w.set_transient_for(self.mainWindow)
    w.show_all()
def open_about_dialog(self, w):
    """Show the About dialog (hidden, not destroyed, on response)."""
    dialog = self.wTree.get_object("dialogAbout")
    self.wTree.get_object("dialogAbout").set_icon_from_file("%sgpxviewer.svg" % self.ui_dir)
    dialog.connect("response", lambda *a: dialog.hide())
    dialog.show_all()
def select_tracks(self, tracks, alpha):
    """Set the alpha of every OsmGpsMapTrack in *tracks*; accepts an
    empty list or None as a no-op."""
    for gpstrack in tracks or []:
        gpstrack.props.alpha = alpha
def select_trace(self, row):
    """Show statistics for the model *row*'s track in the sidebar and
    retitle the window; clears all labels for file (parent) rows."""
    if not row[self.GPX_IDX]:
        # File/parent row: no per-track stats to show.
        self.set_distance_label()
        self.set_maximum_speed_label()
        self.set_average_speed_label()
        self.set_duration_label()
        self.set_logging_date_label()
        self.set_logging_time_label()
        self.currentFilename = row[self.NAME_IDX]
        self.mainWindow.set_title(_("GPX Viewer - %s") % row[self.NAME_IDX])
        return
    self.zoom = 12
    distance = row[self.GPX_IDX].get_moving_data().moving_distance
    maximum_speed = row[self.GPX_IDX].get_moving_data().max_speed
    average_speed = stats.get_average_speed(row[self.GPX_IDX])
    duration = row[self.GPX_IDX].get_moving_data().moving_time
    clat = row[self.GPX_IDX].get_center().latitude
    clon = row[self.GPX_IDX].get_center().longitude
    gpxfrom = row[self.GPX_IDX].get_time_bounds().start_time
    gpxto = row[self.GPX_IDX].get_time_bounds().end_time
    self.set_distance_label(round(distance / 1000, 2))  # label shows km
    self.set_maximum_speed_label(maximum_speed)
    self.set_average_speed_label(average_speed)
    hours, remain = divmod(duration, 3600)
    minutes, seconds = divmod(remain, 60)
    self.set_duration_label(hours, minutes, seconds)
    # Default to placeholders; overwrite when the GPX has timestamps.
    self.set_logging_date_label('--')
    self.set_logging_time_label('--', '--')
    if gpxfrom:
        self.set_logging_date_label(gpxfrom.strftime("%x"))
    if gpxto:
        self.set_logging_time_label(gpxfrom.strftime("%X"), gpxto.strftime("%X"))
    self.currentFilename = row.get_parent()[self.NAME_IDX]
    self.mainWindow.set_title(_("GPX Viewer - %s") % row[self.GPX_IDX].name)
    if self.autoCenter:
        self.set_centre(clat, clon)
def load_gpx(self, filename):
    """Parse *filename* and add its tracks to the model and the map.

    Returns the last parsed track on success (callers only test the
    result for truthiness), or None when the file is invalid or contains
    no tracks.
    """
    try:
        # Close the file handle deterministically instead of leaking it
        # until garbage collection.
        with open(filename) as gpx_file:
            tracks = parse(gpx_file).tracks
    except GPXException:
        self.show_gpx_error()
        return None
    if not tracks:
        # Previously a track-less file fell through to an
        # UnboundLocalError on 'return track'; report it as invalid.
        self.show_gpx_error()
        return None
    parent = self.model.append(None, [filename, None, None])
    for i, track in enumerate(tracks):
        # Spread track hues evenly around the colour wheel.
        color = Gdk.RGBA(*hsv_to_rgb((i / len(tracks) + 1 / 3) % 1.0, 1.0, 1.0))
        self.add_track(parent, track, color)
    if len(self.model) > 1 or len(tracks) > 1:
        # Multiple files/tracks: let the user pick from the sidebar.
        self.wTree.get_object("checkmenuitemShowSidebar").set_active(True)
        self.show_track_selector()
    else:
        self.select_trace(next(self.model[0].iterchildren()))
    return track
def open_gpx(self, *args):
    """Prompt for one or more GPX files and load each selected file."""
    chooser = Gtk.FileChooserDialog(title=_("Choose a GPX file to Load"),
                                    action=Gtk.FileChooserAction.OPEN,
                                    parent=self.mainWindow)
    chooser.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.DELETE_EVENT)
    chooser.add_button(Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
    chooser.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
    chooser.set_select_multiple(True)
    if chooser.run() == Gtk.ResponseType.OK:
        for filename in chooser.get_filenames():
            # Only remember files that actually loaded.
            if self.load_gpx(filename):
                self.recent.add_item("file://" + filename)
    chooser.destroy()
def show_gpx_error(self):
    """Tell the user the selected file could not be parsed as GPX."""
    dialog = Gtk.MessageDialog(
        parent=self.mainWindow,
        type=Gtk.MessageType.ERROR,
        buttons=Gtk.ButtonsType.OK,
        message_format=_("You selected an invalid GPX file. \n Please try again"))
    dialog.run()
    dialog.destroy()
    return None
def quit(self, w):
    """Gtk signal handler: stop the main loop and end the application."""
    Gtk.main_quit()
def main(self):
    """Run the Gtk main loop (blocks until quit)."""
    Gtk.main()
def open_with_external_app(self, w, app):
    """Launch *app* on the currently loaded GPX file, if one is loaded."""
    if not self.currentFilename:
        return
    # P_NOWAIT: fire and forget; do not block the GUI on the child.
    os.spawnlp(os.P_NOWAIT, app, app, self.currentFilename)
def zoom_map_in(self, w):
    """Menu/toolbar handler: zoom the map in one step."""
    self.map.zoom_in()
def zoom_map_out(self, w):
    """Menu/toolbar handler: zoom the map out one step."""
    self.map.zoom_out()
def set_centre(self, lat, lon):
    """Centre the map on (lat, lon) at the currently stored zoom level."""
    self.map.set_center_and_zoom(lat, lon, self.zoom)
def set_distance_label(self, distance=None):
    """Show *distance* (km) in the sidebar, or '--' when None."""
    # Compare against None explicitly so a legitimate 0.0 km distance is
    # rendered as "0.00" rather than the no-data placeholder.
    distance = '%.2f' % distance if distance is not None else '--'
    self.wTree.get_object("labelDistance").set_markup(_("<b>Distance:</b> %s km") % distance)
def set_average_speed_label(self, average_speed=None):
    """Show *average_speed* (m/s) in the sidebar, or '--' when None."""
    # 'is not None' so a genuine 0.0 m/s average is shown, not '--'.
    average_speed = '%.2f' % average_speed if average_speed is not None else '--'
    self.wTree.get_object("labelAverageSpeed").set_markup(_("<b>Average Speed:</b> %s m/s") % average_speed)
def set_maximum_speed_label(self, maximum_speed=None):
    """Show *maximum_speed* (m/s) in the sidebar, or '--' when None."""
    # 'is not None' so a genuine 0.0 m/s maximum is shown, not '--'.
    maximum_speed = '%.2f' % maximum_speed if maximum_speed is not None else '--'
    self.wTree.get_object("labelMaximumSpeed").set_markup(_("<b>Maximum Speed:</b> %s m/s") % maximum_speed)
def set_duration_label(self, hours="--", minutes="--", seconds="--"):
    """Show the moving duration, or placeholders when unset."""
    values = {"hours": hours, "minutes": minutes, "seconds": seconds}
    label = self.wTree.get_object("labelDuration")
    label.set_markup(
        _("<b>Duration:</b> %(hours)s hours, %(minutes)s minutes, %(seconds)s seconds") % values)
def set_logging_date_label(self, gpxdate="--"):
    """Show the (pre-formatted) logging date, or '--' by default."""
    self.wTree.get_object("labelLoggingDate").set_markup(_("<b>Logging Date:</b> %s") % gpxdate)
def set_logging_time_label(self, gpxfrom="--", gpxto="--"):
    """Show the (pre-formatted) logging time range, or placeholders."""
    self.wTree.get_object("labelLoggingTime").set_markup(
        _("<b>Logging Time:</b> %(from)s - %(to)s") % {"from": gpxfrom, "to": gpxto})
def auto_center_toggled(self, item):
    """Menu toggle handler: remember whether to recentre on selection."""
    self.autoCenter = item.get_active()
def button_track_add_clicked(self, *args):
    """Sidebar '+' button: delegate to the regular open-file flow."""
    self.open_gpx()
def remove_track(self, tracks):
    """Remove every OsmGpsMap track in *tracks* from the map widget."""
    for osm_track in tracks:
        self.map.track_remove(osm_track)
def button_track_delete_clicked(self, *args):
    """Remove the selected row (file or single track) from map and model."""
    model, _iter = self.tv.get_selection().get_selected()
    if not _iter:
        return
    if self.model.get_value(_iter, self.OSM_IDX):
        # Track row: it stores its own list of OsmGpsMap tracks.
        self.remove_track(self.model.get_value(_iter, self.OSM_IDX))
    else:
        # File (parent) row: remove the tracks of every child row.
        for child in self.model[_iter].iterchildren():
            self.remove_track(child[self.OSM_IDX])
    self.model.remove(_iter)
def button_track_properties_clicked(self, *args):
    """Let the user pick a new colour for the selected row's track(s)."""
    model, _iter = self.tv.get_selection().get_selected()
    if _iter:
        OsmGpsMapTracks = self.model.get_value(_iter, self.OSM_IDX)
        colorseldlg = Gtk.ColorSelectionDialog("Select track color")
        # Seed the dialog with the current colour of the first track.
        # NOTE(review): the seed uses to_color() (GdkColor) while the result
        # is read back as RGBA -- confirm the round-trip keeps alpha intact.
        colorseldlg.get_color_selection().set_current_color(OsmGpsMapTracks[0].props.color.to_color())
        result = colorseldlg.run()
        if result == Gtk.ResponseType.OK:
            color = colorseldlg.get_color_selection().get_current_rgba()
            for OsmGpsMapTrack in OsmGpsMapTracks:
                OsmGpsMapTrack.set_color(color)
            self.map.map_redraw()
        colorseldlg.destroy()
def button_track_inspect_clicked(self, *args):
    """Placeholder handler for the 'inspect track' button (not implemented)."""
    pass
class MapZoomSlider(Gtk.HBox):
    """Horizontal zoom control for an OsmGpsMap: [-] --slider-- [+].

    The slider adjustment is kept in sync with the map's ``zoom`` property
    in both directions (slider -> map and map -> slider).
    """

    def __init__(self, _map):
        Gtk.HBox.__init__(self)
        # Zoom-out button: an EventBox so a plain image can receive clicks.
        zo = Gtk.EventBox()
        zo.add(Gtk.Image.new_from_stock(Gtk.STOCK_ZOOM_OUT, Gtk.IconSize.MENU))
        zo.connect("button-press-event", self._on_zoom_out_pressed, _map)
        self.pack_start(zo, False, False, 0)
        # Adjustment mirroring the map's zoom range, one step per unit.
        self.zoom = Gtk.Adjustment(
            value=_map.props.zoom,
            lower=_map.props.min_zoom,
            upper=_map.props.max_zoom,
            step_incr=1,
            page_incr=1,
            page_size=0)
        self.zoom.connect("value-changed", self._on_zoom_slider_value_changed, _map)
        hs = Gtk.HScale()
        hs.set_adjustment(self.zoom)
        hs.props.digits = 0
        hs.props.draw_value = False
        hs.set_size_request(100, -1)
        # hs.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
        self.pack_start(hs, True, True, 0)
        # Zoom-in button.
        zi = Gtk.EventBox()
        zi.add(Gtk.Image.new_from_stock(Gtk.STOCK_ZOOM_IN, Gtk.IconSize.MENU))
        zi.connect("button-press-event", self._on_zoom_in_pressed, _map)
        self.pack_start(zi, False, False, 0)
        # Keep the slider in sync when the map is zoomed by other means.
        _map.connect("notify::zoom", self._on_map_zoom_changed)

    def _on_zoom_in_pressed(self, box, event, _map):
        """Button callback: one click = one zoom step in."""
        _map.zoom_in()

    def _on_zoom_out_pressed(self, box, event, _map):
        """Button callback: one click = one zoom step out."""
        _map.zoom_out()

    def _on_zoom_slider_value_changed(self, adj, _map):
        """Push slider changes to the map, avoiding the feedback loop."""
        zoom = adj.get_value()
        # Only update when the value actually differs: _on_map_zoom_changed
        # writes the slider, which re-fires this handler.
        if zoom != _map.props.zoom:
            _map.set_zoom(int(zoom))

    def _on_map_zoom_changed(self, _map, paramspec):
        """Reflect external map zoom changes back onto the slider."""
        self.zoom.set_value(_map.props.zoom)
| gpl-3.0 |
F-Secure/resource-api | src/tests/authorization_test.py | 2 | 5687 | """
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
from resource_api.errors import AuthorizationError, DoesNotExist, ValidationError
from .base_test import BaseTest
# NOTE: here we access self.entry_point._user to avoid test code duplication. In real life it should not be done.
class ResourceAuthorizationTest(BaseTest):
    """ACL checks for direct resource operations (view/create/update/...)."""

    def test_get_not_authorized(self):
        self.entry_point._user = {"source": {"view": False}}
        with self.assertRaises(AuthorizationError):
            self.src.get(1).data

    def test_delete_not_authorized(self):
        self.entry_point._user = {"source": {"delete": False}}
        item = self.src.get(1)
        with self.assertRaises(AuthorizationError):
            item.delete()

    def test_create_not_authorized(self):
        self.entry_point._user = {"source": {"create": False}}
        with self.assertRaises(AuthorizationError):
            self.src.create({"pk": 3, "extra": "foo"})

    def test_update_not_authorized(self):
        self.entry_point._user = {"source": {"update": False}}
        item = self.src.get(1)
        with self.assertRaises(AuthorizationError):
            item.update({"extra": "Neo"})

    def test_get_collection_not_authorized(self):
        self.entry_point._user = {"source": {"list": False}}
        with self.assertRaises(AuthorizationError):
            list(self.src)

    def test_get_collection_count_not_authorized(self):
        self.entry_point._user = {"source": {"list": False}}
        with self.assertRaises(AuthorizationError):
            self.src.count()

    def test_get_non_discoverable(self):
        # A non-discoverable resource must look like it does not exist.
        self.entry_point._user = {"source": {"discover": False}}
        with self.assertRaises(DoesNotExist):
            self.src.get(1).data
class LinkToManyAuthorizationTest(BaseTest):
    """ACL checks for to-many link collections, in both link directions."""

    def test_get_data_not_authorized(self):
        self.entry_point._user = {"link": {"view": False}}
        with self.assertRaises(AuthorizationError):
            self.src.get(1).links.targets.get(1).data
        with self.assertRaises(AuthorizationError):
            self.target.get(1).links.sources.get(1).data

    def test_delete_not_authorized(self):
        self.entry_point._user = {"link": {"delete": False}}
        link = self.src.get(1).links.targets.get(1)
        with self.assertRaises(AuthorizationError):
            link.delete()

    def test_create_not_authorized(self):
        self.entry_point._user = {"link": {"create": False}}
        collection = self.src.get(1).links.targets
        with self.assertRaises(AuthorizationError):
            collection.create({"@target": 2, "extra": "Bal"})

    def test_update_not_authorized(self):
        self.entry_point._user = {"link": {"update": False}}
        link = self.src.get(1).links.targets.get(1)
        with self.assertRaises(AuthorizationError):
            link.update({"extra": "Neo"})

    def test_get_collection_not_authorized(self):
        self.entry_point._user = {"link": {"list": False}}
        collection = self.src.get(1).links.targets
        with self.assertRaises(AuthorizationError):
            list(collection)

    def test_get_collection_count_not_authorized(self):
        self.entry_point._user = {"link": {"list": False}}
        collection = self.src.get(1).links.targets
        with self.assertRaises(AuthorizationError):
            collection.count()

    def test_get_collection_of_non_discoverable_links(self):
        # Hidden endpoints yield None placeholders instead of link objects.
        self.entry_point._user = {"target": {"discover": False}}
        self.assertEqual(list(self.src.get(1).links.targets), [None])
        self.entry_point._user = {"source": {"discover": False}}
        self.assertEqual(list(self.target.get(1).links.sources), [None])
        self.entry_point._user = {"link": {"discover": False}}
        self.assertEqual(list(self.target.get(1).links.sources), [None])
        self.assertEqual(list(self.src.get(1).links.targets), [None])

    def test_get_non_discoverable_link(self):
        self.entry_point._user = {"link": {"discover": False}}
        collection = self.src.get(1).links.targets
        with self.assertRaises(DoesNotExist):
            collection.get(1)
        collection = self.target.get(1).links.sources
        with self.assertRaises(DoesNotExist):
            collection.get(1)

    def test_get_link_with_non_discoverable_target(self):
        self.entry_point._user = {"target": {"discover": False}}
        collection = self.src.get(1).links.targets
        with self.assertRaises(DoesNotExist):
            collection.get(1)

    def test_create_link_with_non_discoverable_target(self):
        self.entry_point._user = {"target": {"discover": False}}
        collection = self.src.get(1).links.targets
        with self.assertRaises(ValidationError):
            collection.create({"@target": 2, "extra": "Bal"})
class LinkToOneAuthorizationTest(BaseTest):
    """ACL checks for to-one ("the_target") links.

    NOTE(review): test_delete_not_authorized exercises ``set`` rather than
    a delete call -- for a to-one link, setting replaces (implicitly
    deletes) the existing link; confirm this is the intended coverage.
    """

    def test_create_not_authorized(self):
        self.entry_point._user = {"link": {"create": False}}
        self.assertRaises(AuthorizationError, self.src.get(1).links.the_target.set, {"@target": 2, "extra": "Bal"})

    def test_delete_not_authorized(self):
        self.entry_point._user = {"link": {"delete": False}}
        self.assertRaises(AuthorizationError, self.src.get(1).links.the_target.set, {"@target": 2, "extra": "Bal"})

    def test_update_not_authorized(self):
        self.entry_point._user = {"link": {"update": False}}
        self.assertRaises(AuthorizationError, self.src.get(1).links.the_target.item.update, {"extra": "Fpo"})

    def test_set_with_non_discoverable_target(self):
        self.entry_point._user = {"target": {"discover": False}}
        # assertRaisesRegex: the assertRaisesRegexp alias is deprecated and
        # removed in Python 3.12; the pattern is a raw string so the \w
        # escape reaches the regex engine intact instead of relying on an
        # invalid string escape passing through.
        self.assertRaisesRegex(ValidationError, r"Target: Resource with pk \w{1,2} does not exist.",
                               self.src.get(1).links.the_target.set, {"@target": 2, "extra": "Bal"})

    def test_get_non_discoverable_link(self):
        self.entry_point._user = {"link": {"discover": False}}
        self.assertRaises(DoesNotExist, lambda: self.src.get(1).links.the_target.item.target)

    def test_get_link_with_non_discoverable_target(self):
        self.entry_point._user = {"target": {"discover": False}}
        self.assertRaises(DoesNotExist, lambda: self.src.get(1).links.the_target.item.target)
| apache-2.0 |
andrewbell8/dotfiles | config/qbittorrent/nova3/engines/torlock.py | 6 | 4052 | #VERSION: 2.0
#AUTHORS: Douman (custparasite@gmx.se)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
from html.parser import HTMLParser
from re import compile as re_compile
from re import escape as re_escape

from helpers import retrieve_url, download_file
from novaprinter import prettyPrinter
class torlock(object):
    """qBittorrent search-engine plugin for torlock.com."""

    url = "https://www.torlock.com"
    name = "TorLock"
    # qBittorrent category name -> torlock URL path segment.
    supported_categories = {'all' : 'all',
                            'anime' : 'anime',
                            'software' : 'software',
                            'games' : 'game',
                            'movies' : 'movie',
                            'music' : 'music',
                            'tv' : 'television',
                            'books' : 'ebooks'}

    def download_torrent(self, info):
        """Download the .torrent for *info* and print its local path (plugin API)."""
        print(download_file(info))

    class MyHtmlParser(HTMLParser):
        """ Sub-class for parsing results """

        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.article_found = False  # true when <article> with results is found
            self.item_found = False     # inside a result row
            self.item_bad = False       # set to True for malicious links
            self.current_item = None    # dict for the item being built
            self.item_name = None       # key currently receiving text data
            # td class attribute -> result-dict key for size/seeds/leechers.
            self.parser_class = {"ts" : "size",
                                 "tul" : "seeds",
                                 "tdl" : "leech"}

        def handle_starttag(self, tag, attrs):
            params = dict(attrs)
            if self.item_found:
                if tag == "td":
                    if "class" in params:
                        self.item_name = self.parser_class.get(params["class"], None)
                        if self.item_name:
                            self.current_item[self.item_name] = ""
            elif self.article_found and tag == "a":
                if "href" in params:
                    link = params["href"]
                    if link.startswith("/torrent"):
                        self.current_item["desc_link"] = "".join((self.url, link))
                        self.current_item["link"] = "".join((self.url, "/tor/", link.split('/')[2], ".torrent"))
                        self.current_item["engine_url"] = self.url
                        self.item_found = True
                        self.item_name = "name"
                        self.current_item["name"] = ""
                        # rel="nofollow" marks rows torlock flags as malicious.
                        self.item_bad = "rel" in params and params["rel"] == "nofollow"
            elif tag == "article":
                self.article_found = True
                self.current_item = {}

        def handle_data(self, data):
            # Accumulate text into whichever field is currently open.
            if self.item_name:
                self.current_item[self.item_name] += data

        def handle_endtag(self, tag):
            if tag == "article":
                self.article_found = False
            elif self.item_name and (tag == "a" or tag == "td"):
                self.item_name = None
            elif self.item_found and tag == "tr":
                # End of a result row: emit it unless flagged malicious.
                self.item_found = False
                if not self.item_bad:
                    prettyPrinter(self.current_item)
                self.current_item = {}

    def search(self, query, cat='all'):
        """ Performs search """
        query = query.replace("%20", "-")
        parser = self.MyHtmlParser(self.url)
        page = "".join((self.url, "/", self.supported_categories[cat], "/torrents/", query, ".html?sort=seeds&page=1"))
        html = retrieve_url(page)
        parser.feed(html)
        counter = 1
        # Build the "additional pages" pattern from escaped components: the
        # original embedded '\?' in a non-raw string (an invalid escape
        # sequence that only worked by accident) and interpolated the query
        # unescaped, which broke on regex metacharacters in search terms.
        path = re_escape("/{0}/torrents/{1}.html".format(self.supported_categories[cat], query))
        additional_pages = re_compile(path + r"\?sort=seeds&page=[0-9]+")
        list_searches = additional_pages.findall(html)[:-1]  # last link is next (i.e. second)
        for page in map(lambda link: "".join((self.url, link)), list_searches):
            html = retrieve_url(page)
            parser.feed(html)
            counter += 1
            if counter > 3:
                break
        parser.close()
| bsd-2-clause |
revpriest/camtyper | grabCam.py | 1 | 4560 | import numpy as np
import cv2
class Unbuffered(object):
    """File-object proxy that flushes after every write.

    Every other attribute access is delegated to the wrapped stream.
    """

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        underlying = self.stream
        underlying.write(data)
        underlying.flush()

    def __getattr__(self, attr):
        # Fallback for everything except write(): behave like the stream.
        return getattr(self.stream, attr)
import sys

# Replace stdout with a self-flushing proxy so each typed character is
# emitted immediately (the script outputs single keystrokes).
sys.stdout = Unbuffered(sys.stdout)
# Radius (px) of the Gaussian blur kernel; also sizes the marker circles.
radius = 5
# Debounce flag: True while the bright spot is held inside a key region,
# so each visit to a cell emits at most one character.
keydown = True

# 3x3 on-screen labels for each of the nine chooser modes selected with the
# left hand; index = mode, inner index = key region (row-major). The centre
# cell of every grid is the "rest" position.
labelMap = [
    ('_', '5', '3', '(', ' ', '(', '1', '9', '7'),
    ('v', 'f', 'w', 'k', ' ', 'q', 'm', 'p', 'b'),
    ('l', 's', 'n', 'c', ' ', 'h', 't', 'r', 'd'),
    ('alt', 'meta', 'altR', 'shift', ' ', 'shiftR', 'ctrl', 'metaR', 'ctrlR'),
    (' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
    (':', 'bs', 'x', 'g', ' ', 'z', 'j', '$', '!'),
    ('del', 'i', 'e', 'ret', ' ', 'o', 'a', 'u', 'y'),
    ('/', 'up', '-', 'left', ' ', 'right', '+', 'down', '*'),
    ('.', '6', '4', ',', ' ', '8', '2', '0', 'enter')
]
# Character/key codes emitted for each (mode, region) pair; same layout as
# labelMap. Values are ord() codes for printable keys or raw keycodes for
# specials (backspace=8, return=13, shift=16, ctrl=17, alt=18, arrows=37-40,
# delete=46, meta=91/93).
charMap = [
    (ord(' '), ord('5'), ord('3'), ord('('), ord(' '), ord('('), ord('1'), ord('9'), ord('7')),
    (ord('v'), ord('f'), ord('w'), ord('k'), ord(' '), ord('q'), ord('m'), ord('p'), ord('b')),
    (ord('l'), ord('s'), ord('n'), ord('c'), ord(' '), ord('h'), ord('t'), ord('r'), ord('d')),
    (18, 91, 225, 16, ord(' '), 16, 17, 93, 17),
    (ord(' '), ord(' '), ord(' '), ord(' '), ord(' '), ord(' '), ord(' '), ord(' '), ord(' ')),
    (ord(':'), 8, ord('x'), ord('g'), ord(' '), ord('z'), ord('j'), ord('$'), ord('!')),
    (46, ord('i'), ord('e'), 13, ord(' '), ord('o'), ord('a'), ord('u'), ord('y')),
    (ord('/'), 38, ord('-'), 37, ord(' '), 39, ord('+'), 40, ord('*')),
    (ord('.'), ord('6'), ord('4'), ord(','), ord(' '), ord('8'), ord('2'), ord('0'), 13)
]
# Labels for the mode-selection grid driven by the left-hand marker.
modeMap = ['odd', 'c2', 'c1', 'meta', ' ', '!', 'vow', 'curs', 'even']
def getMax(img):
    """Return the (x, y) location of the brightest pixel in *img*."""
    # Only the max location is needed; the min/max values and min location
    # returned by cv2.minMaxLoc are deliberately discarded.
    _min_val, _max_val, _min_loc, max_loc = cv2.minMaxLoc(img)
    return max_loc
def drawGrid(img, offset):
    """Draw the 3x3 cell borders of one key grid starting at x=offset."""
    white = (255, 255, 255)
    for x in (53, 107):
        cv2.line(img, (x + offset, 0), (x + offset, 240), white, 2)
    for y in (80, 160):
        cv2.line(img, (0 + offset, y), (160 + offset, y), white, 2)
def drawLabels(img, offset, labelSet):
    """Render the nine labels of one key grid (row-major) at x=offset."""
    font = cv2.FONT_HERSHEY_PLAIN
    colour = (255, 255, 255)
    scale = 1
    thickness = 2
    xs = (0, 55, 108)
    ys = (40, 120, 200)
    for index in range(9):
        position = (offset + xs[index % 3], ys[index // 3])
        cv2.putText(img, labelSet[index], position, font, scale, colour, thickness)
def getRegion(point):
    """Map an (x, y) point to its 3x3 grid cell index (0..8, row-major).

    Columns split at x=60 and x=120; rows split at y=80 and y=160.
    """
    x, y = point
    col = 0 if x < 60 else (1 if x < 120 else 2)
    row = 0 if y < 80 else (1 if y < 160 else 2)
    return 3 * row + col
def outputChar(mode,key):
    """Emit the character mapped to (mode, key) on stdout."""
    # NOTE(review): the character is emitted twice -- once via print()
    # (with a trailing newline) and once via the unbuffered stdout proxy;
    # confirm whether the print() call is leftover debugging output.
    print(chr(charMap[mode][key]))
    sys.stdout.write(chr(charMap[mode][key]))
# --- main capture loop: webcam-driven two-handed chorded keyboard ---
cap = cv2.VideoCapture(0)
cap.set(3,320); # property 3 = frame width
cap.set(4,240); # property 4 = frame height
cap.set(5,1000); # property 5 = FPS -- NOTE(review): the driver clamps this
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Mirror the image so on-screen movement matches hand movement.
    flip = cv2.flip(frame,1)
    # Display the resulting frame
    #cv2.imshow('frame',flip)
    # Convert to greyscale and blur so the brightest blob is stable.
    grey = cv2.cvtColor(flip, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(grey, (radius, radius), 0)
    # Left half drives the mode; right half drives the key.
    leftHalf = blur[0:240, 0:160]
    # NOTE(review): this slice starts at 161, skipping column 160, while
    # adjRightMaxLoc below adds 160 -- confirm the off-by-one is intended.
    rightHalf = blur[0:240, 161:320]
    leftMaxLoc = getMax(leftHalf)
    rightMaxLoc = getMax(rightHalf)
    # Shift the right-half coordinate back into full-frame space.
    adjRightMaxLoc = (rightMaxLoc[0]+160, rightMaxLoc[1])
    drawGrid(flip, 0)
    drawGrid(flip, 160)
    # Blue divider between the mode grid and the key grid.
    cv2.line(flip, (160, 0), (160, 240), (255, 0, 0), 2)
    drawLabels(flip, 0, modeMap)
    mode = getRegion(leftMaxLoc)
    drawLabels(flip, 160, labelMap[mode])
    key = getRegion(rightMaxLoc)
    if (key != 4):
        # Region 4 is the centre "rest" cell; anything else is a key press.
        # keydown debounces so each visit emits exactly one character.
        if (not keydown):
            outputChar(mode,key)
        keydown = True
    else:
        keydown = False
    # Mark the detected bright spots (red = mode hand, yellow = key hand).
    cv2.circle(flip, leftMaxLoc, radius+6, (0, 0, 255), 2)
    cv2.circle(flip, adjRightMaxLoc, radius+6, (0, 255, 255), 2)
    cv2.imshow('frame',flip)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
endolith/scikit-image | skimage/util/montage.py | 38 | 3203 | __all__ = ['montage2d']
import numpy as np
from .. import exposure
EPSILON = 1e-6
def montage2d(arr_in, fill='mean', rescale_intensity=False, grid_shape=None):
    """Create a 2-dimensional 'montage' from a 3-dimensional input array
    representing an ensemble of equally shaped 2-dimensional images.

    For example, ``montage2d(arr_in, fill)`` with the following `arr_in`

    +---+---+---+
    | 1 | 2 | 3 |
    +---+---+---+

    will return:

    +---+---+
    | 1 | 2 |
    +---+---+
    | 3 | * |
    +---+---+

    Where the '*' patch will be determined by the `fill` parameter.

    Parameters
    ----------
    arr_in: ndarray, shape=[n_images, height, width]
        3-dimensional input array representing an ensemble of n_images
        of equal shape (i.e. [height, width]).
    fill: float or 'mean', optional
        How to fill the 2-dimensional output array when sqrt(n_images)
        is not an integer. If 'mean' is chosen, then fill = arr_in.mean().
    rescale_intensity: bool, optional
        Whether to rescale the intensity of each image to [0, 1].
    grid_shape: tuple, optional
        The desired grid shape for the montage (tiles_y, tiles_x).
        The default aspect ratio is square.

    Returns
    -------
    arr_out: ndarray, shape=[alpha * height, alpha * width]
        Output array where 'alpha' has been determined automatically to
        fit (at least) the `n_images` in `arr_in`.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.montage import montage2d
    >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2)
    >>> arr_in  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0,  1],
            [ 2,  3]],
           [[ 4,  5],
            [ 6,  7]],
           [[ 8,  9],
            [10, 11]]])
    >>> arr_out = montage2d(arr_in)
    >>> arr_out.shape
    (4, 4)
    >>> arr_out
    array([[  0. ,   1. ,   4. ,   5. ],
           [  2. ,   3. ,   6. ,   7. ],
           [  8. ,   9. ,   5.5,   5.5],
           [ 10. ,  11. ,   5.5,   5.5]])
    >>> arr_in.mean()
    5.5
    >>> arr_out_nonsquare = montage2d(arr_in, grid_shape=(1, 3))
    >>> arr_out_nonsquare
    array([[  0.,   1.,   4.,   5.,   8.,   9.],
           [  2.,   3.,   6.,   7.,  10.,  11.]])
    >>> arr_out_nonsquare.shape
    (2, 6)
    """
    assert arr_in.ndim == 3
    n_images, height, width = arr_in.shape
    # Work on a copy so optional rescaling never touches the caller's data.
    arr_in = arr_in.copy()
    if rescale_intensity:
        for idx in range(n_images):
            arr_in[idx] = exposure.rescale_intensity(arr_in[idx])
    # Grid dimensions: explicit, or the smallest square that fits.
    if grid_shape:
        n_rows, n_cols = grid_shape
    else:
        n_rows = n_cols = int(np.ceil(np.sqrt(n_images)))
    # The fill value is computed after rescaling so it matches the data.
    if fill == 'mean':
        fill = arr_in.mean()
    # Pad with constant tiles up to a full n_rows x n_cols grid.
    n_missing = int(n_rows * n_cols - n_images)
    padding = np.ones((n_missing, height, width), dtype=arr_in.dtype) * fill
    tiles = np.concatenate((arr_in, padding), axis=0)
    # Lay the tiles out: (rows, cols, h, w) -> (rows, h, cols, w) -> 2-D.
    tiles = tiles.reshape(n_rows, n_cols, height, width)
    arr_out = tiles.transpose(0, 2, 1, 3).reshape(n_rows * height, n_cols * width)
    return arr_out
| bsd-3-clause |
googleapis/python-recommender | setup.py | 1 | 2520 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-recommender"
description = "Cloud Recommender API client library"
version = "2.2.0"
release_status = "Development Status :: 5 - Production/Stable"
# Runtime dependencies; the google-api-core range must track supported majors.
dependencies = [
    "google-api-core[grpc] >= 1.26.0, <2.0.0dev",
    "proto-plus >= 1.10.0",
    "packaging >= 14.3",
]
extras = {"libcst": "libcst >= 0.2.5"}
# Helper scripts shipped with the package (migrate callers to keyword args).
scripts = [
    "scripts/fixup_recommender_v1_keywords.py",
    "scripts/fixup_recommender_v1beta1_keywords.py",
]
package_root = os.path.abspath(os.path.dirname(__file__))
# The README becomes the long description shown on PyPI.
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
    readme = readme_file.read()
# Only ship the google.* (PEP 420 namespace) packages.
packages = [
    package
    for package in setuptools.PEP420PackageFinder.find()
    if package.startswith("google")
]
namespaces = ["google"]
if "google.cloud" in packages:
    namespaces.append("google.cloud")
setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    author="Google LLC",
    author_email="googleapis-packages@google.com",
    license="Apache 2.0",
    url="https://github.com/googleapis/python-recommender",
    classifiers=[
        release_status,
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "Topic :: Internet",
    ],
    platforms="Posix; MacOS X; Windows",
    packages=packages,
    namespace_packages=namespaces,
    install_requires=dependencies,
    extras_require=extras,
    python_requires=">=3.6",
    scripts=scripts,
    include_package_data=True,
    zip_safe=False,
)
| apache-2.0 |
hehongliang/tensorflow | tensorflow/contrib/learn/python/learn/utils/__init__.py | 42 | 1492 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Learn Utils (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.utils.export import export_estimator
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import build_default_serving_input_fn
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import build_parsing_serving_input_fn
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import InputFnOps
from tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils import make_export_strategy
| apache-2.0 |
pygeek/django | django/contrib/admin/templatetags/log.py | 104 | 2124 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
    """Template node that stores recent LogEntry rows in a context variable.

    ``limit`` and ``user`` arrive as raw template tokens (strings); they
    are normalised at render time.
    """

    def __init__(self, limit, varname, user):
        self.limit, self.varname, self.user = limit, varname, user

    def __repr__(self):
        return "<GetAdminLog Node>"

    def render(self, context):
        """Populate context[varname] with the most recent log entries."""
        if self.user is None:
            entries = LogEntry.objects.all()
        else:
            user_id = self.user
            if not user_id.isdigit():
                # Token names a context variable holding a user object.
                user_id = context[self.user].id
            entries = LogEntry.objects.filter(user__id__exact=user_id)
        # int(self.limit): the limit is a string token; the unfiltered
        # branch previously sliced the queryset with the raw string, which
        # fails on Python 3.
        context[self.varname] = entries.select_related('content_type', 'user')[:int(self.limit)]
        return ''
@register.tag
def get_admin_log(parser, token):
    """
    Populates a template variable with the admin log for the given criteria.

    Usage::

        {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}

    Examples::

        {% get_admin_log 10 as admin_log for_user 23 %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% get_admin_log 10 as admin_log %}

    Note that ``context_var_containing_user_obj`` can be a hard-coded integer
    (user ID) or the name of a template context variable containing the user
    object whose ID you want.
    """
    tokens = token.contents.split()
    if len(tokens) < 4:
        raise template.TemplateSyntaxError(
            "'get_admin_log' statements require two arguments")
    if not tokens[1].isdigit():
        raise template.TemplateSyntaxError(
            "First argument to 'get_admin_log' must be an integer")
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError(
            "Second argument to 'get_admin_log' must be 'as'")
    if len(tokens) > 4:
        if tokens[4] != 'for_user':
            raise template.TemplateSyntaxError(
                "Fourth argument to 'get_admin_log' must be 'for_user'")
    # Conditional expression instead of the fragile 'x and a or b' idiom
    # (which silently yields b whenever a is falsy).
    user = tokens[5] if len(tokens) > 5 else None
    return AdminLogNode(limit=tokens[1], varname=tokens[3], user=user)
mezz64/home-assistant | homeassistant/components/co2signal/sensor.py | 13 | 3295 | """Support for the CO2signal platform."""
import logging
import CO2Signal
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_TOKEN,
ENERGY_KILO_WATT_HOUR,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
# Configuration key for the optional country-code location style.
CONF_COUNTRY_CODE = "country_code"

_LOGGER = logging.getLogger(__name__)

ATTRIBUTION = "Data provided by CO2signal"

# Shown when the user mixes the two location styles in the config.
MSG_LOCATION = (
    "Please use either coordinates or the country code. "
    "For the coordinates, "
    "you need to use both latitude and longitude."
)
CO2_INTENSITY_UNIT = f"CO2eq/{ENERGY_KILO_WATT_HOUR}"

# vol.Inclusive: latitude and longitude must be given together (or not at
# all); alternatively a country code may be supplied.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_TOKEN): cv.string,
        vol.Inclusive(CONF_LATITUDE, "coords", msg=MSG_LOCATION): cv.latitude,
        vol.Inclusive(CONF_LONGITUDE, "coords", msg=MSG_LOCATION): cv.longitude,
        vol.Optional(CONF_COUNTRY_CODE): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the CO2signal sensor."""
    token = config[CONF_TOKEN]
    # Fall back to the Home Assistant instance location when no explicit
    # coordinates were configured.
    latitude = config.get(CONF_LATITUDE, hass.config.latitude)
    longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
    country_code = config.get(CONF_COUNTRY_CODE)
    _LOGGER.debug("Setting up the sensor using the %s", country_code)
    add_entities([CO2Sensor(token, country_code, latitude, longitude)], True)
class CO2Sensor(Entity):
    """Implementation of the CO2Signal sensor."""

    def __init__(self, token, country_code, lat, lon):
        """Initialize the sensor."""
        self._token = token
        self._country_code = country_code
        self._latitude = lat
        self._longitude = lon
        self._data = None
        device_name = (
            country_code
            if country_code is not None
            else f"{round(lat, 2)}/{round(lon, 2)}"
        )
        self._friendly_name = f"CO2 intensity - {device_name}"

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._friendly_name

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return "mdi:molecule-co2"

    @property
    def state(self):
        """Return the last fetched carbon intensity."""
        return self._data

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return CO2_INTENSITY_UNIT

    @property
    def device_state_attributes(self):
        """Return the state attributes of the last update."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    def update(self):
        """Get the latest data and updates the states."""
        _LOGGER.debug("Update data for %s", self._friendly_name)
        if self._country_code is not None:
            value = CO2Signal.get_latest_carbon_intensity(
                self._token, country_code=self._country_code
            )
        else:
            value = CO2Signal.get_latest_carbon_intensity(
                self._token, latitude=self._latitude, longitude=self._longitude
            )
        self._data = round(value, 2)
| apache-2.0 |
VitalPet/odoo | addons/crm_profiling/crm_profiling.py | 52 | 10442 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
    """Collect the distinct answers referenced by the given profiles.

    :param cr: the current row, from the database cursor
    :param uid: the current user's ID for security checks (unused here)
    :param ids: list of crm profiling (segmentation) IDs
    :return: [ids of required 'yes' answers, ids of excluded 'no' answers]
    """
    # Answers that must be present for the profile to match.
    query = """
    select distinct(answer)
    from profile_question_yes_rel
    where profile IN %s"""
    cr.execute(query, (tuple(ids),))
    ans_yes = [x[0] for x in cr.fetchall()]
    # Answers that must NOT be present.
    query = """
    select distinct(answer)
    from profile_question_no_rel
    where profile IN %s"""
    cr.execute(query, (tuple(ids),))
    ans_no = [x[0] for x in cr.fetchall()]
    return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
    """Return True if the partner *pid* fetches the segmentation rule *seg_id*.

    :param cr: the current row, from the database cursor
    :param uid: the current user's ID for security checks
    :param seg_id: segmentation ID
    :param pid: partner's ID (kept for API compatibility; not used here)
    :param answers_ids: IDs of the answers given by the partner
    """
    profile_ids = _get_parents(cr, uid, [seg_id])
    yes_answers, no_answers = _get_answers(cr, uid, profile_ids)
    # Every required ("yes") answer must have been given...
    if any(required not in answers_ids for required in yes_answers):
        return False
    # ...and none of the given answers may be excluded ("no").
    if any(given in no_answers for given in answers_ids):
        return False
    return True
def _recompute_categ(self, cr, uid, pid, answers_ids):
    """Recompute the partner categories implied by profiling answers.

    Keeps the partner's categories that are not tied to an exclusive
    segmentation, then re-adds the category of every active profiling
    segmentation the partner still matches.

    @param self: the object pointer
    @param cr: the current row, from the database cursor
    @param uid: the current user's ID for security checks
    @param pid: partner's ID
    @param answers_ids: answers' IDs
    @return: list of category IDs the partner should belong to
    """
    # Categories kept as-is: not linked to an exclusive segmentation.
    cr.execute('''
        select r.category_id
        from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
        where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
        ''', (pid,))
    categ_ids = [row[0] for row in cr.fetchall()]
    query = '''
        select id, categ_id
        from crm_segmentation
        where profiling_active = true'''
    if categ_ids:
        # Values originate from the database (integer category IDs), so the
        # inline formatting below is safe from injection.
        query += ''' and categ_id not in(%s)''' % ','.join(str(cid) for cid in categ_ids)
    query += ''' order by id '''
    cr.execute(query)
    # Re-add each active profiling category the partner still qualifies for.
    for segm_id, cat_id in cr.fetchall():
        if test_prof(cr, uid, segm_id, pid, answers_ids):
            categ_ids.append(cat_id)
    return categ_ids
class question(osv.osv):
    """A profiling question together with the answers that can be picked."""

    _name = "crm_profiling.question"
    _description = "Question"
    _columns = {
        'name': fields.char("Question", size=128, required=True),
        'answers_ids': fields.one2many(
            "crm_profiling.answer", "question_id", "Avalaible Answers"),
    }

question()
class questionnaire(osv.osv):
    """A named set of profiling questions presented together."""

    _name = "crm_profiling.questionnaire"
    _description = "Questionnaire"
    _columns = {
        'name': fields.char("Questionnaire", size=128, required=True),
        'description': fields.text("Description", required=True),
        'questions_ids': fields.many2many(
            'crm_profiling.question', 'profile_questionnaire_quest_rel',
            'questionnaire', 'question', "Questions"),
    }

questionnaire()
class answer(osv.osv):
    """One selectable answer belonging to a profiling question."""

    _name = "crm_profiling.answer"
    _description = "Answer"
    _columns = {
        "name": fields.char("Answer", size=128, required=True),
        "question_id": fields.many2one('crm_profiling.question', "Question"),
    }

answer()
class partner(osv.osv):
    """Extend res.partner with the profiling answers given by the partner."""

    _inherit = "res.partner"
    _columns = {
        "answers_ids": fields.many2many(
            "crm_profiling.answer", "partner_question_rel",
            "partner", "answer", "Answers"),
    }

    def _questionnaire_compute(self, cr, uid, answers, context=None):
        """Merge ``answers`` with the partner's stored answers and save them.

        The target partner is taken from ``context['active_id']``.

        @param self: the object pointer
        @param cr: the current row, from the database cursor
        @param uid: the current user's ID for security checks
        @param answers: list of answer IDs to add (extended in place with
            the partner's existing answers)
        @param context: a standard dictionary for contextual values
        """
        partner_id = context.get('active_id')
        cr.execute("select answer from partner_question_rel where partner=%s",
                   (partner_id,))
        for row in cr.fetchall():
            answers.append(row[0])
        self.write(cr, uid, [partner_id],
                   {'answers_ids': [[6, 0, answers]]}, context=context)
        return {}

    def write(self, cr, uid, ids, vals, context=None):
        """Recompute the partner's categories whenever the answers change.

        @param self: the object pointer
        @param cr: the current row, from the database cursor
        @param uid: the current user's ID for security checks
        @param ids: list of partner IDs
        @param context: a standard dictionary for contextual values
        """
        if 'answers_ids' in vals:
            # vals['answers_ids'] is a [(6, 0, [ids])] command; element
            # [0][2] is the full list of answer IDs being written.
            new_categs = _recompute_categ(self, cr, uid, ids[0],
                                          vals['answers_ids'][0][2])
            vals['category_id'] = [[6, 0, new_categs]]
        return super(partner, self).write(cr, uid, ids, vals, context=context)

partner()
class crm_segmentation(osv.osv):
    """ CRM Segmentation

    Extends the base segmentation with profiling criteria: answers a partner
    must have given (answer_yes) or must not have given (answer_no), plus a
    parent/child hierarchy of profiles.
    """
    _inherit="crm.segmentation"
    _columns={
        # Answers the partner is required to have given.
        "answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
                             "profile","answer","Included Answers"),
        # Answers the partner must NOT have given.
        "answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
                             "profile","answer","Excluded Answers"),
        'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
        'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
        'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
                             this box if you want to use this tab as part of the \
                             segmentation rule. If not checked, the criteria beneath will be ignored')
        }
    _constraints = [
        # Guard against cycles in the parent_id hierarchy.
        (osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
        ]
    def process_continue(self, cr, uid, ids, start=False):
        """Run the segmentation over all partners and assign categories.

        For each segmentation: start from every partner, filter out those
        failing the sales/purchase lines and the profiling rules, then link
        the survivors to the segmentation's category.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm segmentation's IDs
        @param start: when True, exclusive segmentations first drop ALL
            existing links to their category before re-assigning
        """
        partner_obj = self.pool.get('res.partner')
        categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
                            'sales_purchase_active', 'profiling_active'])
        for categ in categs:
            if start:
                if categ['exclusif']:
                    # Exclusive profile: wipe current members of the category.
                    cr.execute('delete from res_partner_res_partner_category_rel where \
                            category_id=%s', (categ['categ_id'][0],))
            # NOTE: 'id' shadows the builtin; kept as-is (doc-only change).
            id = categ['id']
            # Candidate set: every partner, filtered down below.
            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]
            if categ['sales_purchase_active']:
                # Drop partners failing the segmentation's sales/purchase lines.
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]
                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)
            if categ['profiling_active']:
                # Drop partners whose recorded answers fail the profiling rule.
                to_remove_list = []
                for pid in partners:
                    cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
                    answers_ids = [x[0] for x in cr.fetchall()]
                    if (not test_prof(cr, uid, id, pid, answers_ids)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)
            # Link every surviving partner to the category (skip existing links).
            for partner in partner_obj.browse(cr, uid, partners):
                category_ids = [categ_id.id for categ_id in partner.category_id]
                if categ['categ_id'][0] not in category_ids:
                    cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True
crm_segmentation()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TylerBrock/mongo-orchestration | tests/test_process.py | 2 | 12878 | #!/usr/bin/python
# coding=utf-8
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import random
import socket
import subprocess
import sys
import tempfile
sys.path.insert(0, '../')
import mongo_orchestration.process as process
from mongo_orchestration.errors import TimeoutError
from nose.plugins.attrib import attr
from tests import unittest, SkipTest
@attr('process')
@attr('portpool')
@attr('test')
class PortPoolTestCase(unittest.TestCase):
    """Tests for process.PortPool: allocation, release, refresh, ranges."""

    def setUp(self):
        self.hostname = process.HOSTNAME
        self.pp = process.PortPool()
        self.pp.change_range(min_port=1025, max_port=1080)
        # port -> listening socket, for ports deliberately made busy.
        self.sockets = {}
    def tearDown(self):
        # Close every listener opened via listen_port().
        for s in self.sockets:
            self.sockets[s].close()
    def listen_port(self, port, max_connection=0):
        """Occupy *port* with a listening socket so the pool sees it busy."""
        if self.sockets.get(port, None):
            self.sockets[port].close()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((process.HOSTNAME, port))
        s.listen(max_connection)
        self.sockets[port] = s
    def test_singleton(self):
        # A second construction must return the very same pool object.
        pp2 = process.PortPool(min_port=1025, max_port=1038)
        self.assertEqual(id(self.pp), id(pp2))
    def test_port_sequence(self):
        ports = set([1025, 1026, 1027, 1028, 30, 28, 22, 45])
        self.pp.change_range(port_sequence=ports)
        # Open + closed sets together must equal the configured sequence.
        _ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
        self.assertEqual(ports, _ports)
    def test_find_port(self):
        port = self.pp.port()
        self.pp.change_range(port, port)
        port = self.pp.port()
        self.assertTrue(port > 0)
        self.listen_port(port)
        # Single-port pool and that port is busy -> nothing left to hand out.
        self.assertRaises(IndexError, self.pp.port)
    def test_port_with_check(self):
        self.pp.change_range(min_port=1100, max_port=1200)
        port1, port2 = self.pp.port(check=True), self.pp.port(check=True)
        self.pp.change_range(port_sequence=[port1, port2])
        self.listen_port(port1, 0)
        # port1 is occupied, so a checked request must yield port2.
        self.assertTrue(port2 == self.pp.port(check=True))
    def test_check_port(self):
        port = self.pp.port(check=True)
        self.assertTrue(self.pp._PortPool__check_port(port))
        self.listen_port(port)
        self.assertFalse(self.pp._PortPool__check_port(port))
    def test_release_port(self):
        port = self.pp.port(check=True)
        self.assertTrue(port in self.pp._PortPool__closed)
        self.pp.release_port(port)
        self.assertFalse(port in self.pp._PortPool__closed)
    def test_refresh(self):
        ports = set([random.randint(1025, 2000) for i in range(15)])
        self.pp.change_range(port_sequence=ports)
        ports_opened = self.pp._PortPool__ports.copy()
        test_port = ports_opened.pop()
        self.assertTrue(test_port in self.pp._PortPool__ports)
        self.assertTrue(len(self.pp._PortPool__ports) > 1)
        # Occupy everything except test_port; a port already in use by the
        # system simply cannot be listened on, hence the except.
        for port in ports:
            if port != test_port:
                try:
                    self.listen_port(port)
                except (socket.error):
                    pass
        self.pp.refresh()
        # After refresh only the one genuinely free port should remain open.
        self.assertTrue(len(self.pp._PortPool__ports) == 1)
    def test_refresh_only_closed(self):
        ports = set([random.randint(1025, 2000) for _ in range(15)])
        self.pp.change_range(port_sequence=ports)
        closed_num = len(self.pp._PortPool__closed)
        # Take two ports (expression used purely for its side effect).
        self.pp.port(), self.pp.port()
        self.assertTrue(closed_num + 2 == len(self.pp._PortPool__closed))
        ports_opened = self.pp._PortPool__ports.copy()
        test_port = ports_opened.pop()
        self.listen_port(test_port)
        # only_closed=True: closed count returns to its baseline...
        self.pp.refresh(only_closed=True)
        self.assertTrue(closed_num == len(self.pp._PortPool__closed))
        # ...while a full refresh also detects the busy test_port.
        self.pp.refresh()
        self.assertTrue(closed_num + 1 == len(self.pp._PortPool__closed))
    def test_change_range(self):
        self.pp.change_range(min_port=1025, max_port=1033)
        ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
        self.assertTrue(ports == set(range(1025, 1033 + 1)))
        random_ports = set([random.randint(1025, 2000) for i in range(15)])
        self.pp.change_range(port_sequence=random_ports)
        ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
        self.assertTrue(ports == random_ports)
@attr('process')
@attr('test')
class ProcessTestCase(unittest.TestCase):
    """Tests for the process module: mongod spawning, config I/O, cleanup.

    Several tests launch a real ``mongod`` binary (located via the MONGOBIN
    environment variable) and therefore require it to be installed.
    """

    def setUp(self):
        self.hostname = process.HOSTNAME
        self.s = None
        self.executable = sys.executable
        self.pp = process.PortPool(min_port=1025, max_port=2000)
        self.sockets = {}
        # Temp files created by a test; removed in tearDown.
        self.tmp_files = list()
        self.bin_path = os.path.join(os.environ.get('MONGOBIN', ''), 'mongod')
        self.db_path = tempfile.mkdtemp()
        self.cfg = {"noprealloc": True, "smallfiles": True, "oplogSize": 10, 'dbpath': self.db_path}
    def tearDown(self):
        for s in self.sockets:
            self.sockets[s].close()
        if self.cfg:
            process.cleanup_mprocess('', self.cfg)
        for item in self.tmp_files:
            if os.path.exists(item):
                os.remove(item)
    def listen_port(self, port, max_connection=0):
        """Occupy *port* with a listening socket so it reads as busy."""
        if self.sockets.get(port, None):
            self.sockets[port].close()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((process.HOSTNAME, port))
        s.listen(max_connection)
        self.sockets[port] = s
    def test_wait_for(self):
        port = self.pp.port(check=True)
        self.listen_port(port, max_connection=1)
        # wait_for succeeds while something listens, fails after close.
        self.assertTrue(process.wait_for(port, 1))
        self.sockets.pop(port).close()
        self.assertFalse(process.wait_for(port, 1))
    def test_repair(self):
        port = self.pp.port(check=True)
        # Assume we're testing on 64-bit machines.
        self.cfg['nojournal'] = True
        lock_file = os.path.join(self.cfg['dbpath'], 'mongod.lock')
        config_path = process.write_config(self.cfg)
        self.tmp_files.append(config_path)
        proc, host = process.mprocess(self.bin_path, config_path, port=port, timeout=60)
        self.assertTrue(os.path.exists(lock_file))
        if platform.system() == 'Windows':
            # mongod.lock cannot be read by any external process on Windows.
            with self.assertRaises(IOError):
                open(lock_file, 'r')
        else:
            with open(lock_file, 'r') as fd:
                self.assertGreater(len(fd.read()), 0)
        proc.terminate()
        # After repair the lock file must be empty again.
        process.repair_mongo(self.bin_path, self.cfg['dbpath'])
        with open(lock_file, 'r') as fd:
            contents = fd.read()
        self.assertEqual(len(contents), 0,
                         "lock_file contains: " + contents)
    def test_mprocess_fail(self):
        fd_cfg, config_path = tempfile.mkstemp()
        os.close(fd_cfg)
        self.tmp_files.append(config_path)
        # Nonexistent binary -> OSError; bogus config -> startup timeout.
        self.assertRaises(OSError, process.mprocess,
                          'fake-process_', config_path, None, 30)
        process.write_config({"fake": True}, config_path)
        self.assertRaises(TimeoutError, process.mprocess,
                          'mongod', config_path, None, 30)
    def test_mprocess(self):
        port = self.pp.port(check=True)
        config_path = process.write_config(self.cfg)
        self.tmp_files.append(config_path)
        # mprocess returns a (Popen, "host:port") pair.
        result = process.mprocess(self.bin_path, config_path, port=port, timeout=60)
        self.assertTrue(isinstance(result, tuple))
        proc, host = result
        self.assertTrue(isinstance(proc, subprocess.Popen))
        self.assertTrue(isinstance(host, str))
        process.kill_mprocess(proc)
    def test_mprocess_timeout(self):
        port = self.pp.port()
        cfg = self.cfg.copy()
        cfg.pop('noprealloc')
        cfg.pop('smallfiles')
        # Journaling slows startup enough to trip a tiny timeout below.
        cfg['journal'] = True
        config_path = process.write_config(cfg)
        self.tmp_files.append(config_path)
        # timeout=0 means "don't wait" and must still succeed.
        proc, host = process.mprocess(self.bin_path, config_path, port, 0)
        self.assertTrue(isinstance(proc, subprocess.Popen))
        self.assertTrue(isinstance(host, str))
        process.kill_mprocess(proc)
        if platform.system() == 'Windows':
            raise SkipTest("Cannot test mongod startup timeout on Windows.")
        with self.assertRaises(TimeoutError):
            result = process.mprocess(self.bin_path, config_path, port, 0.1)
            print(result)
    def test_mprocess_busy_port(self):
        config_path = process.write_config(self.cfg)
        self.tmp_files.append(config_path)
        port = self.pp.port()
        self.listen_port(port, max_connection=0)
        # A busy port is tolerated when a process can still be spawned...
        proc, host = process.mprocess(self.executable, config_path,
                                      port=port, timeout=2)
        self.assertTrue(proc.pid > 0)
        self.assertEqual(host, self.hostname + ':' + str(port))
        self.sockets.pop(port).close()
        # ...but an empty config path must raise.
        self.assertRaises(OSError, process.mprocess,
                          self.executable, '', port, 1)
    def test_kill_mprocess(self):
        p = subprocess.Popen([self.executable])
        self.assertTrue(process.proc_alive(p))
        process.kill_mprocess(p)
        self.assertFalse(process.proc_alive(p))
    def test_cleanup_process(self):
        fd_cfg, config_path = tempfile.mkstemp()
        fd_key, key_file = tempfile.mkstemp()
        fd_log, log_path = tempfile.mkstemp()
        db_path = tempfile.mkdtemp()
        self.assertTrue(os.path.exists(config_path))
        self.assertTrue(os.path.exists(key_file))
        self.assertTrue(os.path.exists(log_path))
        self.assertTrue(os.path.exists(db_path))
        with os.fdopen(fd_cfg, 'w') as fd:
            fd.write('keyFile={key_file}\n'
                     'logPath={log_path}\n'
                     'dbpath={db_path}'.format(**locals()))
        for fd in (fd_cfg, fd_key, fd_log):
            try:
                os.close(fd)
            except OSError:
                # fd_cfg may be closed already if fdopen() didn't raise
                pass
        cfg = {'keyFile': key_file, 'logPath': log_path, 'dbpath': db_path}
        # cleanup must remove the config plus every path it references.
        process.cleanup_mprocess(config_path, cfg)
        self.assertFalse(os.path.exists(config_path))
        self.assertFalse(os.path.exists(key_file))
        self.assertFalse(os.path.exists(log_path))
        self.assertFalse(os.path.exists(db_path))
    def test_remove_path(self):
        # Removes a plain file...
        fd, file_path = tempfile.mkstemp()
        os.close(fd)
        self.assertTrue(os.path.exists(file_path))
        process.remove_path(file_path)
        self.assertFalse(os.path.exists(file_path))
        # ...and a directory tree including its contents.
        dir_path = tempfile.mkdtemp()
        fd, file_path = tempfile.mkstemp(dir=dir_path)
        os.close(fd)
        process.remove_path(dir_path)
        self.assertFalse(os.path.exists(file_path))
        self.assertFalse(os.path.exists(dir_path))
    def test_write_config(self):
        cfg = {'port': 27017, 'objcheck': 'true'}
        config_path = process.write_config(cfg)
        self.assertTrue(os.path.exists(config_path))
        with open(config_path, 'r') as fd:
            config_data = fd.read()
        # Values are serialized as key=value lines.
        self.assertTrue('port=27017' in config_data)
        self.assertTrue('objcheck=true' in config_data)
        process.cleanup_mprocess(config_path, cfg)
    def test_write_config_with_specify_config_path(self):
        cfg = {'port': 27017, 'objcheck': 'true'}
        fd_key, file_path = tempfile.mkstemp()
        os.close(fd_key)
        # An explicit target path must be used verbatim.
        config_path = process.write_config(cfg, file_path)
        self.assertEqual(file_path, config_path)
        process.cleanup_mprocess(config_path, cfg)
    def test_proc_alive(self):
        p = subprocess.Popen([self.executable])
        self.assertTrue(process.proc_alive(p))
        p.terminate()
        p.wait()
        self.assertFalse(process.proc_alive(p))
        # None is tolerated and reported as "not alive".
        self.assertFalse(process.proc_alive(None))
    def test_read_config(self):
        # write_config/read_config must round-trip the dict, types included.
        cfg = {"noprealloc": True, "smallfiles": False, "oplogSize": 10, "other": "some string"}
        config_path = process.write_config(cfg)
        self.tmp_files.append(config_path)
        self.assertEqual(process.read_config(config_path), cfg)
if __name__ == '__main__':
    # verbosity=3 prints each test's name and result as it runs.
    unittest.main(verbosity=3)
    # suite = unittest.TestSuite()
    # suite.addTest(ProcessTestCase('test_repair'))
    # unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/pip/_vendor/requests/adapters.py | 54 | 21548 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
from pip._vendor.urllib3.response import HTTPResponse
from pip._vendor.urllib3.util import parse_url
from pip._vendor.urllib3.util import Timeout as TimeoutSauce
from pip._vendor.urllib3.util.retry import Retry
from pip._vendor.urllib3.exceptions import ClosedPoolError
from pip._vendor.urllib3.exceptions import ConnectTimeoutError
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
from pip._vendor.urllib3.exceptions import MaxRetryError
from pip._vendor.urllib3.exceptions import NewConnectionError
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
from pip._vendor.urllib3.exceptions import ProtocolError
from pip._vendor.urllib3.exceptions import ReadTimeoutError
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
from pip._vendor.urllib3.exceptions import ResponseError
from pip._vendor.urllib3.exceptions import LocationValueError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
InvalidURL)
from .auth import _basic_auth_str
# SOCKS support is optional: it requires the urllib3 "socks" extra. When the
# import fails, install a stand-in that raises lazily, only if a SOCKS proxy
# is actually requested.
try:
    from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
    def SOCKSProxyManager(*args, **kwargs):
        raise InvalidSchema("Missing dependencies for SOCKS support.")

# Defaults for HTTPAdapter's urllib3 connection pooling behavior.
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
    """Abstract base class for Requests transport adapters.

    Concrete adapters must implement :meth:`send` and :meth:`close`; this
    base class only defines the interface and raises otherwise.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Send a PreparedRequest object and return a Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        raise NotImplementedError

    def close(self):
        """Release any adapter-specific resources."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling (see __getstate__/__setstate__)
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block

        # strict is passed through to urllib3's connection handling.
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.

        Managers are cached per proxy URL in ``self.proxy_manager`` so each
        proxy gets exactly one manager for the adapter's lifetime.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        :rtype: urllib3.ProxyManager
        """
        if proxy in self.proxy_manager:
            # Cached: reuse the existing manager.
            manager = self.proxy_manager[proxy]
        elif proxy.lower().startswith('socks'):
            # SOCKS proxies carry credentials in the URL itself.
            username, password = get_auth_from_url(proxy)
            manager = self.proxy_manager[proxy] = SOCKSProxyManager(
                proxy,
                username=username,
                password=password,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs
            )
        else:
            # HTTP(S) proxies: credentials go into Proxy-Authorization headers.
            proxy_headers = self.proxy_headers(proxy)
            manager = self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)

        return manager
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: The SSL certificate to verify.
        :raises IOError: if the CA bundle, client cert, or key path does not exist.
        """
        if url.lower().startswith('https') and verify:

            cert_loc = None

            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify

            # Fall back to the bundled CA bundle (extracted if zipped).
            if not cert_loc:
                cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)

            if not cert_loc or not os.path.exists(cert_loc):
                raise IOError("Could not find a suitable TLS CA certificate bundle, "
                              "invalid path: {}".format(cert_loc))

            conn.cert_reqs = 'CERT_REQUIRED'

            # A directory of certs vs. a single bundle file.
            if not os.path.isdir(cert_loc):
                conn.ca_certs = cert_loc
            else:
                conn.ca_cert_dir = cert_loc
        else:
            # Verification disabled (or plain http): clear any CA settings.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
            conn.ca_cert_dir = None

        if cert:
            # cert may be a (cert_file, key_file) pair or a single file path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
                conn.key_file = None
            if conn.cert_file and not os.path.exists(conn.cert_file):
                raise IOError("Could not find the TLS certificate file, "
                              "invalid path: {}".format(conn.cert_file))
            if conn.key_file and not os.path.exists(conn.key_file):
                raise IOError("Could not find the TLS key file, "
                              "invalid path: {}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        # Intentionally a no-op: a hook point for subclasses.
        pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    """Sends PreparedRequest object. Returns Response object.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) Whether to stream the request content.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a :ref:`(connect timeout,
        read timeout) <timeouts>` tuple.
    :type timeout: float or tuple or urllib3 Timeout object
    :param verify: (optional) Either a boolean, in which case it controls whether
        we verify the server's TLS certificate, or a string, in which case it
        must be a path to a CA bundle to use
    :param cert: (optional) Any user-provided SSL certificate to be trusted.
    :param proxies: (optional) The proxies dictionary to apply to the request.
    :rtype: requests.Response
    """
    try:
        conn = self.get_connection(request.url, proxies)
    except LocationValueError as e:
        raise InvalidURL(e, request=request)

    self.cert_verify(conn, request.url, verify, cert)
    url = self.request_url(request, proxies)
    self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)

    # Use chunked transfer when there is a body but no Content-Length header.
    chunked = not (request.body is None or 'Content-Length' in request.headers)

    # Normalize `timeout` into a urllib3 TimeoutSauce, accepting a
    # (connect, read) tuple, an existing TimeoutSauce, or a single number.
    if isinstance(timeout, tuple):
        try:
            connect, read = timeout
            timeout = TimeoutSauce(connect=connect, read=read)
        except ValueError as e:
            # this may raise a string formatting error.
            err = ("Invalid timeout {}. Pass a (connect, read) "
                   "timeout tuple, or a single float to set "
                   "both timeouts to the same value".format(timeout))
            raise ValueError(err)
    elif isinstance(timeout, TimeoutSauce):
        pass
    else:
        timeout = TimeoutSauce(connect=timeout, read=timeout)

    try:
        if not chunked:
            resp = conn.urlopen(
                method=request.method,
                url=url,
                body=request.body,
                headers=request.headers,
                redirect=False,
                assert_same_host=False,
                preload_content=False,
                decode_content=False,
                retries=self.max_retries,
                timeout=timeout
            )

        # Send the request (chunked path: drive the low-level connection by
        # hand so we can emit the chunked transfer encoding ourselves).
        else:
            if hasattr(conn, 'proxy_pool'):
                conn = conn.proxy_pool

            low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)

            try:
                low_conn.putrequest(request.method,
                                    url,
                                    skip_accept_encoding=True)

                for header, value in request.headers.items():
                    low_conn.putheader(header, value)

                low_conn.endheaders()

                # Emit each body piece as one chunk: hex length, CRLF,
                # payload, CRLF; then the zero-length terminating chunk.
                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')

                # Receive the response from the server
                try:
                    # For Python 2.7, use buffering of HTTP responses
                    r = low_conn.getresponse(buffering=True)
                except TypeError:
                    # For compatibility with Python 3.3+
                    r = low_conn.getresponse()

                resp = HTTPResponse.from_httplib(
                    r,
                    pool=conn,
                    connection=low_conn,
                    preload_content=False,
                    decode_content=False
                )
            except:
                # If we hit any problems here, clean up the connection.
                # Then, reraise so that we can handle the actual exception.
                low_conn.close()
                raise

    # Map urllib3/socket-level failures onto requests' exception types so
    # callers only ever have to catch requests.exceptions.*.
    except (ProtocolError, socket.error) as err:
        raise ConnectionError(err, request=request)

    except MaxRetryError as e:
        if isinstance(e.reason, ConnectTimeoutError):
            # TODO: Remove this in 3.0.0: see #2811
            if not isinstance(e.reason, NewConnectionError):
                raise ConnectTimeout(e, request=request)

        if isinstance(e.reason, ResponseError):
            raise RetryError(e, request=request)

        if isinstance(e.reason, _ProxyError):
            raise ProxyError(e, request=request)

        if isinstance(e.reason, _SSLError):
            # This branch is for urllib3 v1.22 and later.
            raise SSLError(e, request=request)

        raise ConnectionError(e, request=request)

    except ClosedPoolError as e:
        raise ConnectionError(e, request=request)

    except _ProxyError as e:
        raise ProxyError(e)

    except (_SSLError, _HTTPError) as e:
        if isinstance(e, _SSLError):
            # This branch is for urllib3 versions earlier than v1.22
            raise SSLError(e, request=request)
        elif isinstance(e, ReadTimeoutError):
            raise ReadTimeout(e, request=request)
        else:
            raise

    return self.build_response(request, resp)
| mit |
lv0/whisper-backup | whisperbackup/pycronscript.py | 3 | 7452 | ''' Convenience class for writing cron scripts'''
# pylint: disable=R0903
# Copyright 2014 42Lines, Inc.
# Original Author: Jim Browne
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as DT
from lockfile import FileLock, LockFailed, LockTimeout
import logging
import logging.handlers
import __main__ as main
from optparse import OptionParser, make_option
import os
from random import randint
import sys
import time
# Support for RotateFileHandler in multiple processes
from multiprocessinglog import MultiProcessingLog, MultiProcessingLogStream
__version__ = '0.2.1'
class StdErrFilter(logging.Filter):
    """Logging filter that rejects records below a threshold level.

    When ``discard_all`` is set, every record is rejected regardless of
    its level.
    """

    def __init__(self, level=logging.WARNING, discard_all=False):
        super(StdErrFilter, self).__init__()
        self.level = level
        self.discard_all = discard_all

    def filter(self, record):
        # Drop everything when discard_all is on; otherwise keep only
        # records at or above the configured threshold.
        return (not self.discard_all) and record.levelno >= self.level
class CronScript(object):
    """Convenience context manager for writing cron scripts.

    Handles the boilerplate common to cron jobs: option parsing, logging
    to a rotating file / syslog / stderr, a lock file to prevent
    overlapping runs, an optional random start splay, and a "success
    stamp" file touched on clean exit.

    Typical use::

        with CronScript() as script:
            do_work()

    Fixes in this revision:
    - the success stamp file handle is now closed deterministically
      (previously ``open(..., "w")`` was called without closing, leaking
      the handle until garbage collection);
    - ``__enter__`` returns ``self`` per the context-manager convention,
      so ``with CronScript() as script`` binds the instance instead of
      ``None`` (backward-compatible: no caller can rely on ``None``
      usefully).
    """

    def __init__(self, args=None, options=None, usage=None,
                 disable_interspersed_args=False):
        """Parse command-line options and configure logging.

        :param args: argument list to parse; defaults to ``sys.argv[1:]``
        :param options: extra optparse Options appended before the defaults
        :param usage: usage string passed through to OptionParser
        :param disable_interspersed_args: stop parsing at first non-option
        """
        self.lock = None
        self.start_time = None
        self.end_time = None

        if options is None:
            options = []
        if args is None:
            args = sys.argv[1:]

        # Derive default log/lock/stamp locations from the calling
        # script's file name.
        prog = os.path.basename(main.__file__)
        logfile = os.path.join('/var/log/', "%s.log" % prog)
        lockfile = os.path.join('/var/lock/', "%s" % prog)
        stampfile = os.path.join('/var/tmp/', "%s.success" % prog)

        options.append(make_option("--debug", "-d", action="store_true",
                                   help="Minimum log level of DEBUG"))
        options.append(make_option("--quiet", "-q", action="store_true",
                                   help="Only WARN and above to stdout"))
        options.append(make_option("--nolog", action="store_true",
                                   help="Do not log to LOGFILE"))
        options.append(make_option("--logfile", type="string",
                                   default=logfile,
                                   help="File to log to, default %default"))
        options.append(make_option("--syslog", action="store_true",
                                   help="Log to syslog instead of a file"))
        options.append(make_option("--nolock", action="store_true",
                                   help="Do not use a lockfile"))
        options.append(make_option("--lockfile", type="string",
                                   default=lockfile,
                                   help="Lock file, default %default"))
        options.append(make_option("--nostamp", action="store_true",
                                   help="Do not use a success stamp file"))
        options.append(make_option("--stampfile", type="string",
                                   default=stampfile,
                                   help="Success stamp file, default %default"))

        helpmsg = "Lock timeout in seconds, default %default"
        options.append(make_option("--locktimeout", default=90, type="int",
                                   help=helpmsg))

        helpmsg = "Sleep a random time between 0 and N seconds before starting, default %default"
        options.append(make_option("--splay", default=0, type="int",
                                   help=helpmsg))

        parser = OptionParser(option_list=options, usage=usage)
        if disable_interspersed_args:
            # Stop option parsing at first non-option
            parser.disable_interspersed_args()
        (self.options, self.args) = parser.parse_args(args)

        self.logger = logging.getLogger(main.__name__)
        if self.options.debug:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)

        # Log to syslog
        if self.options.syslog:
            syslog_formatter = logging.Formatter("%s: %%(levelname)s %%(message)s" % prog)
            handler = logging.handlers.SysLogHandler(
                address="/dev/log",
                facility=logging.handlers.SysLogHandler.LOG_LOCAL3
            )
            handler.setFormatter(syslog_formatter)
            self.logger.addHandler(handler)

        default_formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
                                              "%Y-%m-%d-%H:%M:%S")

        if not self.options.nolog:
            # Log to file (multiprocess-safe rotating handler).
            try:
                handler = MultiProcessingLog(
                    "%s" % (self.options.logfile),
                    maxBytes=(50 * 1024 * 1024),
                    backupCount=10)
            except IOError:
                sys.stderr.write("Fatal: Could not open log file: %s\n"
                                 % self.options.logfile)
                sys.exit(1)
            handler.setFormatter(default_formatter)
            self.logger.addHandler(handler)

        # If quiet, only WARNING and above go to STDERR; otherwise all
        # logging goes to stderr
        handler2 = MultiProcessingLogStream(sys.stderr)
        if self.options.quiet:
            err_filter = StdErrFilter()
            handler2.addFilter(err_filter)
        handler2.setFormatter(default_formatter)
        self.logger.addHandler(handler2)

        self.logger.debug(self.options)

    def __enter__(self):
        """Splay if requested, record the start time, and acquire the lock.

        Exits the process (status 1) if the lock cannot be acquired.
        """
        if self.options.splay > 0:
            splay = randint(0, self.options.splay)
            self.logger.debug('Sleeping for %d seconds (splay=%d)' %
                              (splay, self.options.splay))
            time.sleep(splay)

        self.start_time = DT.datetime.today()

        if not self.options.nolock:
            self.logger.debug('Attempting to acquire lock %s (timeout %s)',
                              self.options.lockfile,
                              self.options.locktimeout)
            self.lock = FileLock(self.options.lockfile)
            try:
                self.lock.acquire(timeout=self.options.locktimeout)
            except LockFailed as e:
                self.logger.error("Lock could not be acquired.")
                self.logger.error(str(e))
                sys.exit(1)
            except LockTimeout:
                msg = "Lock could not be acquired. Timeout exceeded."
                self.logger.error(msg)
                sys.exit(1)

        return self

    def __exit__(self, etype, value, traceback):
        """Record the end time, release the lock, and on a clean exit
        touch the success stamp file."""
        self.end_time = DT.datetime.today()
        self.logger.debug('Run time: %s', self.end_time - self.start_time)

        if not self.options.nolock:
            self.logger.debug('Attempting to release lock %s',
                              self.options.lockfile)
            self.lock.release()

        if etype is None:
            if not self.options.nostamp:
                # Touch the stamp file; the context manager closes the
                # handle deterministically instead of leaking it.
                with open(self.options.stampfile, "w"):
                    pass
| apache-2.0 |
cdynak/lift-event-based-control | simulator/elevator.py | 1 | 3366 | from Tkinter import *
class Elevator():
    """Tk canvas widget visualizing one elevator shaft (Python 2 / Tkinter)."""

    # Number of shafts drawn per row of the UI grid; computed in __init__
    # from the total elevator count.
    lift_in_row = None

    class Floor():
        """One floor cell of the shaft, drawn as a coloured rectangle."""

        # Maps a lift state name to the fill colour of the floor cell.
        state_machine = {'moveup': 'green', 'movedown': 'green', 'stay': 'red', 'clear': 'white', 'open': 'blue'}

        def __init__(self, canvas, floor_no, height):
            # Rectangle is positioned from the bottom of the canvas upward,
            # one `height`-tall cell per floor, centred horizontally.
            self.canvas = canvas
            self.rec = canvas.create_rectangle(canvas.winfo_width()/2-20, canvas.winfo_height()-25-floor_no*height, canvas.winfo_width()/2+20, canvas.winfo_height()-25-(floor_no+1)*height+2, fill="white")

        def setState(self, state):
            # Recolour the cell according to the state -> colour table.
            self.canvas.itemconfigure(self.rec, fill=self.state_machine[state])

    def __init__(self, frame, floors, id, count):
        """Build the shaft canvas with `floors`+1 floor cells.

        :param frame: parent Tk widget (must be laid out already)
        :param floors: index of the highest floor (floor 0 included)
        :param id: this elevator's index, used for grid placement
        :param count: total number of elevators (determines shafts per row)
        """
        self.max_floors = floors
        self.lift_in_row = (count+1)//2
        self.id = id
        self.actual_floor=0
        self.floors = []
        self.canvas = Canvas(frame, height=(frame.winfo_height()/2)-2, width=(frame.winfo_width()/self.lift_in_row)-self.lift_in_row)
        # NOTE(review): `id/self.lift_in_row` relies on Python 2 integer
        # division for the grid row index — confirm before porting to Py3.
        self.canvas.grid(row=id/self.lift_in_row, column=id%self.lift_in_row, padx=1, pady=1)
        self.add_arrows()
        for i in range(floors+1):
            floor = self.Floor(self.canvas, i, (self.canvas.winfo_height()-50)/(floors+1))
            self.floors.append(floor)
        # Text item in the bottom-right corner showing the current floor.
        self.floor_display = self.canvas.create_text(self.canvas.winfo_width()-10, self.canvas.winfo_height()-10, anchor="se")
        self.canvas.insert(self.floor_display, 15, "")

    def add_arrows(self):
        """Create the up/down direction triangles at the top and bottom."""
        self.canvas.update()
        self.arrow_up = self.canvas.create_text(self.canvas.winfo_width()/2-4, 10, anchor="nw")
        self.canvas.itemconfig(self.arrow_up, text=u'\N{BLACK UP-POINTING TRIANGLE}')
        self.canvas.insert(self.arrow_up, 15, "")
        self.arrow_down = self.canvas.create_text(self.canvas.winfo_width()/2-4, self.canvas.winfo_height()-10, anchor="sw")
        self.canvas.itemconfig(self.arrow_down, text=u'\N{BLACK DOWN-POINTING TRIANGLE}')
        self.canvas.insert(self.arrow_down, 15, "")

    def UpdateLift(self, state='stay'):
        """Clear every floor cell, then colour the current floor with
        `state` and refresh the floor-number display.  Floors above
        max_floors are clamped to the top cell."""
        for floor_no in range(self.max_floors+1):
            self.floors[floor_no].setState('clear')
        if self.actual_floor > self.max_floors:
            self.floors[self.max_floors].setState(state)
        else:
            self.floors[self.actual_floor].setState(state)
        self.canvas.itemconfig(self.floor_display, text=self.actual_floor)

    def setState(self, state):
        """Colour the direction arrows to reflect the lift state:
        green up/down for movement, blue both for doors open."""
        self.canvas.itemconfig(self.arrow_up, text=u'\N{BLACK UP-POINTING TRIANGLE}', fill="black")
        self.canvas.itemconfig(self.arrow_down, text=u'\N{BLACK DOWN-POINTING TRIANGLE}', fill="black")
        if state == "moveup":
            self.canvas.itemconfig(self.arrow_up, text=u'\N{BLACK UP-POINTING TRIANGLE}', fill="green")
        if state == "movedown":
            self.canvas.itemconfig(self.arrow_down, text=u'\N{BLACK DOWN-POINTING TRIANGLE}', fill="green")
        if state == "open":
            self.canvas.itemconfig(self.arrow_up, text=u'\N{BLACK UP-POINTING TRIANGLE}', fill="blue")
            self.canvas.itemconfig(self.arrow_down, text=u'\N{BLACK DOWN-POINTING TRIANGLE}', fill="blue")

    def OpenDoors(self):
        # Placeholder: door animation not implemented.
        pass

    def CloseDoors(self):
        # Placeholder: door animation not implemented.
        pass

    def MoveLiftToFloor(self, floor):
        # Placeholder: movement animation not implemented.
        pass
aimas/TuniErp-8.0 | addons/account/test/test_parent_structure.py | 432 | 2108 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO: move this in a YAML test with !python tag
#
import xmlrpclib

# Connection settings for the local OpenERP server under test.
DB = 'training3'
USERID = 1
USERPASS = 'admin'

sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % ('localhost',8069))

# Fetch every account with its nested-set fields (parent_left/parent_right)
# and index them by id for parent lookups.
ids = sock.execute(DB, USERID, USERPASS, 'account.account', 'search', [], {})
account_lists = sock.execute(DB, USERID, USERPASS, 'account.account', 'read', ids, ['parent_id','parent_left','parent_right'])
accounts = dict(map(lambda x: (x['id'],x), account_lists))

# Validate the nested-set invariants over the whole account tree:
# a child's [left, right] interval lies strictly inside its parent's,
# every interval is well-formed, and no two intervals partially overlap.
for a in account_lists:
    if a['parent_id']:
        # Child interval is contained in the parent interval.
        assert a['parent_left'] > accounts[a['parent_id'][0]]['parent_left']
        assert a['parent_right'] < accounts[a['parent_id'][0]]['parent_right']
    # Every node's interval is non-empty.
    assert a['parent_left'] < a['parent_right']
    for a2 in account_lists:
        # No interval straddles another's left boundary (partial overlap).
        assert not ((a2['parent_right']>a['parent_left']) and
            (a2['parent_left']<a['parent_left']) and
            (a2['parent_right']<a['parent_right']))
        # Direct children are strictly contained in this node's interval.
        if a2['parent_id']==a['id']:
            assert (a2['parent_left']>a['parent_left']) and (a2['parent_right']<a['parent_right'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/cython/src/Cython/Compiler/MemoryView.py | 95 | 33761 | from Errors import CompileError, error
import ExprNodes
from ExprNodes import IntNode, NameNode, AttributeNode
import Options
from Code import UtilityCode, TempitaUtilityCode
from UtilityCode import CythonUtilityCode
import Buffer
import PyrexTypes
import ModuleNode
# Compile-time error messages for malformed memoryview axis specifications
# (the start:stop:step slots of a cython.view type declaration).
START_ERR = "Start must not be given."
STOP_ERR = "Axis specification only allowed in the 'step' slot."
STEP_ERR = "Step must be omitted, 1, or a valid specifier."
BOTH_CF_ERR = "Cannot specify an array that is both C and Fortran contiguous."
INVALID_ERR = "Invalid axis specification."
NOT_CIMPORTED_ERR = "Variable was not cimported from cython.view"
EXPR_ERR = "no expressions allowed in axis spec, only names and literals."
CF_ERR = "Invalid axis specification for a C/Fortran contiguous array."
ERR_UNINITIALIZED = ("Cannot check if memoryview %s is initialized without the "
                     "GIL, consider using initializedcheck(False)")
def err_if_nogil_initialized_check(pos, env, name='variable'):
    # Intentional no-op: the uninitialized-memoryview check used to be a
    # compile-time error here, but is now raised at runtime instead.  The
    # old implementation is kept below for reference.
    "This raises an exception at runtime now"
    pass
    #if env.nogil and env.directives['initializedcheck']:
        #error(pos, ERR_UNINITIALIZED % name)
def concat_flags(*flags):
    """Join C flag macro names with '|' and wrap the result in parentheses."""
    return "(" + "|".join(flags) + ")"
format_flag = "PyBUF_FORMAT"

# Buffer-acquisition flag expressions used when acquiring a buffer for each
# contiguity requirement.
memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_full_access = "PyBUF_FULL"
#memview_strided_access = "PyBUF_STRIDED"
memview_strided_access = "PyBUF_RECORDS"

# C macro names encoding the per-axis access/packing spec.
MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
MEMVIEW_FULL = '__Pyx_MEMVIEW_FULL'
MEMVIEW_CONTIG = '__Pyx_MEMVIEW_CONTIG'
MEMVIEW_STRIDED= '__Pyx_MEMVIEW_STRIDED'
MEMVIEW_FOLLOW = '__Pyx_MEMVIEW_FOLLOW'

# Spec-name -> C macro.
_spec_to_const = {
    'direct' : MEMVIEW_DIRECT,
    'ptr'    : MEMVIEW_PTR,
    'full'   : MEMVIEW_FULL,
    'contig' : MEMVIEW_CONTIG,
    'strided': MEMVIEW_STRIDED,
    'follow' : MEMVIEW_FOLLOW,
    }

# Spec-name -> one-letter abbreviation (used to build specialized names).
_spec_to_abbrev = {
    'direct'  : 'd',
    'ptr'     : 'p',
    'full'    : 'f',
    'contig'  : 'c',
    'strided' : 's',
    'follow'  : '_',
    }

# Static initializer for an empty __Pyx_memviewslice struct.
memslice_entry_init = "{ 0, 0, { 0 }, { 0 }, { 0 } }"

memview_name = u'memoryview'
memview_typeptr_cname = '__pyx_memoryview_type'
memview_objstruct_cname = '__pyx_memoryview_obj'
memviewslice_cname = u'__Pyx_memviewslice'
def put_init_entry(mv_cname, code):
    """Emit C statements NULL-initializing the memoryview slice *mv_cname*."""
    for field in ("data", "memview"):
        code.putln("%s.%s = NULL;" % (mv_cname, field))
def mangle_dtype_name(dtype):
    """Return the mangled C identifier fragment for *dtype*.

    Thin delegation to Buffer.mangle_dtype_name.
    """
    # a dumb wrapper for now; move Buffer.mangle_dtype_name in here later?
    import Buffer
    return Buffer.mangle_dtype_name(dtype)

#def axes_to_str(axes):
#    return "".join([access[0].upper()+packing[0] for (access, packing) in axes])
def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code,
                                have_gil=False, first_assignment=True):
    "We can avoid decreffing the lhs if we know it is the first assignment"
    assert rhs.type.is_memoryviewslice

    # A temp or simple rhs can be used directly; otherwise evaluate it once
    # into a freshly allocated temp so it is not re-evaluated below.
    pretty_rhs = rhs.result_in_temp() or rhs.is_simple()
    if pretty_rhs:
        rhstmp = rhs.result()
    else:
        rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
        code.putln("%s = %s;" % (rhstmp, rhs.result_as(lhs_type)))

    # Allow uninitialized assignment
    #code.putln(code.put_error_if_unbound(lhs_pos, rhs.entry))
    put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code,
                               have_gil=have_gil, first_assignment=first_assignment)

    if not pretty_rhs:
        code.funcstate.release_temp(rhstmp)
def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code,
                               have_gil=False, first_assignment=False):
    """Emit C code assigning slice *rhs_cname* to *lhs_cname*.

    Unless this is the first assignment, the lhs's previous value is
    xdecref'd first; a non-temp rhs is turned into an owned reference so
    the lhs holds its own reference after the assignment.
    """
    if not first_assignment:
        code.put_xdecref_memoryviewslice(lhs_cname, have_gil=have_gil)

    if not rhs.result_in_temp():
        rhs.make_owned_memoryviewslice(code)

    code.putln("%s = %s;" % (lhs_cname, rhs_cname))
def get_buf_flags(specs):
    """Map an axis-spec list to the matching PyBUF_* buffer-flag expression."""
    c_contig, f_contig = is_cf_contig(specs)
    if c_contig:
        return memview_c_contiguous
    if f_contig:
        return memview_f_contiguous

    access, packing = zip(*specs)
    # Indirect ('ptr') or generic ('full') access requires full buffer info.
    if 'full' in access or 'ptr' in access:
        return memview_full_access
    return memview_strided_access
def insert_newaxes(memoryviewtype, n):
    """Return a copy of *memoryviewtype* with *n* leading strided axes added."""
    padded_axes = [('direct', 'strided')] * n + list(memoryviewtype.axes)
    return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, padded_axes)
def broadcast_types(src, dst):
    """Pad the lower-dimensional of *src*/*dst* with leading axes so ranks match."""
    diff = abs(src.ndim - dst.ndim)
    if src.ndim < dst.ndim:
        return insert_newaxes(src, diff), dst
    return src, insert_newaxes(dst, diff)
def src_conforms_to_dst(src, dst, broadcast=False):
    """Return True if a slice of type *src* may be assigned to one of *dst*.

    Conformance requires identical dtypes, equal rank (after optional
    broadcasting), and compatible per-axis specs: any spec conforms to
    itself, 'direct' and 'ptr' conform to 'full', and 'contig' and
    'follow' conform to 'strided'.  Any other combination is a mismatch.
    """
    if src.dtype != dst.dtype:
        return False

    if src.ndim != dst.ndim:
        if not broadcast:
            return False
        src, dst = broadcast_types(src, dst)

    for (src_access, src_packing), (dst_access, dst_packing) in zip(src.axes, dst.axes):
        access_ok = src_access == dst_access or dst_access == 'full'
        packing_ok = src_packing == dst_packing or dst_packing == 'strided'
        if not (access_ok and packing_ok):
            return False

    return True
def valid_memslice_dtype(dtype, i=0):
    """
    Return whether type dtype can be used as the base type of a
    memoryview slice.

    We support structs, numeric types and objects.

    *i* tracks array-nesting depth for the recursive array case below.
    """
    # Integer-valued complex types have no buffer format.
    if dtype.is_complex and dtype.real_type.is_int:
        return False

    if dtype is PyrexTypes.c_bint_type:
        return False

    # A struct is valid only if every member type is valid.
    if dtype.is_struct and dtype.kind == 'struct':
        for member in dtype.scope.var_entries:
            if not valid_memslice_dtype(member.type):
                return False

        return True

    return (
        dtype.is_error or
        # Pointers are not valid (yet)
        # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
        # Nested arrays are allowed up to 8 levels deep.
        (dtype.is_array and i < 8 and
         valid_memslice_dtype(dtype.base_type, i + 1)) or
        dtype.is_numeric or
        dtype.is_pyobject or
        dtype.is_fused or # accept this as it will be replaced by specializations later
        (dtype.is_typedef and valid_memslice_dtype(dtype.typedef_base_type))
    )
def validate_memslice_dtype(pos, dtype):
    """Report a compile error at *pos* unless *dtype* is a valid slice base type."""
    if valid_memslice_dtype(dtype):
        return
    error(pos, "Invalid base type for memoryview slice: %s" % dtype)
class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
    """BufferEntry specialization generating element-lookup and slicing C
    code against a __Pyx_memviewslice struct instead of a Py_buffer."""

    def __init__(self, entry):
        self.entry = entry
        self.type = entry.type
        self.cname = entry.cname
        # All pointer arithmetic starts from the slice's raw data pointer.
        self.buf_ptr = "%s.data" % self.cname

        dtype = self.entry.type.dtype
        dtype = PyrexTypes.CPtrType(dtype)

        # Type of buf_ptr: pointer to the element dtype.
        self.buf_ptr_type = dtype

    def get_buf_suboffsetvars(self):
        return self._for_all_ndim("%s.suboffsets[%d]")

    def get_buf_stridevars(self):
        return self._for_all_ndim("%s.strides[%d]")

    def get_buf_shapevars(self):
        return self._for_all_ndim("%s.shape[%d]")

    def generate_buffer_lookup_code(self, code, index_cnames):
        # Pair each index with its dimension's (access, packing) spec.
        axes = [(dim, index_cnames[dim], access, packing)
                for dim, (access, packing) in enumerate(self.type.axes)]
        return self._generate_buffer_lookup_code(code, axes)

    def _generate_buffer_lookup_code(self, code, axes, cast_result=True):
        """Build the C expression locating one element, dimension by
        dimension, choosing the pointer arithmetic per axis spec."""
        bufp = self.buf_ptr
        type_decl = self.type.dtype.declaration_code("")

        for dim, index, access, packing in axes:
            shape = "%s.shape[%d]" % (self.cname, dim)
            stride = "%s.strides[%d]" % (self.cname, dim)
            suboffset = "%s.suboffsets[%d]" % (self.cname, dim)

            flag = get_memoryview_flag(access, packing)

            if flag in ("generic", "generic_contiguous"):
                # Note: we cannot do cast tricks to avoid stride multiplication
                # for generic_contiguous, as we may have to do (dtype *)
                # or (dtype **) arithmetic, we won't know which unless
                # we check suboffsets
                code.globalstate.use_utility_code(memviewslice_index_helpers)
                bufp = ('__pyx_memviewslice_index_full(%s, %s, %s, %s)' %
                        (bufp, index, stride, suboffset))

            elif flag == "indirect":
                bufp = "(%s + %s * %s)" % (bufp, index, stride)
                bufp = ("(*((char **) %s) + %s)" % (bufp, suboffset))

            elif flag == "indirect_contiguous":
                # Note: we do char ** arithmetic
                bufp = "(*((char **) %s + %s) + %s)" % (bufp, index, suboffset)

            elif flag == "strided":
                bufp = "(%s + %s * %s)" % (bufp, index, stride)

            else:
                assert flag == 'contiguous', flag
                # Contiguous: index directly in units of the element type.
                bufp = '((char *) (((%s *) %s) + %s))' % (type_decl, bufp, index)

            bufp = '( /* dim=%d */ %s )' % (dim, bufp)

        if cast_result:
            return "((%s *) %s)" % (type_decl, bufp)

        return bufp

    def generate_buffer_slice_code(self, code, indices, dst, have_gil,
                                   have_slices, directives):
        """
        Slice a memoryviewslice.

        indices - list of index nodes. If not a SliceNode, or NoneNode,
                  then it must be coercible to Py_ssize_t

        Simply call __pyx_memoryview_slice_memviewslice with the right
        arguments.
        """
        new_ndim = 0
        src = self.cname

        def load_slice_util(name, dict):
            # Load a Tempita C-code template specialized with `dict`.
            proto, impl = TempitaUtilityCode.load_as_string(
                name, "MemoryView_C.c", context=dict)
            return impl

        all_dimensions_direct = True
        for access, packing in self.type.axes:
            if access != 'direct':
                all_dimensions_direct = False
                break

        # Only all-direct, non-sliced access can skip suboffset tracking.
        no_suboffset_dim = all_dimensions_direct and not have_slices
        if not no_suboffset_dim:
            suboffset_dim = code.funcstate.allocate_temp(
                PyrexTypes.c_int_type, False)
            code.putln("%s = -1;" % suboffset_dim)

        code.putln("%(dst)s.data = %(src)s.data;" % locals())
        code.putln("%(dst)s.memview = %(src)s.memview;" % locals())
        code.put_incref_memoryviewslice(dst)

        dim = -1
        for index in indices:
            error_goto = code.error_goto(index.pos)
            if not index.is_none:
                # A None index (newaxis) does not consume a source dimension.
                dim += 1
                access, packing = self.type.axes[dim]

            if isinstance(index, ExprNodes.SliceNode):
                # slice, unspecified dimension, or part of ellipsis
                # NOTE: d = locals() feeds the template context, so local
                # variable names here are part of the template contract.
                d = locals()
                for s in "start stop step".split():
                    idx = getattr(index, s)
                    have_idx = d['have_' + s] = not idx.is_none
                    if have_idx:
                        d[s] = idx.result()
                    else:
                        d[s] = "0"

                if (not d['have_start'] and
                    not d['have_stop'] and
                    not d['have_step']):
                    # full slice (:), simply copy over the extent, stride
                    # and suboffset. Also update suboffset_dim if needed
                    d['access'] = access
                    code.put(load_slice_util("SimpleSlice", d))
                else:
                    code.put(load_slice_util("ToughSlice", d))

                new_ndim += 1

            elif index.is_none:
                # newaxis
                attribs = [('shape', 1), ('strides', 0), ('suboffsets', -1)]
                for attrib, value in attribs:
                    code.putln("%s.%s[%d] = %d;" % (dst, attrib, new_ndim, value))

                new_ndim += 1
            else:
                # normal index
                idx = index.result()

                if access == 'direct':
                    indirect = False
                else:
                    indirect = True
                    generic = (access == 'full')
                    if new_ndim != 0:
                        return error(index.pos,
                                     "All preceding dimensions must be "
                                     "indexed and not sliced")

                wraparound = int(directives['wraparound'])
                boundscheck = int(directives['boundscheck'])
                # As above, locals() is the template context.
                d = locals()
                code.put(load_slice_util("SliceIndex", d))

        if not no_suboffset_dim:
            code.funcstate.release_temp(suboffset_dim)
def empty_slice(pos):
    """Build a SliceNode equivalent to a full ':' slice at *pos*."""
    nil = ExprNodes.NoneNode(pos)
    return ExprNodes.SliceNode(pos, start=nil, stop=nil, step=nil)
def unellipsify(indices, newaxes, ndim):
    """Expand any Ellipsis in *indices* into explicit full slices.

    Returns ``(have_slices, result)`` where *have_slices* reports whether
    the expanded list contains any slice or newaxis entry, and *result*
    is padded with trailing full slices so that it supplies *ndim* real
    (non-newaxis) indices.
    """
    result = []
    seen_ellipsis = False
    have_slices = False
    n_indices = len(indices) - len(newaxes)

    for index in indices:
        if isinstance(index, ExprNodes.EllipsisNode):
            have_slices = True
            full_slice = empty_slice(index.pos)

            if seen_ellipsis:
                # Only the first ellipsis expands; later ones act like ':'.
                result.append(full_slice)
            else:
                result.extend([full_slice] * (ndim - n_indices + 1))
                seen_ellipsis = True
        else:
            have_slices = (have_slices or
                           isinstance(index, ExprNodes.SliceNode) or
                           index.is_none)
            result.append(index)

    # Pad with trailing full slices up to the source's dimensionality.
    result_length = len(result) - len(newaxes)
    if result_length < ndim:
        have_slices = True
        result.extend([empty_slice(indices[-1].pos)] * (ndim - result_length))

    return have_slices, result
def get_memoryview_flag(access, packing):
    """Translate an (access, packing) axis spec into a lookup-strategy name."""
    strided_like = packing in ('strided', 'follow')

    if access == 'full' and strided_like:
        return 'generic'
    elif access == 'full' and packing == 'contig':
        return 'generic_contiguous'
    elif access == 'ptr' and strided_like:
        return 'indirect'
    elif access == 'ptr' and packing == 'contig':
        return 'indirect_contiguous'
    elif access == 'direct' and strided_like:
        return 'strided'
    else:
        # The only remaining valid combination.
        assert (access, packing) == ('direct', 'contig'), (access, packing)
        return 'contiguous'
def get_is_contig_func_name(c_or_f, ndim):
    """Name of the generated helper checking c/f-contiguity for *ndim* dims."""
    return "__pyx_memviewslice_is_{0}_contig{1}".format(c_or_f, ndim)
def get_is_contig_utility(c_contig, ndim):
    """Load the C/Fortran contiguity-check utility specialized for *ndim*."""
    ctx = dict(context, ndim=ndim)
    template = "MemviewSliceIsCContig" if c_contig else "MemviewSliceIsFContig"
    return load_memview_c_utility(template, ctx,
                                  requires=[is_contig_utility])
def copy_src_to_dst_cname():
    """C name of the runtime helper that copies one slice into another."""
    return "__pyx_memoryview_copy_contents"
def verify_direct_dimensions(node):
    """Raise a compile error unless every axis of *node*'s memoryview type
    uses 'direct' access; indirect ('ptr'/'full') dimensions are not
    supported by the copy/fill helpers that call this.

    Fix: the error call previously referenced ``self.pos``, but this is a
    module-level function with no ``self`` — reporting the error would
    itself crash with a NameError.  Use ``node.pos`` instead.
    """
    for access, packing in node.type.axes:
        if access != 'direct':
            error(node.pos, "All dimensions must be direct")
def copy_broadcast_memview_src_to_dst(src, dst, code):
    """
    Copy the contents of slice src to slice dst. Does not support indirect
    slices.
    """
    verify_direct_dimensions(src)
    verify_direct_dimensions(dst)

    # The runtime copy helper returns a negative value on failure, so wrap
    # the call in a generated error check anchored at dst's position.
    code.putln(code.error_goto_if_neg(
        "%s(%s, %s, %d, %d, %d)" % (copy_src_to_dst_cname(),
                                    src.result(), dst.result(),
                                    src.type.ndim, dst.type.ndim,
                                    dst.type.dtype.is_pyobject),
        dst.pos))
def get_1d_fill_scalar_func(type, code):
    """Emit (once) the scalar-fill utility for *type* and return its C name."""
    dtype = type.dtype
    dtype_name = mangle_dtype_name(dtype)

    ctx = dict(dtype_name=dtype_name,
               type_decl=dtype.declaration_code(""))
    code.globalstate.use_utility_code(
        load_memview_c_utility("FillStrided1DScalar", ctx))

    return '__pyx_fill_slice_%s' % dtype_name
def assign_scalar(dst, scalar, code):
    """
    Assign a scalar to a slice. dst must be a temp, scalar will be assigned
    to a correct type and not just something assignable.
    """
    verify_direct_dimensions(dst)

    dtype = dst.type.dtype
    type_decl = dtype.declaration_code("")
    slice_decl = dst.type.declaration_code("")

    code.begin_block()
    # Evaluate the scalar once into a correctly-typed C temporary.
    code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
    if dst.result_in_temp() or (dst.base.is_name and
                                isinstance(dst.index, ExprNodes.EllipsisNode)):
        dst_temp = dst.result()
    else:
        # Otherwise copy the slice struct so iteration uses a stable temp.
        code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, dst.result()))
        dst_temp = "__pyx_temp_slice"

    # with slice_iter(dst.type, dst_temp, dst.type.ndim, code) as p:
    slice_iter_obj = slice_iter(dst.type, dst_temp, dst.type.ndim, code)
    p = slice_iter_obj.start_loops()

    # Object dtype needs refcount bookkeeping: release the old element and
    # take a new reference for each stored copy of the scalar.
    if dtype.is_pyobject:
        code.putln("Py_DECREF(*(PyObject **) %s);" % p)

    code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))

    if dtype.is_pyobject:
        code.putln("Py_INCREF(__pyx_temp_scalar);")

    slice_iter_obj.end_loops()
    code.end_block()
def slice_iter(slice_type, slice_temp, ndim, code):
    """Select the contiguous or strided loop-emitting helper for *slice_type*."""
    contiguous = slice_type.is_c_contig or slice_type.is_f_contig
    iter_cls = ContigSliceIter if contiguous else StridedSliceIter
    return iter_cls(slice_type, slice_temp, ndim, code)
class SliceIter(object):
    """Base class for helpers that emit C loops walking a slice's elements.

    Subclasses implement start_loops()/end_loops(); this base only records
    the parameters the emitted loops need.
    """

    def __init__(self, slice_type, slice_temp, ndim, code):
        self.code = code
        self.ndim = ndim
        self.slice_type = slice_type
        self.slice_temp = slice_temp
class ContigSliceIter(SliceIter):
    """Emits a single flat C loop over a contiguous slice's elements."""

    def start_loops(self):
        """Open the loop; returns the C name of the element pointer."""
        code = self.code
        code.begin_block()

        type_decl = self.slice_type.dtype.declaration_code("")

        # Total element count: product of all extents.
        total_size = ' * '.join("%s.shape[%d]" % (self.slice_temp, i)
                                for i in range(self.ndim))
        code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size)
        code.putln("Py_ssize_t __pyx_temp_idx;")
        code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % (
            type_decl, type_decl, self.slice_temp))
        code.putln("for (__pyx_temp_idx = 0; "
                   "__pyx_temp_idx < __pyx_temp_extent; "
                   "__pyx_temp_idx++) {")

        return "__pyx_temp_pointer"

    def end_loops(self):
        """Close the loop; the pointer advances one element per iteration."""
        self.code.putln("__pyx_temp_pointer += 1;")
        self.code.putln("}")
        self.code.end_block()
class StridedSliceIter(SliceIter):
    """Emits one nested C loop per dimension, advancing by strides."""

    def start_loops(self):
        """Open the nested loops; returns the innermost element pointer."""
        code = self.code
        code.begin_block()

        # Per-dimension extent, stride, cursor pointer and loop index.
        for i in range(self.ndim):
            t = i, self.slice_temp, i
            code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t)
            code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t)
            code.putln("char *__pyx_temp_pointer_%d;" % i)
            code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i)

        code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_temp)

        for i in range(self.ndim):
            if i > 0:
                # Each inner cursor restarts from its parent's position.
                code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;" % (i, i - 1))

            code.putln("for (__pyx_temp_idx_%d = 0; "
                       "__pyx_temp_idx_%d < __pyx_temp_extent_%d; "
                       "__pyx_temp_idx_%d++) {" % (i, i, i, i))

        return "__pyx_temp_pointer_%d" % (self.ndim - 1)

    def end_loops(self):
        """Close the loops innermost-first, bumping each cursor by its stride."""
        code = self.code
        for i in range(self.ndim - 1, -1, -1):
            code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;" % (i, i))
            code.putln("}")

        code.end_block()
def copy_c_or_fortran_cname(memview):
    """Name of the specialized helper copying *memview* into a new
    C- or Fortran-contiguous slice."""
    layout = 'c' if memview.is_c_contig else 'f'
    return "__pyx_memoryview_copy_slice_%s_%s" % (
        memview.specialization_suffix(), layout)
def get_copy_new_utility(pos, from_memview, to_memview):
    """Load the utility implementing a copy of *from_memview* into a newly
    allocated slice of type *to_memview* (which must be C- or F-contiguous).

    Reports a compile error and returns None for unsupported combinations.
    """
    if from_memview.dtype != to_memview.dtype:
        return error(pos, "dtypes must be the same!")
    if len(from_memview.axes) != len(to_memview.axes):
        return error(pos, "number of dimensions must be same")
    if not (to_memview.is_c_contig or to_memview.is_f_contig):
        return error(pos, "to_memview must be c or f contiguous.")

    # Source must be entirely directly-accessible; indirect dimensions are
    # not handled by the generated copy code.
    for (access, packing) in from_memview.axes:
        if access != 'direct':
            return error(
                pos, "cannot handle 'full' or 'ptr' access at this time.")

    if to_memview.is_c_contig:
        mode = 'c'
        contig_flag = memview_c_contiguous
    elif to_memview.is_f_contig:
        mode = 'fortran'
        contig_flag = memview_f_contiguous

    return load_memview_c_utility(
        "CopyContentsUtility",
        context=dict(
            context,
            mode=mode,
            dtype_decl=to_memview.dtype.declaration_code(''),
            contig_flag=contig_flag,
            ndim=to_memview.ndim,
            func_cname=copy_c_or_fortran_cname(to_memview),
            dtype_is_object=int(to_memview.dtype.is_pyobject)),
        requires=[copy_contents_new_utility])
def get_axes_specs(env, axes):
    '''
    get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.

    access is one of 'full', 'ptr' or 'direct'
    packing is one of 'contig', 'strided' or 'follow'

    Raises CompileError at the offending axis on any invalid declaration.
    '''
    cythonscope = env.global_scope().context.cython_scope
    cythonscope.load_cythonscope()
    viewscope = cythonscope.viewscope

    access_specs = tuple([viewscope.lookup(name)
                          for name in ('full', 'direct', 'ptr')])
    packing_specs = tuple([viewscope.lookup(name)
                           for name in ('contig', 'strided', 'follow')])

    is_f_contig, is_c_contig = False, False
    default_access, default_packing = 'direct', 'strided'
    cf_access, cf_packing = default_access, 'follow'

    axes_specs = []
    # analyse all axes.
    for idx, axis in enumerate(axes):
        # Only the step of a memoryview axis may carry information.
        if not axis.start.is_none:
            raise CompileError(axis.start.pos, START_ERR)
        if not axis.stop.is_none:
            raise CompileError(axis.stop.pos, STOP_ERR)
        if axis.step.is_none:
            axes_specs.append((default_access, default_packing))
        elif isinstance(axis.step, IntNode):
            # the packing for the ::1 axis is contiguous,
            # all others are cf_packing.
            if axis.step.compile_time_value(env) != 1:
                raise CompileError(axis.step.pos, STEP_ERR)
            # 'cfcontig' is a placeholder resolved to 'contig' below.
            axes_specs.append((cf_access, 'cfcontig'))
        elif isinstance(axis.step, (NameNode, AttributeNode)):
            entry = _get_resolved_spec(env, axis.step)
            if entry.name in view_constant_to_access_packing:
                axes_specs.append(view_constant_to_access_packing[entry.name])
            else:
                # BUG FIX: previously raised the undefined name
                # 'CompilerError', turning this diagnostic into a NameError.
                raise CompileError(axis.step.pos, INVALID_ERR)
        else:
            raise CompileError(axis.step.pos, INVALID_ERR)

    # First, find out if we have a ::1 somewhere
    contig_dim = 0
    is_contig = False
    for idx, (access, packing) in enumerate(axes_specs):
        if packing == 'cfcontig':
            if is_contig:
                # BUG FIX: report the error at the second ::1 axis itself;
                # this used the stale 'axis' variable left over from the
                # loop above (always the last axis).
                raise CompileError(axes[idx].step.pos, BOTH_CF_ERR)
            contig_dim = idx
            axes_specs[idx] = (access, 'contig')
            is_contig = True

    if is_contig:
        # We have a ::1 somewhere, see if we're C or Fortran contiguous
        if contig_dim == len(axes) - 1:
            is_c_contig = True
        else:
            is_f_contig = True
            if contig_dim and not axes_specs[contig_dim - 1][0] in ('full', 'ptr'):
                raise CompileError(axes[contig_dim].pos,
                                   "Fortran contiguous specifier must follow an indirect dimension")
        if is_c_contig:
            # Contiguous in the last dimension, find the last indirect dimension
            contig_dim = -1
            for idx, (access, packing) in enumerate(reversed(axes_specs)):
                if access in ('ptr', 'full'):
                    contig_dim = len(axes) - idx - 1
        # Replace 'strided' with 'follow' for any dimension following the last
        # indirect dimension, the first dimension or the dimension following
        # the ::1.
        #               int[::indirect, ::1, :, :]
        #                                    ^  ^
        #               int[::indirect, :, :, ::1]
        #                               ^  ^
        start = contig_dim + 1
        stop = len(axes) - is_c_contig
        for idx, (access, packing) in enumerate(axes_specs[start:stop]):
            idx = contig_dim + 1 + idx
            if access != 'direct':
                raise CompileError(axes[idx].pos,
                                   "Indirect dimension may not follow "
                                   "Fortran contiguous dimension")
            if packing == 'contig':
                raise CompileError(axes[idx].pos,
                                   "Dimension may not be contiguous")
            axes_specs[idx] = (access, cf_packing)
        if is_c_contig:
            # For C contiguity, we need to fix the 'contig' dimension
            # after the loop
            a, p = axes_specs[-1]
            axes_specs[-1] = a, 'contig'

    validate_axes_specs([axis.start.pos for axis in axes],
                        axes_specs,
                        is_c_contig,
                        is_f_contig)

    return axes_specs
def validate_axes(pos, axes):
    """Check the axis count against Options.buffer_max_dims.

    Reports an error at *pos* and returns False when too many dimensions
    are used; returns True otherwise.
    """
    if len(axes) < Options.buffer_max_dims:
        return True
    error(pos, "More dimensions than the maximum number"
               " of buffer dimensions were used.")
    return False
def all(it):
    """Compatibility fallback for the ``all`` builtin: True iff every item
    of *it* is truthy (True for an empty iterable)."""
    for element in it:
        if element:
            continue
        return False
    return True
def is_cf_contig(specs):
    """Classify an (access, packing) spec list as C- and/or Fortran-
    contiguous.  Returns ``(is_c_contig, is_f_contig)``; at most one of
    the two flags is True, with C contiguity taking precedence for the
    single-axis direct-contig case."""
    def _all_follow(axes):
        return all([axis == ('direct', 'follow') for axis in axes])

    if specs == [('direct', 'contig')]:
        # A single direct contiguous axis counts as C contiguous.
        return True, False
    if specs[-1] == ('direct', 'contig') and _all_follow(specs[:-1]):
        # c_contiguous: 'follow', 'follow', ..., 'follow', 'contig'
        return True, False
    if (len(specs) > 1 and specs[0] == ('direct', 'contig')
            and _all_follow(specs[1:])):
        # f_contiguous: 'contig', 'follow', 'follow', ..., 'follow'
        return False, True
    return False, False
def get_mode(specs):
    """Return the memoryview mode for an axes-spec list: 'c' or 'fortran'
    for contiguous views, 'full' when any axis is indirect, else
    'strided'."""
    c_contig, f_contig = is_cf_contig(specs)
    if c_contig:
        return 'c'
    if f_contig:
        return 'fortran'
    if any([access in ('ptr', 'full') for access, _packing in specs]):
        return 'full'
    return 'strided'
# Maps the cython.view constants (e.g. cython.view.generic) to the
# (access, packing) spec pair they denote for a memoryview axis.
view_constant_to_access_packing = {
    'generic': ('full', 'strided'),
    'strided': ('direct', 'strided'),
    'indirect': ('ptr', 'strided'),
    'generic_contiguous': ('full', 'contig'),
    'contiguous': ('direct', 'contig'),
    'indirect_contiguous': ('ptr', 'contig'),
}
def validate_axes_specs(positions, specs, is_c_contig, is_f_contig):
    # Sanity-check a full list of (access, packing) axis specs, raising
    # CompileError at the corresponding source position on any violation.
    packing_specs = ('contig', 'strided', 'follow')
    access_specs = ('direct', 'ptr', 'full')
    # is_c_contig, is_f_contig = is_cf_contig(specs)
    # NOTE(review): has_follow and has_generic_contig are assigned but
    # never read in this function.
    has_contig = has_follow = has_strided = has_generic_contig = False
    # Index of the last 'ptr' dimension: a direct contiguous axis may only
    # appear immediately after it, or as the very last dimension.
    last_indirect_dimension = -1
    for idx, (access, packing) in enumerate(specs):
        if access == 'ptr':
            last_indirect_dimension = idx
    for idx, pos, (access, packing) in zip(xrange(len(specs)), positions, specs):
        if not (access in access_specs and
                packing in packing_specs):
            raise CompileError(pos, "Invalid axes specification.")
        if packing == 'strided':
            has_strided = True
        elif packing == 'contig':
            if has_contig:
                raise CompileError(pos, "Only one direct contiguous "
                                        "axis may be specified.")
            valid_contig_dims = last_indirect_dimension + 1, len(specs) - 1
            if idx not in valid_contig_dims and access != 'ptr':
                if last_indirect_dimension + 1 != len(specs) - 1:
                    dims = "dimensions %d and %d" % valid_contig_dims
                else:
                    dims = "dimension %d" % valid_contig_dims[0]
                raise CompileError(pos, "Only %s may be contiguous and direct" % dims)
            has_contig = access != 'ptr'
        elif packing == 'follow':
            if has_strided:
                raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
            if not (is_c_contig or is_f_contig):
                raise CompileError(pos, "Invalid use of the follow specifier.")
        # An indirect dimension resets the strided tracking: 'follow' is
        # judged per contiguous sub-slice.
        if access in ('ptr', 'full'):
            has_strided = False
def _get_resolved_spec(env, spec):
    """Resolve an axis-spec node to its cython.view scope entry.

    *spec* must be a NameNode or an AttributeNode; anything else is a
    compile error.
    """
    if isinstance(spec, NameNode):
        return _resolve_NameNode(env, spec)
    if isinstance(spec, AttributeNode):
        return _resolve_AttributeNode(env, spec)
    raise CompileError(spec.pos, INVALID_ERR)
def _resolve_NameNode(env, node):
    """Map a bare name to its entry in the cython.view scope.

    Raises CompileError when the name cannot be looked up in *env* or was
    not cimported into the view scope.
    """
    try:
        # env.lookup() returns None for unknown names, making .name raise.
        resolved_name = env.lookup(node.name).name
    except AttributeError:
        raise CompileError(node.pos, INVALID_ERR)
    view_scope = env.global_scope().context.cython_scope.viewscope
    entry = view_scope.lookup(resolved_name)
    if entry is None:
        raise CompileError(node.pos, NOT_CIMPORTED_ERR)
    return entry
def _resolve_AttributeNode(env, node):
    """Resolve a dotted reference (e.g. ``cython.view.contiguous``) to a
    scope entry by walking the module path from *env*."""
    # Unwind the attribute chain into ['module', ..., 'attribute'].
    path = []
    while isinstance(node, AttributeNode):
        path.append(node.attribute)
        node = node.obj
    if not isinstance(node, NameNode):
        raise CompileError(node.pos, EXPR_ERR)
    path.append(node.name)
    path.reverse()
    # must be at least 1 module name, o/w not an AttributeNode.
    modnames = path[:-1]
    assert modnames
    scope = env
    for modname in modnames:
        mod = scope.lookup(modname)
        if not mod or not mod.as_module:
            raise CompileError(
                node.pos, "undeclared name not builtin: %s" % modname)
        scope = mod.as_module
    entry = scope.lookup(path[-1])
    if not entry:
        raise CompileError(node.pos, "No such attribute '%s'" % path[-1])
    return entry
#
### Utility loading
#
def load_memview_cy_utility(util_code_name, context=None, **kwargs):
    """Load a Cython-level memoryview utility from MemoryView.pyx."""
    return CythonUtilityCode.load(
        util_code_name, "MemoryView.pyx", context=context, **kwargs)
def load_memview_c_utility(util_code_name, context=None, **kwargs):
    """Load a C-level memoryview utility from MemoryView_C.c.

    A template *context* selects the Tempita-preprocessed variant.
    """
    if context is None:
        return UtilityCode.load(util_code_name, "MemoryView_C.c", **kwargs)
    return TempitaUtilityCode.load(util_code_name, "MemoryView_C.c",
                                   context=context, **kwargs)
def use_cython_array_utility_code(env):
    """Mark the cython.view array wrapper as used so its support code is
    emitted for the current module."""
    scope = env.global_scope().context.cython_scope
    scope.load_cythonscope()
    scope.viewscope.lookup('array_cwrapper').used = True
# Shared template context for the memoryview utility codes below.
context = {
    'memview_struct_name': memview_objstruct_cname,
    'max_dims': Options.buffer_max_dims,
    'memviewslice_name': memviewslice_cname,
    'memslice_init': memslice_entry_init,
}

# Declaration of the memoryview slice struct; emitted before any type
# declarations that embed it.
memviewslice_declare_code = load_memview_c_utility(
    "MemviewSliceStruct",
    proto_block='utility_code_proto_before_types',
    context=context,
    requires=[])

# Atomic helpers used by the slice refcounting code.
atomic_utility = load_memview_c_utility("Atomics", context,
                proto_block='utility_code_proto_before_types')

# Slice initialisation/acquisition/release support.
memviewslice_init_code = load_memview_c_utility(
    "MemviewSliceInit",
    context=dict(context, BUF_MAX_NDIMS=Options.buffer_max_dims),
    requires=[memviewslice_declare_code,
              Buffer.acquire_utility_code,
              atomic_utility],
)

memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex")

typeinfo_to_format_code = load_memview_cy_utility(
    "BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code])

is_contig_utility = load_memview_c_utility("MemviewSliceIsContig", context)
overlapping_utility = load_memview_c_utility("OverlappingSlices", context)

copy_contents_new_utility = load_memview_c_utility(
    "MemviewSliceCopyTemplate",
    context,
    requires=[], # require cython_array_utility_code
)

# The Cython-level memoryview/array implementation itself.
view_utility_code = load_memview_cy_utility(
    "View.MemoryView",
    context=context,
    requires=[Buffer.GetAndReleaseBufferUtilityCode(),
              Buffer.buffer_struct_declare_code,
              Buffer.empty_bufstruct_utility,
              memviewslice_init_code,
              is_contig_utility,
              overlapping_utility,
              copy_contents_new_utility,
              ModuleNode.capsule_utility_code],
)

# Names from the view scope that user code is allowed to access.
view_utility_whitelist = ('array', 'memoryview', 'array_cwrapper',
                          'generic', 'strided', 'indirect', 'contiguous',
                          'indirect_contiguous')
# Registered after the fact to break the circular dependency:
# view_utility_code itself lists copy_contents_new_utility in its requires.
# BUG FIX: stray " | mit |" dataset/license residue fused onto the second
# statement made it a syntax error; removed.
memviewslice_declare_code.requires.append(view_utility_code)
copy_contents_new_utility.requires.append(view_utility_code)
FranMachio/plugin.video.Machio.fran | servers/jumbofiles.py | 44 | 2439 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para jumbofiles
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a jumbofiles page into its direct download link.

    Returns a list with a single [label, url] pair for this host.
    """
    logger.info("[jumbofiles.py] get_video_url(page_url='%s')" % page_url)
    page = scrapertools.cache_page(page_url)

    # Hidden fields of the download form, e.g.
    # op=download2&id=oiyetnk5vwzf&rand=m2080mem&referer=&method_free=&method_premium=&down_direct=1&x=64&y=5
    op = scrapertools.get_match(page,'<input type="hidden" name="op" value="([^"]+)">')
    file_id = scrapertools.get_match(page,'<input type="hidden" name="id" value="([^"]+)">')
    rand = scrapertools.get_match(page,'<input type="hidden" name="rand" value="([^"]+)">')
    down_direct = scrapertools.get_match(page,'<input type="hidden" name="down_direct" value="([^"]+)">')
    post = "op=%s&id=%s&rand=%s&referer=&method_free=&method_premium=&down_direct=%s&x=64&y=5" % (op,file_id,rand,down_direct)

    # Submit the form to obtain the final link, e.g.
    # <FORM METHOD="LINK" ACTION="http://www96.jumbofiles.com:443/d/.../ORNE.avi">
    page = scrapertools.cache_page(page_url, post=post)
    final_url = scrapertools.get_match(page,'<FORM METHOD="LINK" ACTION="([^"]+)">')

    # Label the entry with the file extension (last 4 chars of the URL).
    video_urls = [[final_url[-4:] + " [jumbofiles]", final_url]]
    for label, url in video_urls:
        logger.info("[jumbofiles.py] %s - %s" % (label, url))
    return video_urls
# Find this server's video links in the given text.
def find_videos(data):
    """Scan *data* for jumbofiles links and return a list of
    [title, url, server] triples, skipping duplicate URLs."""
    # http://jumbofiles.com/oiyetnk5vwzf
    patronvideos = '(http://jumbofiles.com/[0-9a-z]+)'
    logger.info("[jumbofiles.py] find_videos #"+patronvideos+"#")
    seen = set()
    results = []
    for url in re.compile(patronvideos, re.DOTALL).findall(data):
        if url in seen:
            logger.info(" url duplicada="+url)
            continue
        logger.info(" url="+url)
        results.append(["[jumbofiles]", url, 'jumbofiles'])
        seen.add(url)
    return results
| gpl-2.0 |
popazerty/bnigma2 | lib/python/Plugins/Extensions/DVDBurn/TitleProperties.py | 14 | 8424 | from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.FileList import FileList
from Components.Pixmap import Pixmap
from enigma import ePicLoad
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_FONTS, SCOPE_HDD
from Components.config import config, getConfigListEntry, ConfigInteger, ConfigSubsection, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.AVSwitch import AVSwitch
import DVDTitle
class TitleProperties(Screen,ConfigListScreen):
    # Dialog for editing a single DVD title's metadata: track position,
    # menu title/description, audio tracks, aspect ratio and automatic
    # chaptering.  Also shows the title's DVB service info and thumbnail.
    # NOTE(review): block nesting below was reconstructed from a
    # whitespace-stripped source — verify against upstream where marked.
    skin = """
<screen name="TitleProperties" position="center,center" size="560,445" title="Properties of current title" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="serviceinfo" render="Label" position="10,46" size="350,144" font="Regular;18" />
<widget name="thumbnail" position="370,46" size="180,144" alphatest="on" />
<widget name="config" position="10,206" size="540,228" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session, parent, project, title_idx):
        # *parent* is the calling screen (provides editTitle()); *project*
        # holds the list of DVD titles, *title_idx* selects the edited one.
        Screen.__init__(self, session)
        self.parent = parent
        self.project = project
        self.title_idx = title_idx
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self["key_yellow"] = StaticText(_("Edit Title"))
        self["key_blue"] = StaticText()
        self["serviceinfo"] = StaticText()
        self["thumbnail"] = Pixmap()
        self.picload = ePicLoad()
        # Repaint the thumbnail whenever decoding finishes.
        self.picload.PictureData.get().append(self.paintThumbPixmapCB)
        self.properties = project.titles[title_idx].properties
        ConfigListScreen.__init__(self, [])
        self.properties.crop = DVDTitle.ConfigFixedText("crop")
        # Rebuild the config list when entries change that affect which
        # rows are shown.
        self.properties.autochapter.addNotifier(self.initConfigList)
        self.properties.aspect.addNotifier(self.initConfigList)
        for audiotrack in self.properties.audiotracks:
            audiotrack.active.addNotifier(self.initConfigList)
        self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "green": self.exit,
            "red": self.cancel,
            "yellow": self.editTitle,
            "cancel": self.cancel,
            "ok": self.ok,
        }, -2)
        self.onShown.append(self.update)
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setTitle(_("Properties of current title"))

    def initConfigList(self, element=None):
        # (Re)build the list of editable config entries and the read-only
        # service-info text.  Also used as a ConfigElement notifier, hence
        # the unused *element* parameter.
        try:
            self.properties.position = ConfigInteger(default = self.title_idx+1, limits = (1, len(self.project.titles)))
            title = self.project.titles[self.title_idx]
            self.list = []
            self.list.append(getConfigListEntry("DVD " + _("Track"), self.properties.position))
            self.list.append(getConfigListEntry("DVD " + _("Title"), self.properties.menutitle))
            self.list.append(getConfigListEntry("DVD " + _("Description"), self.properties.menusubtitle))
            # Per-audio-track options only at setup level expert or above.
            if config.usage.setup_level.index >= 2: # expert+
                for audiotrack in self.properties.audiotracks:
                    DVB_aud = audiotrack.DVB_lang.getValue() or audiotrack.pid.getValue()
                    self.list.append(getConfigListEntry(_("burn audio track (%s)") % DVB_aud, audiotrack.active))
                    if audiotrack.active.getValue():
                        self.list.append(getConfigListEntry(_("audio track (%s) format") % DVB_aud, audiotrack.format))
                        self.list.append(getConfigListEntry(_("audio track (%s) language") % DVB_aud, audiotrack.language))
            self.list.append(getConfigListEntry("DVD " + _("Aspect Ratio"), self.properties.aspect))
            if self.properties.aspect.getValue() == "16:9":
                self.list.append(getConfigListEntry("DVD " + "widescreen", self.properties.widescreen))
            else:
                # 4:3 titles get the fixed "crop" entry instead.
                self.list.append(getConfigListEntry("DVD " + "widescreen", self.properties.crop))
            # Auto-chaptering only makes sense without manual chapter marks.
            if len(title.chaptermarks) == 0:
                self.list.append(getConfigListEntry(_("Auto chapter split every ? minutes (0=never)"), self.properties.autochapter))
            infotext = "DVB " + _("Title") + ': ' + title.DVBname + "\n" + _("Description") + ': ' + title.DVBdescr + "\n" + _("Channel") + ': ' + title.DVBchannel + '\n' + _("Begin time") + title.formatDVDmenuText(": $D.$M.$Y, $T\n", self.title_idx+1)
            chaptermarks = title.getChapterMarks(template="$h:$m:$s")
            chapters_count = len(chaptermarks)
            if chapters_count >= 1:
                infotext += str(chapters_count+1) + ' ' + _("chapters") + ': '
                infotext += ' / '.join(chaptermarks)
            self["serviceinfo"].setText(infotext)
            self["config"].setList(self.list)
        except AttributeError:
            # Properties may not be fully initialised yet when a notifier
            # fires; silently skip the refresh in that case.
            pass

    def editTitle(self):
        # Delegate to the parent screen (yellow button).
        self.parent.editTitle()

    def update(self):
        print "[onShown]"
        self.initConfigList()
        self.loadThumb()

    def loadThumb(self):
        # Thumbnail file lives next to the title's input file, as .png.
        thumbfile = self.project.titles[self.title_idx].inputfile.rsplit('.',1)[0] + ".png"
        sc = AVSwitch().getFramebufferScale()
        self.picload.setPara((self["thumbnail"].instance.size().width(), self["thumbnail"].instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
        self.picload.startDecode(thumbfile)

    def paintThumbPixmapCB(self, picInfo=None):
        # Called by ePicLoad when decoding is done; show the pixmap.
        ptr = self.picload.getData()
        if ptr != None:
            self["thumbnail"].instance.setPixmap(ptr.__deref__())

    def changedConfigList(self):
        self.initConfigList()

    def exit(self):
        # Green/OK: persist the edits, then close.
        self.applySettings()
        self.close()

    def applySettings(self):
        # Save every config entry, then honour a changed track position by
        # moving the title within the project's title list.
        for x in self["config"].list:
            x[1].save()
        current_pos = self.title_idx+1
        new_pos = self.properties.position.getValue()
        if new_pos != current_pos:
            print "title got repositioned from ", current_pos, "to", new_pos
            swaptitle = self.project.titles.pop(current_pos-1)
            self.project.titles.insert(new_pos-1, swaptitle)

    def ok(self):
        #key = self.keydict[self["config"].getCurrent()[1]]
        #if key in self.project.filekeys:
        #self.session.openWithCallback(self.FileBrowserClosed, FileBrowser, key, self.settings)
        pass

    def cancel(self):
        # Red/back: discard without saving.
        self.close()
from Tools.ISO639 import LanguageCodes
class LanguageChoices():
    # Builds the selectable language list for DVD audio tracks and maps
    # free-form DVB language descriptors to ISO-639 two-letter codes.

    def __init__(self):
        from Components.Language import language as syslanguage
        # Two-letter code of the current system language.
        syslang = syslanguage.getLanguage()[:2]
        self.langdict = { }
        self.choices = []
        # Keep only the two-letter ISO-639 codes.
        for key, val in LanguageCodes.iteritems():
            if len(key) == 2:
                self.langdict[key] = val[0]
        # Collect all choices except the system language and English,
        # which are inserted at fixed positions below.
        for key, val in self.langdict.iteritems():
            if key not in (syslang, 'en'):
                # NOTE(review): this reassignment is a no-op.
                self.langdict[key] = val
                self.choices.append((key, val))
        self.choices.sort()
        # Order: "unspecified", system language, then English.
        self.choices.insert(0,("nolang", ("unspecified")))
        self.choices.insert(1,(syslang, self.langdict[syslang]))
        if syslang != "en":
            self.choices.insert(2,("en", self.langdict["en"]))

    def getLanguage(self, DVB_lang):
        # Best-effort mapping of a DVB audio descriptor (e.g. "Deutsch
        # stereo") to a two-letter code; returns "nolang" on no match.
        DVB_lang = DVB_lang.lower()
        # Strip common audio-related noise words first.
        for word in ("stereo", "audio", "description", "2ch", "dolby digital"):
            DVB_lang = DVB_lang.replace(word,"").strip()
        for key, val in LanguageCodes.iteritems():
            if DVB_lang.find(key.lower()) == 0:
                if len(key) == 2:
                    return key
                else:
                    # Longer code matched: replace the descriptor with the
                    # full language name and fall through to the two-letter
                    # lookup below.
                    DVB_lang = (LanguageCodes[key])[0]
            elif DVB_lang.find(val[0].lower()) > -1:
                if len(key) == 2:
                    return key
                else:
                    DVB_lang = (LanguageCodes[key])[0]
        # Final attempt: match the (possibly substituted) language name
        # against the two-letter table.
        for key, val in self.langdict.iteritems():
            if val == DVB_lang:
                return key
        return "nolang"
# Module-level singleton used for DVD audio-track language selection.
languageChoices = LanguageChoices()
| gpl-2.0 |
pculture/mirocommunity | localtv/templatetags/filters.py | 1 | 4151 | import datetime
import re
from bs4 import BeautifulSoup, Comment
from django.contrib.sites.models import Site
from django.template import Library
from django.utils.encoding import force_unicode
from django.utils.html import urlize
from django.utils.safestring import mark_safe
import lxml.html
# Template-tag library the filters below are registered on.
register = Library()
@register.filter
def simpletimesince(value, arg=None):
    """Render *value* as elapsed time.

    With *arg*, returns the full "timesince" between the two dates
    (i.e. "4 days, 6 hours"); otherwise only the most significant unit
    relative to utcnow.  Returns u'' for falsy or unparseable input.
    """
    from django.utils.timesince import timesince
    if not value:
        return u''
    try:
        if arg:
            return timesince(value, arg)
        since = timesince(value, datetime.datetime.utcnow())
        return since.split(', ')[0]
    except (TypeError, ValueError):
        return u''
@register.filter
def sanitize(value, extra_filters=None):
    """
    Sanitize the given HTML.

    ``extra_filters`` may be an iterable of tag names to *remove* from the
    default allowed set, or a string of the form ``"tags"``,
    ``"tags|attributes"`` or ``"whitelist|tags|attributes"`` (with
    ``whitelist`` replacing the allowed sets entirely).

    Based on code from:

    * http://www.djangosnippets.org/snippets/1655/

    * http://www.djangosnippets.org/snippets/205/
    """
    if value is None:
        return u''
    if '<' not in value and '&#' not in value and \
            re.search(r'&\w+;', value) is None: # no HTML
        # convert plain-text links into HTML
        return mark_safe(urlize(value,
                                nofollow=True,
                                autoescape=True).replace('\n', '<br/>'))
    # Matches "javascript" even when spelled with whitespace or numeric
    # character references interleaved between the letters.
    js_regex = re.compile(r'[\s]*(&#x.{1,7})?'.join(list('javascript')),
                          re.IGNORECASE)
    allowed_tags = ('p i strong em b u a h1 h2 h3 h4 h5 h6 pre br img ul '
                    'ol li span').split()
    allowed_attributes = 'href src style'.split()
    whitelist = False
    extra_tags = ()
    extra_attributes = ()
    if isinstance(extra_filters, basestring):
        if '|' in extra_filters:
            parts = extra_filters.split('|')
        else:
            # BUG FIX: this used to be ``[extra_filters.split()]`` which
            # wrapped a *list* in a list, so ``parts[0].split()`` below
            # raised AttributeError for any plain string argument.
            parts = [extra_filters]
        if parts[0] == 'whitelist':
            whitelist = True
            parts = parts[1:]
        extra_tags = parts[0].split()
        if len(parts) > 1:
            extra_attributes = parts[1].split()
    elif extra_filters:
        extra_tags = extra_filters
    if whitelist:
        allowed_tags, allowed_attributes = extra_tags, extra_attributes
    else:
        # Default mode: extras are *removed* from the allowed sets.
        allowed_tags = set(allowed_tags) - set(extra_tags)
        allowed_attributes = set(allowed_attributes) - set(extra_attributes)
    soup = BeautifulSoup(value)
    for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
        # remove comments
        comment.extract()
    for tag in soup.find_all(True):
        if tag.name not in allowed_tags:
            tag.hidden = True
        else:
            # Keep only allowed attributes, stripping javascript: payloads.
            tag.attrs = dict((key, js_regex.sub('', val))
                             for key, val in tag.attrs.iteritems()
                             if key in allowed_attributes)
    return mark_safe(unicode(soup))
@register.filter
def wmode_transparent(value):
    """Force wmode=transparent on flash ``<object>``/``<embed>`` snippets
    so they do not paint over surrounding page elements."""
    doc = lxml.html.fromstring('<div>' + value + '</div>')
    # Prepend a wmode param to every <object>.
    for object_tag in doc.cssselect('object'):
        param = lxml.html.fragment_fromstring(
            """<param name="wmode" value="transparent"></param>""")
        object_tag.insert(0, param)
    # Set the attribute directly on flash <embed>s.
    for embed in doc.cssselect('embed'):
        if embed.get('type') == 'application/x-shockwave-flash':
            embed.set('wmode', 'transparent')
    rendered = lxml.html.tostring(doc)
    # Strip the wrapper div we added above.
    if rendered.startswith('<div>') and rendered.endswith('</div>'):
        return mark_safe(rendered[len('<div>'):-len('</div>')])
    # else, uh, return the wrapped thing.
    return mark_safe(rendered)
@register.filter
def full_url(url):
    """
    If necessary, adds protocol and host to a URL.

    URLs that already carry an http:// or https:// scheme are returned
    unchanged; everything else is prefixed with the current site's host.
    """
    url = force_unicode(url)
    if not url:
        return url
    # BUG FIX: the old precedence-broken test
    # ``not url.startswith(u'http://') or url.startswith(u'https://')``
    # evaluated as ``(not A) or B`` and therefore prefixed https:// URLs
    # with another scheme and host.
    if not url.startswith((u'http://', u'https://')):
        site = Site.objects.get_current()
        url = u'http://{host}{url}'.format(host=site.domain, url=url)
    return url
| agpl-3.0 |
chineyting/project4-Info3180 | server/lib/werkzeug/security.py | 302 | 8407 | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import hmac
import hashlib
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap
from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
string_types, to_native
# Alphabet used by gen_salt().
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
# Default iteration count when the caller does not choose one.
DEFAULT_PBKDF2_ITERATIONS = 1000

# Big-endian unsigned 32-bit packer for the PBKDF2 block counter.
_pack_int = Struct('>I').pack
# hmac.compare_digest when the running Python provides it.
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
# OS-backed RNG for salt generation.
_sys_rng = SystemRandom()
# Path separators other than '/', used by safe_join() to veto filenames.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
                    if sep not in (None, '/'))
def _find_hashlib_algorithms():
algos = getattr(hashlib, 'algorithms', None)
if algos is None:
algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
rv = {}
for algo in algos:
func = getattr(hashlib, algo, None)
if func is not None:
rv[algo] = func
return rv
# Name -> constructor table of the locally available hash functions.
_hash_funcs = _find_hashlib_algorithms()
def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Like :func:`pbkdf2_bin` but returns a hex encoded string.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    raw = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
    return to_native(codecs.encode(raw, 'hex_codec'))
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` time and produces a
    key of `keylen` bytes. By default SHA-1 is used as hash function,
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    # Accept a hash name, a hash constructor, or nothing (-> sha1).
    if isinstance(hashfunc, string_types):
        hashfunc = _hash_funcs[hashfunc]
    elif not hashfunc:
        hashfunc = hashlib.sha1
    salt = to_bytes(salt)
    # Template HMAC keyed with the password; copied per PRF invocation.
    mac = hmac.HMAC(to_bytes(data), None, hashfunc)
    if not keylen:
        keylen = mac.digest_size
    def _pseudorandom(x, mac=mac):
        # PRF(password, x): one HMAC application on a copy of the template.
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())
    buf = bytearray()
    # ceil(keylen / digest_size) blocks, counted from 1 (PBKDF2 spec).
    for block in range_type(1, -(-keylen // mac.digest_size) + 1):
        rv = u = _pseudorandom(salt + _pack_int(block))
        for i in range_type(iterations - 1):
            u = _pseudorandom(bytes(u))
            # XOR each intermediate value into the accumulator.
            rv = bytearray(starmap(xor, izip(rv, u)))
        buf.extend(rv)
    return bytes(buf[:keylen])
def safe_str_cmp(a, b):
    """Compare two strings in somewhat constant time (the length of at
    least one string must be known in advance).  Returns True iff the two
    strings are equal.

    .. versionadded:: 0.7
    """
    # Prefer the C implementation when the stdlib provides one.
    if _builtin_safe_str_cmp is not None:
        return _builtin_safe_str_cmp(a, b)
    if len(a) != len(b):
        return False
    diff = 0
    if isinstance(a, bytes) and isinstance(b, bytes) and not PY2:
        # Python 3 bytes iterate as ints.
        for x, y in izip(a, b):
            diff |= x ^ y
    else:
        for x, y in izip(a, b):
            diff |= ord(x) ^ ord(y)
    return diff == 0
def gen_salt(length):
    """Generate a random string of SALT_CHARS with specified ``length``,
    using the OS random source."""
    if length <= 0:
        raise ValueError('requested salt of length <= 0')
    chars = [_sys_rng.choice(SALT_CHARS) for _ in range_type(length)]
    return ''.join(chars)
def _hash_internal(method, salt, password):
    """Internal password hash helper.  Supports plaintext without salt,
    unsalted and salted passwords.  In case salted passwords are used
    hmac is used.

    Returns a ``(digest, actual_method)`` tuple; for pbkdf2 methods
    *actual_method* includes the resolved iteration count.
    """
    if method == 'plain':
        return password, method
    if isinstance(password, text_type):
        password = password.encode('utf-8')
    if method.startswith('pbkdf2:'):
        # Method string is 'pbkdf2:<hash>[:<iterations>]'.
        args = method[7:].split(':')
        if len(args) not in (1, 2):
            raise ValueError('Invalid number of arguments for PBKDF2')
        method = args.pop(0)
        iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
        is_pbkdf2 = True
        actual_method = 'pbkdf2:%s:%d' % (method, iterations)
    else:
        is_pbkdf2 = False
        actual_method = method
    hash_func = _hash_funcs.get(method)
    if hash_func is None:
        raise TypeError('invalid method %r' % method)
    if is_pbkdf2:
        if not salt:
            raise ValueError('Salt is required for PBKDF2')
        rv = pbkdf2_hex(password, salt, iterations,
                        hashfunc=hash_func)
    elif salt:
        # Salted, non-PBKDF2: HMAC the password with the salt as key.
        if isinstance(salt, text_type):
            salt = salt.encode('utf-8')
        rv = hmac.HMAC(salt, password, hash_func).hexdigest()
    else:
        # Unsalted legacy hash.
        h = hash_func()
        h.update(password)
        rv = h.hexdigest()
    return rv, actual_method
def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
    """Hash a password with the given method and a random salt of the
    given length.  The returned string embeds the method so that
    :func:`check_password_hash` can verify it::

        method$salt$hash

    Unsalted hashes cannot be generated, but setting the method to
    ``plain`` enforces plaintext.  When a salt is used, hmac is applied
    internally.  PBKDF2 is selected with ``pbkdf2:method:iterations``
    (iterations optional)::

        pbkdf2:sha1:2000$salt$hash
        pbkdf2:sha1$salt$hash

    :param password: the password to hash
    :param method: the hash method to use (one that hashlib supports), can
                   optionally be in the format ``pbpdf2:<method>[:iterations]``
                   to enable PBKDF2.
    :param salt_length: the length of the salt in letters
    """
    if method == 'plain':
        salt = ''
    else:
        salt = gen_salt(salt_length)
    h, actual_method = _hash_internal(method, salt, password)
    return '%s$%s$%s' % (actual_method, salt, h)
def check_password_hash(pwhash, password):
    """Check a password against a salted-and-hashed value produced by
    :func:`generate_password_hash`.  Plaintext and unsalted legacy md5/sha1
    hashes are supported as well.  Returns True on a match.

    :param pwhash: a hashed string like returned by
                   :func:`generate_password_hash`
    :param password: the plaintext password to compare against the hash
    """
    # A valid hash has at least 'method$salt$digest'.
    if pwhash.count('$') < 2:
        return False
    method, salt, hashval = pwhash.split('$', 2)
    computed, _actual_method = _hash_internal(method, salt, password)
    return safe_str_cmp(computed, hashval)
def safe_join(directory, filename):
    """Safely join *directory* and the untrusted *filename*; returns the
    joined path, or ``None`` when the filename would escape the directory.

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    """
    filename = posixpath.normpath(filename)
    # Reject platform-specific separators that normpath left untouched.
    if any(sep in filename for sep in _os_alt_seps):
        return None
    if os.path.isabs(filename) or filename.startswith('../'):
        return None
    return os.path.join(directory, filename)
| apache-2.0 |
thiagoramos-luizalabs/django-material | tests/visual/tests/test_booleaninput.py | 10 | 2602 | from selenium.webdriver.common.keys import Keys
from tests.integration.tests.test_booleaninput import Test as TestBooleanInput
from . import VisualTest
class Test(VisualTest):
    """Visual regression tests for the BooleanInput widget.

    Each test opens the matching integration-test page and compares a
    screenshot of the rendered form against its stored reference image.
    (The per-test load/screenshot boilerplate is factored into helpers.)
    """
    urls = TestBooleanInput.urls

    def _open(self, url):
        # Load an integration-test page on the live test server.
        self.driver.get('%s%s' % (self.live_server_url, url))

    def _assert_form_shot(self, screenshot_name):
        # Screenshot the form element and diff against the golden image.
        self.assertScreenshot('form', screenshot_name, threshold=1)

    def test_test_default_usecase(self):
        self._open(TestBooleanInput.test_default_usecase.url)
        self._assert_form_shot('booleaninput_default_usecase')

    def test_invalid_value(self):
        self._open(TestBooleanInput.test_invalid_value.url)
        # Submit the form to trigger the missing-value validation error.
        self.driver.find_element_by_css_selector("button").send_keys(Keys.RETURN)
        self._assert_form_shot('booleaninput_missing_value_error')

    def test_part_group_class(self):
        self._open(TestBooleanInput.test_part_group_class.url)
        self._assert_form_shot('booleaninput_part_group_class')

    def test_part_add_group_class(self):
        self._open(TestBooleanInput.test_part_add_group_class.url)
        self._assert_form_shot('booleaninput_part_add_group_class')

    def test_part_prefix(self):
        self._open(TestBooleanInput.test_part_prefix.url)
        self._assert_form_shot('booleaninput_part_prefix')

    def test_part_add_control_class(self):
        self._open(TestBooleanInput.test_part_add_control_class.url)
        # Toggle the checkbox before taking the screenshot.
        self.driver.find_element_by_css_selector("#id_test_field_container label").click()
        self._assert_form_shot('booleaninput_part_add_control_class')

    def test_part_label(self):
        self._open(TestBooleanInput.test_part_label.url)
        self._assert_form_shot('booleaninput_part_label')

    def test_part_add_label_class(self):
        self._open(TestBooleanInput.test_part_add_label_class.url)
        self._assert_form_shot('booleaninput_part_add_label_class')

    def test_part_help_text(self):
        self._open(TestBooleanInput.test_part_help_text.url)
        self._assert_form_shot('booleaninput_part_help_text')

    def test_part_errors(self):
        self._open(TestBooleanInput.test_part_errors.url)
        self._assert_form_shot('booleaninput_part_errors')
| bsd-3-clause |
TeamEOS/external_chromium_org | tools/json_schema_compiler/cpp_type_generator_test.py | 71 | 7614 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from cpp_type_generator import CppTypeGenerator
from json_schema import CachedLoad
import model
import unittest
from collections import defaultdict
class _FakeSchemaLoader(object):
  """Minimal schema-loader test double consumed by CppTypeGenerator."""

  def __init__(self, model):
    self._model = model

  def ResolveType(self, type_name, default):
    # Unqualified names resolve against |default| (or None if unknown);
    # qualified "namespace.Type" names resolve to the owning namespace
    # in the backing model.
    if '.' not in type_name:
      return default if type_name in default.types else None
    namespace_name = type_name.rsplit('.', 1)[0]
    return self._model.namespaces[namespace_name]
class CppTypeGeneratorTest(unittest.TestCase):
  """Tests for CppTypeGenerator: include/forward-declaration emission and
  schema-type to C++ type mapping, driven by the JSON fixtures in test/."""

  def setUp(self):
    # One model.Model per schema so each test can build cross-namespace
    # scenarios independently.
    self.models = defaultdict(model.Model)
    self.forbidden_json = CachedLoad('test/forbidden.json')
    self.forbidden = self.models['forbidden'].AddNamespace(
        self.forbidden_json[0], 'path/to/forbidden.json')
    self.permissions_json = CachedLoad('test/permissions.json')
    self.permissions = self.models['permissions'].AddNamespace(
        self.permissions_json[0], 'path/to/permissions.json')
    self.windows_json = CachedLoad('test/windows.json')
    self.windows = self.models['windows'].AddNamespace(self.windows_json[0],
                                                       'path/to/window.json')
    self.tabs_json = CachedLoad('test/tabs.json')
    self.tabs = self.models['tabs'].AddNamespace(self.tabs_json[0],
                                                 'path/to/tabs.json')
    self.browser_action_json = CachedLoad('test/browser_action.json')
    self.browser_action = self.models['browser_action'].AddNamespace(
        self.browser_action_json[0], 'path/to/browser_action.json')
    self.font_settings_json = CachedLoad('test/font_settings.json')
    self.font_settings = self.models['font_settings'].AddNamespace(
        self.font_settings_json[0], 'path/to/font_settings.json')
    self.dependency_tester_json = CachedLoad('test/dependency_tester.json')
    self.dependency_tester = self.models['dependency_tester'].AddNamespace(
        self.dependency_tester_json[0], 'path/to/dependency_tester.json')
    self.content_settings_json = CachedLoad('test/content_settings.json')
    self.content_settings = self.models['content_settings'].AddNamespace(
        self.content_settings_json[0], 'path/to/content_settings.json')

  def testGenerateIncludesAndForwardDeclarations(self):
    # Hard includes are empty; soft includes pull in referenced namespaces.
    m = model.Model()
    m.AddNamespace(self.windows_json[0], 'path/to/windows.json')
    m.AddNamespace(self.tabs_json[0], 'path/to/tabs.json')
    manager = CppTypeGenerator(m, _FakeSchemaLoader(m))
    self.assertEquals('', manager.GenerateIncludes().Render())
    self.assertEquals('#include "path/to/tabs.h"',
                      manager.GenerateIncludes(include_soft=True).Render())
    self.assertEquals('namespace tabs {\n'
                      'struct Tab;\n'
                      '}',
                      manager.GenerateForwardDeclarations().Render())
    # A namespace with no cross-references generates nothing.
    manager = CppTypeGenerator(self.models.get('permissions'),
                               _FakeSchemaLoader(m))
    self.assertEquals('', manager.GenerateIncludes().Render())
    self.assertEquals('', manager.GenerateIncludes().Render())
    self.assertEquals('', manager.GenerateForwardDeclarations().Render())
    manager = CppTypeGenerator(self.models.get('content_settings'),
                               _FakeSchemaLoader(m))
    self.assertEquals('', manager.GenerateIncludes().Render())

  def testGenerateIncludesAndForwardDeclarationsDependencies(self):
    m = model.Model()
    # Insert 'font_settings' before 'browser_action' in order to test that
    # CppTypeGenerator sorts them properly.
    m.AddNamespace(self.font_settings_json[0], 'path/to/font_settings.json')
    m.AddNamespace(self.browser_action_json[0], 'path/to/browser_action.json')
    dependency_tester = m.AddNamespace(self.dependency_tester_json[0],
                                       'path/to/dependency_tester.json')
    manager = CppTypeGenerator(m,
                               _FakeSchemaLoader(m),
                               default_namespace=dependency_tester)
    self.assertEquals('#include "path/to/browser_action.h"\n'
                      '#include "path/to/font_settings.h"',
                      manager.GenerateIncludes().Render())
    self.assertEquals('namespace browser_action {\n'
                      '}\n'
                      'namespace font_settings {\n'
                      '}',
                      manager.GenerateForwardDeclarations().Render())

  def testGetCppTypeSimple(self):
    # Primitive schema types map to the corresponding C++ builtins.
    manager = CppTypeGenerator(self.models.get('tabs'), _FakeSchemaLoader(None))
    self.assertEquals(
        'int',
        manager.GetCppType(self.tabs.types['Tab'].properties['id'].type_))
    self.assertEquals(
        'std::string',
        manager.GetCppType(self.tabs.types['Tab'].properties['status'].type_))
    self.assertEquals(
        'bool',
        manager.GetCppType(self.tabs.types['Tab'].properties['selected'].type_))

  def testStringAsType(self):
    # A named type that is just a string alias still renders as std::string.
    manager = CppTypeGenerator(self.models.get('font_settings'),
                               _FakeSchemaLoader(None))
    self.assertEquals(
        'std::string',
        manager.GetCppType(self.font_settings.types['FakeStringType']))

  def testArrayAsType(self):
    manager = CppTypeGenerator(self.models.get('browser_action'),
                               _FakeSchemaLoader(None))
    self.assertEquals(
        'std::vector<int>',
        manager.GetCppType(self.browser_action.types['ColorArray']))

  def testGetCppTypeArray(self):
    # Arrays of objects are vectors of linked_ptr; arrays of primitives are
    # plain vectors.
    manager = CppTypeGenerator(self.models.get('windows'),
                               _FakeSchemaLoader(None))
    self.assertEquals(
        'std::vector<linked_ptr<Window> >',
        manager.GetCppType(
            self.windows.functions['getAll'].callback.params[0].type_))
    manager = CppTypeGenerator(self.models.get('permissions'),
                               _FakeSchemaLoader(None))
    self.assertEquals(
        'std::vector<std::string>',
        manager.GetCppType(
            self.permissions.types['Permissions'].properties['origins'].type_))

  def testGetCppTypeLocalRef(self):
    # References within the default namespace are unqualified.
    manager = CppTypeGenerator(self.models.get('tabs'), _FakeSchemaLoader(None))
    self.assertEquals(
        'Tab',
        manager.GetCppType(self.tabs.functions['get'].callback.params[0].type_))

  def testGetCppTypeIncludedRef(self):
    # References to another namespace are qualified with that namespace.
    m = model.Model()
    m.AddNamespace(self.windows_json[0], 'path/to/windows.json')
    m.AddNamespace(self.tabs_json[0], 'path/to/tabs.json')
    manager = CppTypeGenerator(m, _FakeSchemaLoader(m))
    self.assertEquals(
        'std::vector<linked_ptr<tabs::Tab> >',
        manager.GetCppType(
            self.windows.types['Window'].properties['tabs'].type_))

  def testGetCppTypeWithPadForGeneric(self):
    # is_in_container wraps non-copyable/array types in linked_ptr and pads
    # nested template brackets.
    manager = CppTypeGenerator(self.models.get('permissions'),
                               _FakeSchemaLoader(None))
    self.assertEquals('std::vector<std::string>',
        manager.GetCppType(
            self.permissions.types['Permissions'].properties['origins'].type_,
            is_in_container=False))
    self.assertEquals('linked_ptr<std::vector<std::string> >',
        manager.GetCppType(
            self.permissions.types['Permissions'].properties['origins'].type_,
            is_in_container=True))
    self.assertEquals('bool',
        manager.GetCppType(
            self.permissions.functions['contains'].callback.params[0].type_,
            is_in_container=True))
# Allow running this suite directly: python cpp_type_generator_test.py
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
patmcb/odoo | addons/l10n_be_invoice_bba/invoice.py | 141 | 12671 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
    """Extend account.invoice with Belgian BBA structured communications.

    A BBA structured communication is a payment reference of the form
    +++090/9337/55493+++ whose 12 digits end in a mod-97 checksum of the
    first 10 (0 is represented as 97).  This class adds 'bba' as a
    reference type and can auto-generate unique references using the
    algorithm configured on the partner ('random', 'date' or
    'partner_ref').  Written for the legacy (pre-v8) OpenERP API and
    Python 2 (`has_key`, `long`).
    """
    _inherit = 'account.invoice'
    @api.cr_uid_context
    def _get_reference_type(self, cursor, user, context=None):
        """Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
        res = super(account_invoice, self)._get_reference_type(cursor, user,
                context=context)
        # Relabel the existing 'none' entry, then append the BBA type.
        res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
        res.append(('bba', 'BBA Structured Communication'))
        #l_logger.warning('reference_type =  %s' %res )
        return res
    def check_bbacomm(self, val):
        # Return True when `val` is a syntactically valid BBA communication:
        # only digits plus '+', '*', '/' and spaces allowed, exactly 12
        # digits, and the last two digits equal (first 10 digits) % 97
        # with 0 mapped to 97.
        supported_chars = '0-9+*/ '
        pattern = re.compile('[^' + supported_chars + ']')
        if pattern.findall(val or ''):
            return False
        bbacomm = re.sub('\D', '', val or '')
        if len(bbacomm) == 12:
            base = int(bbacomm[:10])
            mod = base % 97 or 97
            if mod == int(bbacomm[-2:]):
                return True
        return False
    def _check_communication(self, cr, uid, ids):
        # Constraint helper: 'bba' references must pass the checksum test.
        # NOTE(review): returns on the first 'bba' invoice encountered, so
        # subsequent records in `ids` are not checked.
        for inv in self.browse(cr, uid, ids):
            if inv.reference_type == 'bba':
                return self.check_bbacomm(inv.reference)
        return True
    def onchange_partner_id(self, cr, uid, ids, type, partner_id,
            date_invoice=False, payment_term=False,
            partner_bank_id=False, company_id=False,
            context=None):
        """On partner change, preset the communication type and (for 'bba')
        generate a reference from the partner's outgoing invoice settings."""
        result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
                date_invoice, payment_term, partner_bank_id, company_id, context)
        # reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
        # _logger.warning('partner_id %s' % partner_id)
        reference = False
        reference_type = 'none'
        if partner_id:
            if (type == 'out_invoice'):
                reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).out_inv_comm_type
                if reference_type:
                    reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context=context)['value']['reference']
        res_update = {
            'reference_type': reference_type or 'none',
            'reference': reference,
        }
        result['value'].update(res_update)
        return result
    def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
        """Generate a BBA structured communication for an outgoing invoice.

        The generation algorithm comes from the partner record
        (defaulting to 'random'); a `reference` that already validates is
        left untouched.  Returns an onchange-style {'value': {...}} dict.
        """
        partner_obj = self.pool.get('res.partner')
        reference = reference or ''
        algorithm = False
        if partner_id:
            algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
        algorithm = algorithm or 'random'
        if (type == 'out_invoice'):
            if reference_type == 'bba':
                if algorithm == 'date':
                    # +++DOY/YEAR/SEQmm+++ : day-of-year, year, daily 3-digit
                    # sequence, mod-97 checksum.
                    if not self.check_bbacomm(reference):
                        doy = time.strftime('%j')
                        year = time.strftime('%Y')
                        seq = '001'
                        seq_ids = self.search(cr, uid,
                            [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
                             ('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
                        if seq_ids:
                            # Continue from the highest sequence used today.
                            prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
                            if prev_seq < 999:
                                seq = '%03d' % (prev_seq + 1)
                            else:
                                raise osv.except_osv(_('Warning!'),
                                    _('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
                                      '\nPlease create manually a unique BBA Structured Communication.'))
                        bbacomm = doy + year + seq
                        base = int(bbacomm)
                        mod = base % 97 or 97
                        reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
                elif algorithm == 'partner_ref':
                    # +++PPP/PPPP/SEQmm+++ : 7-digit (zero-padded) partner
                    # reference plus per-partner 3-digit sequence.
                    if not self.check_bbacomm(reference):
                        partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
                        partner_ref_nr = re.sub('\D', '', partner_ref or '')
                        if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
                            raise osv.except_osv(_('Warning!'),
                                _('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
                                  '\nPlease correct the Partner record.'))
                        else:
                            partner_ref_nr = partner_ref_nr.ljust(7, '0')
                            seq = '001'
                            seq_ids = self.search(cr, uid,
                                [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
                                 ('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
                            if seq_ids:
                                prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
                                if prev_seq < 999:
                                    seq = '%03d' % (prev_seq + 1)
                                else:
                                    raise osv.except_osv(_('Warning!'),
                                        _('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
                                          '\nPlease create manually a unique BBA Structured Communication.'))
                            bbacomm = partner_ref_nr + seq
                            base = int(bbacomm)
                            mod = base % 97 or 97
                            reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
                elif algorithm == 'random':
                    # Random 10-digit base plus mod-97 checksum.
                    if not self.check_bbacomm(reference):
                        base = random.randint(1, 9999999999)
                        bbacomm = str(base).rjust(10, '0')
                        base = int(bbacomm)
                        mod = base % 97 or 97
                        mod = str(mod).rjust(2, '0')
                        reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
                else:
                    raise osv.except_osv(_('Error!'),
                        _("Unsupported Structured Communication Type Algorithm '%s' !" \
                          "\nPlease contact your Odoo support channel.") % algorithm)
        return {'value': {'reference': reference}}
    def create(self, cr, uid, vals, context=None):
        """Normalise/validate the BBA reference and enforce uniqueness on
        creation of outgoing invoices."""
        reference = vals.get('reference', False)
        reference_type = vals.get('reference_type', False)
        if vals.get('type') == 'out_invoice' and not reference_type:
            # fallback on default communication type for partner
            reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
            if reference_type == 'bba':
                reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
            vals.update({
                'reference_type': reference_type or 'none',
                'reference': reference,
            })
        if reference_type == 'bba':
            if not reference:
                raise osv.except_osv(_('Warning!'),
                    _('Empty BBA Structured Communication!' \
                      '\nPlease fill in a unique BBA Structured Communication.'))
            if self.check_bbacomm(reference):
                # Rewrite to the canonical +++xxx/xxxx/xxxxx+++ form.
                reference = re.sub('\D', '', reference)
                vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
                same_ids = self.search(cr, uid,
                    [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
                     ('reference', '=', vals['reference'])])
                if same_ids:
                    raise osv.except_osv(_('Warning!'),
                        _('The BBA Structured Communication has already been used!' \
                          '\nPlease create manually a unique BBA Structured Communication.'))
        return super(account_invoice, self).create(cr, uid, vals, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Normalise a changed BBA reference and enforce uniqueness
        (excluding the record being written)."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        for inv in self.browse(cr, uid, ids, context):
            if vals.has_key('reference_type'):
                reference_type = vals['reference_type']
            else:
                reference_type = inv.reference_type or ''
            if reference_type == 'bba' and 'reference' in vals:
                if self.check_bbacomm(vals['reference']):
                    reference = re.sub('\D', '', vals['reference'])
                    vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
                    same_ids = self.search(cr, uid,
                        [('id', '!=', inv.id), ('type', '=', 'out_invoice'),
                         ('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
                    if same_ids:
                        raise osv.except_osv(_('Warning!'),
                            _('The BBA Structured Communication has already been used!' \
                              '\nPlease create manually a unique BBA Structured Communication.'))
        return super(account_invoice, self).write(cr, uid, ids, vals, context)
    def copy(self, cr, uid, id, default=None, context=None):
        """Regenerate the BBA communication when duplicating an outgoing
        invoice (references must stay unique)."""
        default = default or {}
        invoice = self.browse(cr, uid, id, context=context)
        if invoice.type in ['out_invoice']:
            reference_type = invoice.reference_type or 'none'
            default['reference_type'] = reference_type
            if reference_type == 'bba':
                partner = invoice.partner_id
                default['reference'] = self.generate_bbacomm(cr, uid, id,
                    invoice.type, reference_type,
                    partner.id, '', context=context)['value']['reference']
        return super(account_invoice, self).copy(cr, uid, id, default, context=context)
    _columns = {
        'reference': fields.char('Communication', help="The partner reference of this invoice."),
        'reference_type': fields.selection(_get_reference_type, 'Communication Type',
            required=True, readonly=True),
    }
    _constraints = [
        (_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
    ]
account_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
KevinOConnor/klipper | klippy/extras/tmc2130.py | 1 | 12763 | # TMC2130 configuration
#
# Copyright (C) 2018-2019 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import math, logging
from . import bus, tmc
# Internal clock frequency (Hz) used as the time base for the TMC2130's
# TSTEP/TPWMTHRS/TCOOLTHRS/THIGH threshold fields.
TMC_FREQUENCY=13200000.
# Register name -> SPI address map (per the TMC2130 datasheet).
Registers = {
    "GCONF": 0x00, "GSTAT": 0x01, "IOIN": 0x04, "IHOLD_IRUN": 0x10,
    "TPOWERDOWN": 0x11, "TSTEP": 0x12, "TPWMTHRS": 0x13, "TCOOLTHRS": 0x14,
    "THIGH": 0x15, "XDIRECT": 0x2d, "MSLUT0": 0x60, "MSLUTSEL": 0x68,
    "MSLUTSTART": 0x69, "MSCNT": 0x6a, "MSCURACT": 0x6b, "CHOPCONF": 0x6c,
    "COOLCONF": 0x6d, "DCCTRL": 0x6e, "DRV_STATUS": 0x6f, "PWMCONF": 0x70,
    "PWM_SCALE": 0x71, "ENCM_CTRL": 0x72, "LOST_STEPS": 0x73,
}
# Registers that may be queried (e.g. for the DUMP_TMC command).
ReadRegisters = [
    "GCONF", "GSTAT", "IOIN", "TSTEP", "XDIRECT", "MSCNT", "MSCURACT",
    "CHOPCONF", "DRV_STATUS", "PWM_SCALE", "LOST_STEPS",
]
# Per-register bit masks: each entry maps a field name to the mask of the
# bits it occupies within the 32-bit register value.
Fields = {}
Fields["GCONF"] = {
    "I_scale_analog": 1<<0, "internal_Rsense": 1<<1, "en_pwm_mode": 1<<2,
    "enc_commutation": 1<<3, "shaft": 1<<4, "diag0_error": 1<<5,
    "diag0_otpw": 1<<6, "diag0_stall": 1<<7, "diag1_stall": 1<<8,
    "diag1_index": 1<<9, "diag1_onstate": 1<<10, "diag1_steps_skipped": 1<<11,
    "diag0_int_pushpull": 1<<12, "diag1_pushpull": 1<<13,
    "small_hysteresis": 1<<14, "stop_enable": 1<<15, "direct_mode": 1<<16,
    "test_mode": 1<<17
}
Fields["GSTAT"] = { "reset": 1<<0, "drv_err": 1<<1, "uv_cp": 1<<2 }
Fields["IOIN"] = {
    "STEP": 1<<0, "DIR": 1<<1, "DCEN_CFG4": 1<<2, "DCIN_CFG5": 1<<3,
    "DRV_ENN_CFG6": 1<<4, "DCO": 1<<5, "VERSION": 0xff << 24
}
Fields["IHOLD_IRUN"] = {
    "IHOLD": 0x1f << 0, "IRUN": 0x1f << 8, "IHOLDDELAY": 0x0f << 16
}
Fields["TPOWERDOWN"] = { "TPOWERDOWN": 0xff }
Fields["TSTEP"] = { "TSTEP": 0xfffff }
Fields["TPWMTHRS"] = { "TPWMTHRS": 0xfffff }
Fields["TCOOLTHRS"] = { "TCOOLTHRS": 0xfffff }
Fields["THIGH"] = { "THIGH": 0xfffff }
Fields["MSCNT"] = { "MSCNT": 0x3ff }
Fields["MSCURACT"] = { "CUR_A": 0x1ff, "CUR_B": 0x1ff << 16 }
Fields["CHOPCONF"] = {
    "toff": 0x0f, "hstrt": 0x07 << 4, "hend": 0x0f << 7, "fd3": 1<<11,
    "disfdcc": 1<<12, "rndtf": 1<<13, "chm": 1<<14, "TBL": 0x03 << 15,
    "vsense": 1<<17, "vhighfs": 1<<18, "vhighchm": 1<<19, "sync": 0x0f << 20,
    "MRES": 0x0f << 24, "intpol": 1<<28, "dedge": 1<<29, "diss2g": 1<<30
}
Fields["COOLCONF"] = {
    "semin": 0x0f, "seup": 0x03 << 5, "semax": 0x0f << 8, "sedn": 0x03 << 13,
    "seimin": 1<<15, "sgt": 0x7f << 16, "sfilt": 1<<24
}
Fields["DRV_STATUS"] = {
    "SG_RESULT": 0x3ff, "fsactive": 1<<15, "CS_ACTUAL": 0x1f << 16,
    "stallGuard": 1<<24, "ot": 1<<25, "otpw": 1<<26, "s2ga": 1<<27,
    "s2gb": 1<<28, "ola": 1<<29, "olb": 1<<30, "stst": 1<<31
}
Fields["PWMCONF"] = {
    "PWM_AMPL": 0xff, "PWM_GRAD": 0xff << 8, "pwm_freq": 0x03 << 16,
    "pwm_autoscale": 1<<18, "pwm_symmetric": 1<<19, "freewheel": 0x03 << 20
}
Fields["PWM_SCALE"] = { "PWM_SCALE": 0xff }
Fields["LOST_STEPS"] = { "LOST_STEPS": 0xfffff }
# Fields interpreted as two's-complement signed values.
SignedFields = ["CUR_A", "CUR_B", "sgt"]
# Pretty-printers used when dumping register contents to the user.
FieldFormatters = {
    "I_scale_analog": (lambda v: "1(ExtVREF)" if v else ""),
    "shaft": (lambda v: "1(Reverse)" if v else ""),
    "reset": (lambda v: "1(Reset)" if v else ""),
    "drv_err": (lambda v: "1(ErrorShutdown!)" if v else ""),
    "uv_cp": (lambda v: "1(Undervoltage!)" if v else ""),
    "VERSION": (lambda v: "%#x" % v),
    "MRES": (lambda v: "%d(%dusteps)" % (v, 0x100 >> v)),
    "otpw": (lambda v: "1(OvertempWarning!)" if v else ""),
    "ot": (lambda v: "1(OvertempError!)" if v else ""),
    "s2ga": (lambda v: "1(ShortToGND_A!)" if v else ""),
    "s2gb": (lambda v: "1(ShortToGND_B!)" if v else ""),
    "ola": (lambda v: "1(OpenLoad_A!)" if v else ""),
    "olb": (lambda v: "1(OpenLoad_B!)" if v else ""),
    "CS_ACTUAL": (lambda v: ("%d" % v) if v else "0(Reset?)"),
}


######################################################################
# TMC stepper current config helper
######################################################################

MAX_CURRENT = 2.000
class TMCCurrentHelper:
    """Translate run/hold currents (Amps) into TMC2130 register fields.

    The driver current is controlled by a 5-bit "current scale" (IRUN /
    IHOLD) together with the CHOPCONF vsense bit, which selects between
    two sense voltage references (0.32V, or 0.18V when vsense is set).
    When both currents fit below scale 16, the lower reference is used
    for finer resolution.
    """
    def __init__(self, config, mcu_tmc):
        self.printer = config.get_printer()
        self.name = config.get_name().split()[-1]
        self.mcu_tmc = mcu_tmc
        self.fields = mcu_tmc.get_fields()
        run_current = config.getfloat('run_current',
                                      above=0., maxval=MAX_CURRENT)
        hold_current = config.getfloat('hold_current', run_current,
                                       above=0., maxval=MAX_CURRENT)
        self.sense_resistor = config.getfloat('sense_resistor', 0.110, above=0.)
        vsense, irun, ihold = self._calc_current(run_current, hold_current)
        self.fields.set_field("vsense", vsense)
        self.fields.set_field("IHOLD", ihold)
        self.fields.set_field("IRUN", irun)
    def _calc_current_bits(self, current, vsense):
        # Convert amps to a 0..31 current scale value.  The extra 0.020
        # Ohm matches the original calculation; presumably it accounts
        # for internal/trace resistance - see the TMC2130 datasheet.
        resistance = self.sense_resistor + 0.020
        vref = 0.18 if vsense else 0.32
        bits = int(32. * current * resistance * math.sqrt(2.) / vref
                   - 1. + .5)
        return max(0, min(31, bits))
    def _calc_current(self, run_current, hold_current):
        # Hold current can never exceed the run current
        hold_current = min(hold_current, run_current)
        vsense = False
        irun = self._calc_current_bits(run_current, vsense)
        ihold = self._calc_current_bits(hold_current, vsense)
        if max(irun, ihold) < 16:
            # Both currents fit the high-sensitivity range; use it
            vsense = True
            irun = self._calc_current_bits(run_current, vsense)
            ihold = self._calc_current_bits(hold_current, vsense)
        return vsense, irun, ihold
    def _calc_current_from_field(self, field_name):
        # Inverse of _calc_current_bits() using the current register state
        bits = self.fields.get_field(field_name)
        resistance = self.sense_resistor + 0.020
        vref = 0.18 if self.fields.get_field("vsense") else 0.32
        return (bits + 1) * vref / (32 * resistance * math.sqrt(2.))
    def get_current(self):
        # Return (run_current, hold_current, max_configurable_current)
        return (self._calc_current_from_field("IRUN"),
                self._calc_current_from_field("IHOLD"),
                MAX_CURRENT)
    def set_current(self, run_current, hold_current, print_time):
        vsense, irun, ihold = self._calc_current(run_current, hold_current)
        if vsense != self.fields.get_field("vsense"):
            # Sense range changed - CHOPCONF must be rewritten as well
            chopconf = self.fields.set_field("vsense", vsense)
            self.mcu_tmc.set_register("CHOPCONF", chopconf, print_time)
        self.fields.set_field("IHOLD", ihold)
        ihold_irun = self.fields.set_field("IRUN", irun)
        self.mcu_tmc.set_register("IHOLD_IRUN", ihold_irun, print_time)
######################################################################
# TMC2130 SPI
######################################################################
class MCU_TMC_SPI_chain:
    """SPI transport for one or more daisy-chained TMC2130 drivers.

    Each TMC SPI datagram is 5 bytes (8-bit register address followed by
    32 bits of data).  With a chain, one transfer shifts a datagram
    through every driver; zero-filled dummy datagrams pad the positions
    of the other drivers.  `chain_pos` is 1-based.
    """
    def __init__(self, config, chain_len=1):
        self.printer = config.get_printer()
        self.chain_len = chain_len
        # Serialize access so interleaved reads/writes can't corrupt a chain
        self.mutex = self.printer.get_reactor().mutex()
        share = None
        if chain_len > 1:
            # Multiple driver config sections share the same CS pin/bus
            share = "tmc_spi_cs"
        self.spi = bus.MCU_SPI_from_config(config, 3, default_speed=4000000,
                                           share_type=share)
        self.taken_chain_positions = []
    def _build_cmd(self, data, chain_pos):
        # Pad the 5-byte datagram with zero datagrams for the other
        # positions in the chain.
        return ([0x00] * ((self.chain_len - chain_pos) * 5) +
                data + [0x00] * ((chain_pos - 1) * 5))
    def reg_read(self, reg, chain_pos):
        """Read the 32-bit value of register `reg` at `chain_pos`."""
        cmd = self._build_cmd([reg, 0x00, 0x00, 0x00, 0x00], chain_pos)
        # TMC SPI responses carry the data of the *previous* request, so
        # the address is sent first and the reply clocked out by a second
        # transfer of the same command.
        self.spi.spi_send(cmd)
        if self.printer.get_start_args().get('debugoutput') is not None:
            # Debug/batch mode - there is no hardware to answer
            return 0
        params = self.spi.spi_transfer(cmd)
        pr = bytearray(params['response'])
        # Extract the 5 bytes belonging to this chain position
        pr = pr[(self.chain_len - chain_pos) * 5 :
                (self.chain_len - chain_pos + 1) * 5]
        return (pr[1] << 24) | (pr[2] << 16) | (pr[3] << 8) | pr[4]
    def reg_write(self, reg, val, chain_pos, print_time=None):
        """Write `val` to register `reg`; returns the value read back."""
        minclock = 0
        if print_time is not None:
            minclock = self.spi.get_mcu().print_time_to_clock(print_time)
        # Set the high bit of the address to request a write
        data = [(reg | 0x80) & 0xff, (val >> 24) & 0xff, (val >> 16) & 0xff,
                (val >> 8) & 0xff, val & 0xff]
        if self.printer.get_start_args().get('debugoutput') is not None:
            self.spi.spi_send(self._build_cmd(data, chain_pos), minclock)
            return val
        write_cmd = self._build_cmd(data, chain_pos)
        dummy_read = self._build_cmd([0x00, 0x00, 0x00, 0x00, 0x00], chain_pos)
        # Send the write, then a dummy transfer to clock back the result
        params = self.spi.spi_transfer_with_preface(write_cmd, dummy_read,
                                                    minclock=minclock)
        pr = bytearray(params['response'])
        pr = pr[(self.chain_len - chain_pos) * 5 :
                (self.chain_len - chain_pos + 1) * 5]
        return (pr[1] << 24) | (pr[2] << 16) | (pr[3] << 8) | pr[4]
# Helper to setup an spi daisy chain bus from settings in a config section
# Helper to setup an spi daisy chain bus from settings in a config section
def lookup_tmc_spi_chain(config):
    """Return (MCU_TMC_SPI_chain, chain_position) for a config section.

    Without a 'chain_length' option a dedicated single-driver chain is
    created.  Otherwise the chain object is shared between all driver
    sections using the same CS pin, and each section claims a unique
    1-based 'chain_position'.
    """
    length = config.getint('chain_length', None, minval=2)
    if length is None:
        # Simple, non daisy chained SPI connection
        return MCU_TMC_SPI_chain(config, 1), 1
    # Shared SPI bus - look up (or create) the chain stored on the CS pin
    ppins = config.get_printer().lookup_object("pins")
    cs_pin_params = ppins.lookup_pin(config.get('cs_pin'),
                                     share_type="tmc_spi_cs")
    chain = cs_pin_params.get('class')
    if chain is None:
        chain = cs_pin_params['class'] = MCU_TMC_SPI_chain(config, length)
    if length != chain.chain_len:
        raise config.error("TMC SPI chain must have same length")
    position = config.getint('chain_position', minval=1, maxval=length)
    if position in chain.taken_chain_positions:
        raise config.error("TMC SPI chain can not have duplicate position")
    chain.taken_chain_positions.append(position)
    return chain, position
# Helper code for working with TMC devices via SPI
# Helper code for working with TMC devices via SPI
class MCU_TMC_SPI:
    """Register-level interface to a single TMC2130 on an SPI chain."""
    def __init__(self, config, name_to_reg, fields):
        self.printer = config.get_printer()
        self.name = config.get_name().split()[-1]
        self.tmc_spi, self.chain_pos = lookup_tmc_spi_chain(config)
        self.mutex = self.tmc_spi.mutex
        self.name_to_reg = name_to_reg
        self.fields = fields
    def get_fields(self):
        # Expose the field helper describing this chip's registers
        return self.fields
    def get_register(self, reg_name):
        """Read and return the value of the named register."""
        addr = self.name_to_reg[reg_name]
        with self.mutex:
            return self.tmc_spi.reg_read(addr, self.chain_pos)
    def set_register(self, reg_name, val, print_time=None):
        """Write `val` to the named register, verifying via read-back."""
        addr = self.name_to_reg[reg_name]
        with self.mutex:
            for _ in range(5):
                # reg_write() returns the value clocked back from the chip
                if self.tmc_spi.reg_write(addr, val, self.chain_pos,
                                          print_time) == val:
                    return
        raise self.printer.command_error(
            "Unable to write tmc spi '%s' register %s" % (self.name, reg_name))
######################################################################
# TMC2130 printer object
######################################################################
class TMC2130:
    """Printer object for a [tmc2130 ...] config section.

    Wires the generic TMC helpers (current config, microsteps,
    stealthchop, virtual diag pin, register dump commands) to the
    TMC2130-specific register map and SPI transport, then applies
    default/config register values.
    """
    def __init__(self, config):
        # Setup mcu communication
        self.fields = tmc.FieldHelper(Fields, SignedFields, FieldFormatters)
        self.mcu_tmc = MCU_TMC_SPI(config, Registers, self.fields)
        # Allow virtual pins to be created
        tmc.TMCVirtualPinHelper(config, self.mcu_tmc)
        # Register commands
        current_helper = TMCCurrentHelper(config, self.mcu_tmc)
        cmdhelper = tmc.TMCCommandHelper(config, self.mcu_tmc, current_helper)
        cmdhelper.setup_register_dump(ReadRegisters)
        # Setup basic register values
        mh = tmc.TMCMicrostepHelper(config, self.mcu_tmc)
        self.get_microsteps = mh.get_microsteps
        self.get_phase = mh.get_phase
        tmc.TMCStealthchopHelper(config, self.mcu_tmc, TMC_FREQUENCY)
        # Allow other registers to be set from the config
        set_config_field = self.fields.set_config_field
        set_config_field(config, "toff", 4)
        set_config_field(config, "hstrt", 0)
        set_config_field(config, "hend", 7)
        set_config_field(config, "TBL", 1)
        set_config_field(config, "IHOLDDELAY", 8)
        set_config_field(config, "TPOWERDOWN", 0)
        set_config_field(config, "PWM_AMPL", 128)
        set_config_field(config, "PWM_GRAD", 4)
        set_config_field(config, "pwm_freq", 1)
        set_config_field(config, "pwm_autoscale", True)
        set_config_field(config, "sgt", 0)
def load_config_prefix(config):
    # Klippy entry point: one TMC2130 instance per [tmc2130 <name>] section.
    return TMC2130(config)
| gpl-3.0 |
mnicholl/MOSFiT | mosfit/modules/energetics/bns_ejecta_generative.py | 2 | 7578 | """Definitions for the `BNSEjecta` class."""
# import astropy.constants as c
import numpy as np
from astrocats.catalog.source import SOURCE
from mosfit.constants import FOE, KM_CGS, M_SUN_CGS, C_CGS, G_CGS
from mosfit.modules.energetics.energetic import Energetic
# G_CGS = c.G.cgs.value
class BNSEjecta(Energetic):
    """
    Generate `mejecta`, `vejecta` and `kappa` from neutron star binary
    parameters.
    Includes tidal and shocked dynamical and disk wind ejecta following
    Dietrich+ 2017 and Coughlin+ 2019, with opacities from Sekiguchi+ 2016,
    Tanaka+ 2019, Metzger and Fernandez 2014, Lippuner+ 2017
    Also includes an ignorance parameter `alpha` for NS-driven winds to
    increase the fraction of blue ejecta: Mdyn_blue /= alpha
    - therefore NS surface winds turned off by setting alpha = 1
    """
    # Literature sources for the fitting formulae used below.
    _REFERENCES = [
        {SOURCE.BIBCODE: '2017CQGra..34j5014D'},
        {SOURCE.BIBCODE: '2019MNRAS.489L..91C'},
        {SOURCE.BIBCODE: '2013PhRvD..88b3007M'},
        {SOURCE.BIBCODE: '2016PhRvD..93l4046S'},
        {SOURCE.BIBCODE: '2014MNRAS.441.3444M'},
        {SOURCE.BIBCODE: '2017MNRAS.472..904L'},
        {SOURCE.BIBCODE: '2019LRR....23....1M'},
        {SOURCE.BIBCODE: '2020MNRAS.496.1369T'},
        {SOURCE.BIBCODE: '2018PhRvL.121i1102D'}
    ]
    def process(self, **kwargs):
        """Process module."""
        ckm = C_CGS / KM_CGS  # speed of light in km/s
        self._mchirp = kwargs[self.key('Mchirp')]
        self._q = kwargs[self.key('q')]
        # Mass of heavier NS
        self._m1 = self._mchirp * self._q**-0.6 * (self._q+1)**0.2
        # Mass of lighter NS
        self._m2 = self._m1*self._q
        self._m_total = self._m1 + self._m2
        # How much of disk is ejected
        self._disk_frac = kwargs[self.key('disk_frac')]
        # Max mass for non-rotating NS
        self._m_tov = kwargs[self.key('Mtov')]
        # NS radius
        self._radius_ns = kwargs[self.key('radius_ns')]
        # Fraction of blue ejecta from dynamical shocks (Coughlin+ 2019)
        # Here we are assuming remainder is a NS wind
        # So only applicable if merger product avoids prompt collapse
        self._alpha = kwargs[self.key('alpha')]
        # Opening angle
        self._cos_theta_open = kwargs[self.key('cos_theta_open')]
        theta_open = np.arccos(self._cos_theta_open)
        # Additional systematic scatter (if desired)
        self._errMdyn = kwargs[self.key('errMdyn')]
        self._errMdisk = kwargs[self.key('errMdisk')]
        # Compactness GM/(R c^2) of each star (radius in km -> cm via 1e5)
        C1 = G_CGS * self._m1 * M_SUN_CGS /(self._radius_ns*1e5 * C_CGS**2)
        C2 = G_CGS * self._m2 * M_SUN_CGS /(self._radius_ns*1e5 * C_CGS**2)
        # Baryonic masses, Gao 2019
        Mb1 = self._m1 + 0.08*self._m1**2
        Mb2 = self._m2 + 0.08*self._m2**2
        # Dynamical ejecta:
        # Fitting function from Dietrich and Ujevic 2017
        # NOTE(review): the (1/3) exponents rely on Python 3 true
        # division; under Python 2 they would evaluate to 0.
        a_1 = -1.35695
        b_1 = 6.11252
        c_1 = -49.43355
        d_1 = 16.1144
        n = -2.5484
        Mejdyn = 1e-3* (a_1*((self._m2/self._m1)**(1/3)*(1-2*C1)/C1*Mb1 +
            (self._m1/self._m2)**(1/3)*(1-2*C2)/C2*Mb2) +
            b_1*((self._m2/self._m1)**n*Mb1 + (self._m1/self._m2)**n*Mb2) +
            c_1*(Mb1-self._m1 + Mb2-self._m2) + d_1)
        Mejdyn *= self._errMdyn
        # The fit can go (unphysically) negative; clamp at zero
        if Mejdyn < 0:
            Mejdyn = 0
        # Calculate fraction of ejecta with Ye<0.25 from fits to Sekiguchi 2016
        # Also consistent with Dietrich: mostly blue at M1/M2=1, all red by M1/M2=1.2.
        # And see Bauswein 2013, shocked (blue) component decreases with M1/M2
        a_4 = 14.8609
        b_4 = -28.6148
        c_4 = 13.9597
        f_red = min([a_4*(self._m1/self._m2)**2+b_4*(self._m1/self._m2)+c_4,1]) # fraction can't exceed 100%
        # Velocity of dynamical ejecta
        # (polar and in-plane components, Dietrich & Ujevic 2017 fits)
        a_2 = -0.219479
        b_2 = 0.444836
        c_2 = -2.67385
        vdynp = a_2*((self._m1/self._m2)*(1+c_2*C1) + (self._m2/self._m1)*(1+c_2*C2)) + b_2
        a_3 = -0.315585
        b_3 = 0.63808
        c_3 = -1.00757
        vdynz = a_3*((self._m1/self._m2)*(1+c_3*C1) + (self._m2/self._m1)*(1+c_3*C2)) + b_3
        vdyn = np.sqrt(vdynp**2+vdynz**2)
        # average velocity over angular ranges (< and > theta_open)
        theta1 = np.arange(0,theta_open,0.01)
        theta2 = np.arange(theta_open,np.pi/2,0.01)
        vtheta1 = np.sqrt((vdynz*np.cos(theta1))**2+(vdynp*np.sin(theta1))**2)
        vtheta2 = np.sqrt((vdynz*np.cos(theta2))**2+(vdynp*np.sin(theta2))**2)
        # Solid-angle weights for the average
        atheta1 = 2*np.pi*np.sin(theta1)
        atheta2 = 2*np.pi*np.sin(theta2)
        vejecta_blue = np.trapz(vtheta1*atheta1,x=theta1)/np.trapz(atheta1,x=theta1)
        vejecta_red = np.trapz(vtheta2*atheta2,x=theta2)/np.trapz(atheta2,x=theta2)
        mejecta_red = Mejdyn * f_red
        vejecta_red *= ckm
        mejecta_blue = Mejdyn * (1-f_red)
        vejecta_blue *= ckm
        # Bauswein 2013, cut-off for prompt collapse to BH
        Mthr = (2.38-3.606*self._m_tov/self._radius_ns)*self._m_tov
        if self._m_total < Mthr:
            # Remnant survives: boost blue ejecta by the NS-wind factor
            mejecta_blue /= self._alpha
        # Now compute disk ejecta following Coughlin+ 2019
        a_5 = -31.335
        b_5 = -0.9760
        c_5 = 1.0474
        d_5 = 0.05957
        # log10 disk mass, floored at 1e-3 Msun
        logMdisk = np.max([-3, a_5*(1+b_5*np.tanh((c_5-self._m_total/Mthr)/d_5))])
        Mdisk = 10**logMdisk
        Mdisk *= self._errMdisk
        Mejdisk = Mdisk * self._disk_frac
        mejecta_purple = Mejdisk
        # Fit for disk velocity using Metzger and Fernandez
        vdisk_max = 0.15
        vdisk_min = 0.03
        # Linear interpolation of disk velocity between Mtov and Mthr
        vfit = np.polyfit([self._m_tov,Mthr],[vdisk_max,vdisk_min],deg=1)
        # Get average opacity of 'purple' (disk) component
        # Mass-averaged Ye as a function of remnant lifetime from Lippuner 2017
        # Lifetime related to Mtot using Metzger handbook table 3
        if self._m_total < self._m_tov:
            # stable NS
            Ye = 0.38
            vdisk = vdisk_max
        elif self._m_total < 1.2*self._m_tov:
            # long-lived (>>100 ms) NS remnant Ye = 0.34-0.38,
            # smooth interpolation
            Yfit = np.polyfit([self._m_tov,1.2*self._m_tov],[0.38,0.34],deg=1)
            Ye = Yfit[0]*self._m_total + Yfit[1]
            vdisk = vfit[0]*self._m_total + vfit[1]
        elif self._m_total < Mthr:
            # short-lived (hypermassive) NS, Ye = 0.25-0.34, smooth interpolation
            Yfit = np.polyfit([1.2*self._m_tov,Mthr],[0.34,0.25],deg=1)
            Ye = Yfit[0]*self._m_total + Yfit[1]
            vdisk = vfit[0]*self._m_total + vfit[1]
        else:
            # prompt collapse to BH, disk is red
            Ye = 0.25
            vdisk = vdisk_min
        # Convert Ye to opacity using Tanaka et al 2019 for Ye >= 0.25:
        a_6 = 2112.0
        b_6 = -2238.9
        c_6 = 742.35
        d_6 = -73.14
        kappa_purple = a_6*Ye**3 + b_6*Ye**2 + c_6*Ye + d_6
        vejecta_purple = vdisk * ckm
        # Masses in Msun; velocities in km/s; opacity in cm^2/g
        return {self.key('mejecta_blue'): mejecta_blue,
                self.key('mejecta_red'): mejecta_red,
                self.key('mejecta_purple'): mejecta_purple,
                self.key('mejecta_dyn'): Mejdyn,
                self.key('vejecta_blue'): vejecta_blue,
                self.key('vejecta_red'): vejecta_red,
                self.key('vejecta_purple'): vejecta_purple,
                self.key('kappa_purple'): kappa_purple,
                self.key('M1'): self._m1,
                self.key('M2'): self._m2,
                self.key('radius_ns'): self._radius_ns
                }
| mit |
SujaySKumar/django | django/contrib/postgres/forms/array.py | 258 | 6743 | import copy
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import string_concat, ugettext_lazy as _
class SimpleArrayField(forms.CharField):
    """Form field representing an array as a single delimited string.

    Each item is converted/validated by ``base_field``; items are joined with
    ``delimiter`` for display and split on it for input.
    """

    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
        self.base_field = base_field
        self.delimiter = delimiter
        super(SimpleArrayField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.min_length = min_length
            self.validators.append(ArrayMinLengthValidator(int(min_length)))
        if max_length is not None:
            self.max_length = max_length
            self.validators.append(ArrayMaxLengthValidator(int(max_length)))

    def _item_errors(self, index, exc):
        # Shared helper for to_python/validate/run_validators: wrap every
        # underlying error with a message identifying the failing item
        # (0-based index), keeping code and params identical to the original
        # triplicated loops.
        return [
            ValidationError(
                string_concat(self.error_messages['item_invalid'], error.message),
                code='item_invalid',
                params={'nth': index},
            )
            for error in exc.error_list
        ]

    def prepare_value(self, value):
        """Join a list value into a delimited string for rendering."""
        if isinstance(value, list):
            return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value)
        return value

    def to_python(self, value):
        """Split the raw string on the delimiter and convert each item."""
        if value:
            items = value.split(self.delimiter)
        else:
            items = []
        errors = []
        values = []
        for i, item in enumerate(items):
            try:
                values.append(self.base_field.to_python(item))
            except ValidationError as e:
                errors.extend(self._item_errors(i, e))
        if errors:
            raise ValidationError(errors)
        return values

    def validate(self, value):
        """Validate the whole list, then each item with the base field."""
        super(SimpleArrayField, self).validate(value)
        errors = []
        for i, item in enumerate(value):
            try:
                self.base_field.validate(item)
            except ValidationError as e:
                errors.extend(self._item_errors(i, e))
        if errors:
            raise ValidationError(errors)

    def run_validators(self, value):
        """Run the base field's validators against every item."""
        super(SimpleArrayField, self).run_validators(value)
        errors = []
        for i, item in enumerate(value):
            try:
                self.base_field.run_validators(item)
            except ValidationError as e:
                errors.extend(self._item_errors(i, e))
        if errors:
            raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
    """Widget that renders ``size`` copies of a sub-widget, one per array item.

    Sub-widget names/ids follow the ``<name>_<index>`` convention.
    """

    def __init__(self, widget, size, **kwargs):
        # Accept either a widget class or an instance.
        self.widget = widget() if isinstance(widget, type) else widget
        self.size = size
        super(SplitArrayWidget, self).__init__(**kwargs)

    @property
    def is_hidden(self):
        return self.widget.is_hidden

    def value_from_datadict(self, data, files, name):
        # One value per sub-widget, keyed "<name>_<index>".
        return [
            self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
            for index in range(self.size)
        ]

    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        return id_ + '_0' if id_ else id_

    def render(self, name, value, attrs=None):
        if self.is_localized:
            self.widget.is_localized = self.is_localized
        value = value or []
        pieces = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id')
        num_widgets = max(len(value), self.size)
        for index in range(num_widgets):
            # Pad with None when there are fewer values than widgets.
            item = value[index] if index < len(value) else None
            if id_:
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, index))
            pieces.append(self.widget.render(name + '_%s' % index, item, final_attrs))
        return mark_safe(self.format_output(pieces))

    def format_output(self, rendered_widgets):
        return ''.join(rendered_widgets)

    @property
    def media(self):
        return self.widget.media

    def __deepcopy__(self, memo):
        clone = super(SplitArrayWidget, self).__deepcopy__(memo)
        clone.widget = copy.deepcopy(self.widget)
        return clone

    @property
    def needs_multipart_form(self):
        return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
    """Form field presenting an array as ``size`` separate sub-fields."""

    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
        self.base_field = base_field
        self.size = size
        self.remove_trailing_nulls = remove_trailing_nulls
        widget = SplitArrayWidget(widget=base_field.widget, size=size)
        kwargs.setdefault('widget', widget)
        super(SplitArrayField, self).__init__(**kwargs)

    def clean(self, value):
        """Clean each item with the base field.

        Optionally trims trailing empty values; raises a ValidationError
        aggregating per-item failures, otherwise returns the cleaned list.
        """
        cleaned_data = []
        errors = []
        if not any(value) and self.required:
            raise ValidationError(self.error_messages['required'])
        max_size = max(self.size, len(value))
        for i in range(max_size):
            item = value[i]
            try:
                cleaned_data.append(self.base_field.clean(item))
                errors.append(None)
            except ValidationError as error:
                errors.append(ValidationError(
                    string_concat(self.error_messages['item_invalid'], error.message),
                    code='item_invalid',
                    params={'nth': i},
                ))
                cleaned_data.append(None)
        if self.remove_trailing_nulls:
            null_index = None
            # Walk backwards to find where the trailing run of empty values
            # starts.  (Loop variable renamed so it no longer shadows the
            # ``value`` parameter.)
            for i, cleaned in reversed(list(enumerate(cleaned_data))):
                if cleaned in self.base_field.empty_values:
                    null_index = i
                else:
                    break
            # BUG FIX: must compare against None explicitly.  The previous
            # ``if null_index:`` skipped trimming when every item was empty
            # (null_index == 0 is falsy), leaving a list full of Nones.
            if null_index is not None:
                cleaned_data = cleaned_data[:null_index]
                errors = errors[:null_index]
        errors = list(filter(None, errors))
        if errors:
            raise ValidationError(errors)
        return cleaned_data
| bsd-3-clause |
guegue/forocacao | config/wsgi.py | 3 | 1621 | """
WSGI config for forocacao project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use WhiteNoise to serve static files directly from the WSGI layer, so no
# separate static-file web server is required.
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause |
hellodata/hellodate | 2/site-packages/django/utils/translation/__init__.py | 49 | 6780 | """
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
    """Warning emitted for malformed translator comments during message extraction."""
    pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
    """
    The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect to which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.
    Note that storing the function with setattr will have a noticeable
    performance effect, as access to the function goes the normal path,
    instead of using __getattr__.
    """
    def __getattr__(self, real_name):
        # Imported lazily so this module can be imported before Django
        # settings are configured.
        from django.conf import settings
        if settings.USE_I18N:
            from django.utils.translation import trans_real as trans
        else:
            from django.utils.translation import trans_null as trans
        # Cache on the instance: later lookups hit the attribute directly
        # and never re-enter __getattr__.
        setattr(self, real_name, getattr(trans, real_name))
        return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
# Thin delegators: each public translation function forwards to the _trans
# proxy, which resolves to trans_real or trans_null on first use depending
# on settings.USE_I18N.
def gettext_noop(message):
    """Mark *message* for translation without translating it now."""
    return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
    return _trans.gettext(message)
def ngettext(singular, plural, number):
    return _trans.ngettext(singular, plural, number)
def ugettext(message):
    return _trans.ugettext(message)
def ungettext(singular, plural, number):
    return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
    return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
    return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
    """Build a lazy plural-aware translation proxy.

    If ``number`` is a literal int the plural form is decided immediately.
    Otherwise ``number`` names the key (for dict interpolation) or stands for
    the whole right-hand side of ``%``, and the plural form is chosen at
    interpolation time.
    """
    if isinstance(number, six.integer_types):
        # Count known up front -- an ordinary lazy proxy suffices.
        kwargs['number'] = number
        proxy = lazy(func, resultclass)(**kwargs)
    else:
        class NumberAwareString(resultclass):
            def __mod__(self, rhs):
                # Pull the actual count out of the interpolation operand.
                if isinstance(rhs, dict) and number:
                    try:
                        number_value = rhs[number]
                    except KeyError:
                        raise KeyError('Your dictionary lacks key \'%s\'. '
                                       'Please provide it, because it is required to '
                                       'determine whether string is singular or plural.'
                                       % number)
                else:
                    number_value = rhs
                # NOTE: ``kwargs`` is the enclosing closure's dict, mutated
                # here so ``func`` sees the resolved count.
                kwargs['number'] = number_value
                translated = func(**kwargs)
                try:
                    translated = translated % rhs
                except TypeError:
                    # String doesn't contain a placeholder for the number
                    pass
                return translated
        proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
    return proxy
# Lazy plural variants (evaluated on first string use) plus the
# activate/deactivate delegators for switching the active language.
def ngettext_lazy(singular, plural, number=None):
    return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
    return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
    return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
def activate(language):
    return _trans.activate(language)
def deactivate():
    return _trans.deactivate()
class override(object):
    """Context manager that temporarily switches the active language.

    ``language=None`` deactivates translation entirely inside the block.
    With ``deactivate=True`` the block exits by deactivating rather than
    restoring the previously active language.
    """
    def __init__(self, language, deactivate=False):
        self.language = language
        self.deactivate = deactivate
        # Remember what was active so __exit__ can restore it.
        self.old_language = get_language()

    def __enter__(self):
        if self.language is None:
            deactivate_all()
        else:
            activate(self.language)

    def __exit__(self, exc_type, exc_value, traceback):
        if self.deactivate:
            deactivate()
        else:
            activate(self.old_language)
# Remaining delegators to the lazily-resolved _trans backend.
def get_language():
    return _trans.get_language()
def get_language_bidi():
    return _trans.get_language_bidi()
def check_for_language(lang_code):
    return _trans.check_for_language(lang_code)
def to_locale(language):
    return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
    return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
    return _trans.get_language_from_path(path)
def templatize(src, origin=None):
    return _trans.templatize(src, origin)
def deactivate_all():
    return _trans.deactivate_all()
def _string_concat(*strings):
    """
    Lazy variant of string concatenation, needed for translations that are
    constructed from multiple parts.
    """
    return ''.join(force_text(s) for s in strings)
# Public lazy wrapper: concatenation is deferred until the result is used
# as text, so each part can be a lazy translation itself.
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
    """Return the LANG_INFO entry for *lang_code*.

    Falls back to the generic language (the part before ``-``) for codes
    like ``pt-br``; raises KeyError when neither form is known.
    """
    from django.conf.locale import LANG_INFO
    if lang_code in LANG_INFO:
        return LANG_INFO[lang_code]
    if '-' not in lang_code:
        raise KeyError("Unknown language code %s." % lang_code)
    generic_lang_code = lang_code.split('-')[0]
    if generic_lang_code in LANG_INFO:
        return LANG_INFO[generic_lang_code]
    raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
# Raw string avoids the invalid-escape DeprecationWarning ('\s' in a normal
# string literal) raised by Python 3.6+ and scheduled to become an error.
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
    """Strip *s* and collapse each newline (with surrounding whitespace) to one space."""
    return trim_whitespace_re.sub(' ', s.strip())
| lgpl-3.0 |
pannarale/pycbc | pycbc/results/dq.py | 12 | 2187 | '''This module contains utilities for following up search triggers'''
# JavaScript for searching the aLOG
# JavaScript helper that submits the hidden aLOG search form (or follows a
# plain link) when the user clicks.
redirect_javascript = """<script type="text/javascript">
function redirect(form,way)
{
// Set location to form and submit.
if(form != '')
{
document.forms[form].action=way;
document.forms[form].submit();
}
else
{
window.top.location = way;
}
}
</script>"""
# Hidden form carrying the date range for the aLOG search; %s slots are
# (ifo, ifo, date, date).
search_form_string="""<form name="%s_alog_search" id="%s_alog_search" method="post">
<input type="hidden" name="srcDateFrom" id="srcDateFrom" value="%s" size="20"/>
<input type="hidden" name="srcDateTo" id="srcDateTo" value="%s" size="20"/>
</form>"""
data_h1_string = """H1
<a href=https://ldas-jobs.ligo-wa.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('h1_alog_search',
'https://alog.ligo-wa.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
data_l1_string="""L1
<a href=https://ldas-jobs.ligo-la.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('l1_alog_search',
'https://alog.ligo-la.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
def get_summary_page_link(ifo, utc_time):
    """Return a string that links to the summary page and aLOG for this ifo
    Parameters
    ----------
    ifo : string
        The detector name
    utc_time : sequence
        First three elements must give year, month, day resp., as ints or
        numeric strings
    Returns
    -------
    return_string : string
        String containing HTML for links to summary page and aLOG search
    """
    search_form = search_form_string
    data = {'H1': data_h1_string, 'L1': data_l1_string}
    if ifo not in data:
        return ifo
    else:
        # BUG FIX: the %d format codes below require integers, but the
        # docstring promised strings; coerce so both are accepted.
        year, month, day = (int(v) for v in utc_time[:3])
        # alog format is day-month-year
        alog_utc = '%02d-%02d-%4d' % (day, month, year)
        # summary page is exactly the reverse
        ext = '%4d%02d%02d' % (year, month, day)
        return_string = search_form % (ifo.lower(), ifo.lower(), alog_utc, alog_utc)
        return return_string + data[ifo] % ext
| gpl-3.0 |
josenavas/qiime | scripts/make_per_library_sff.py | 15 | 2642 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Daniel McDonald", "Kyle Bittinger"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Kyle Bittinger"
__email__ = "kylebittinger@gmail.com"
from qiime.util import make_option
from qiime.make_per_library_sff import make_per_library_sffs
from qiime.util import parse_command_line_parameters
# make_per_library_sff.py
# Metadata dict consumed by parse_command_line_parameters: help text,
# usage examples, and the option definitions for this CLI script.
script_info = {}
script_info[
    'brief_description'] = """Make per-library sff files from ID lists"""
script_info['script_description'] = """This script generates per-library sff files using a directory of text files, one per library, which list read ID's to be included.
The ID list files should contain one read ID per line. If a line contains multiple words (separated by whitespace), then only the first word is used. A '>' character is stripped from the beginning of the line, if present. Blank lines in the file are skipped.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("""Example:""",
     """Make per-library sff files using input.sff and a directory of libs where each file in the directory contains the id lists for each library:""",
     """make_per_library_sff.py -i input.sff -l libs"""))
script_info[
    'output_description'] = """The result of this script generates sff files for each library."""
script_info['required_options'] = [
    make_option("-i", "--input_sff", type='existing_filepaths',
                help="Input sff file (separate multiple files w/ comma)"),
    make_option("-l", "--libdir", type='existing_dirpath',
                help="Directory containing ID list text files, one per library"),
]
script_info['optional_options'] = [
    make_option("-p", "--sfffile_path", type='string',
                help="Path to sfffile binary [default: use sfffile in $PATH]"),
    make_option('--use_sfftools', action='store_true', default=False,
                help=('Use external sfffile program instead of equivalent Python '
                      'routines.')),
    make_option('--debug', action='store_true', default=False,
                help="Print debugging output to stdout [default: %default]"),
]
script_info['version'] = __version__
def main():
    """Entry point: parse CLI options and build the per-library sff files."""
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    # Delegate all real work to the library routine; arguments are passed
    # positionally in the order it expects.
    make_per_library_sffs(opts.input_sff, opts.libdir, opts.use_sfftools,
                          opts.sfffile_path, opts.debug)
)
if __name__ == "__main__":
main()
| gpl-2.0 |
40223229/2015cdb_g9 | static/Brython3.1.1-20150328-091302/Lib/types.py | 756 | 3167 | """
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
# Throwaway definitions below exist only so their types can be captured;
# they are deleted at the end of this section.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)
def _g():
    yield 1
GeneratorType = type(_g())
class _C:
    def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
# Raise and catch an exception purely to obtain a live traceback object.
try:
    raise TypeError
except TypeError:
    tb = sys.exc_info()[2]
    TracebackType = type(tb)
    FrameType = type(tb.tb_frame)
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)
del sys, _f, _g, _C, # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
    """Create a class object dynamically using the appropriate metaclass.

    ``exec_body``, when given, is called with the prepared namespace so the
    caller can populate the class body before the metaclass is invoked.
    """
    meta, namespace, meta_kwds = prepare_class(name, bases, kwds)
    if exec_body is not None:
        exec_body(namespace)
    return meta(name, bases, namespace, **meta_kwds)
def prepare_class(name, bases=(), kwds=None):
    """Call the __prepare__ method of the appropriate metaclass.

    Returns (metaclass, namespace, kwds) as a 3-tuple

    *metaclass* is the appropriate metaclass
    *namespace* is the prepared class namespace
    *kwds* is an updated copy of the passed in kwds argument with any
    'metaclass' entry removed. If no kwds argument is passed in, this will
    be an empty dict.
    """
    # Work on a copy so the caller's mapping is never mutated.
    kwds = {} if kwds is None else dict(kwds)
    try:
        # An explicit 'metaclass' keyword always wins (even if it is None).
        meta = kwds.pop('metaclass')
    except KeyError:
        meta = type(bases[0]) if bases else type
    if isinstance(meta, type):
        # when meta is a type, we first determine the most-derived metaclass
        # instead of invoking the initial candidate directly
        meta = _calculate_meta(meta, bases)
    ns = meta.__prepare__(name, bases, **kwds) if hasattr(meta, '__prepare__') else {}
    return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
| gpl-2.0 |
zhenzhai/edx-platform | lms/djangoapps/django_comment_client/base/tests.py | 1 | 74458 | """Tests for django comment client views."""
from contextlib import contextmanager
import logging
import json
import ddt
from django.conf import settings
from django.core.cache import caches
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal
from nose.plugins.attrib import attr
from opaque_keys.edx.keys import CourseKey
from lms.lib.comment_client import Thread
from common.test.utils import MockSignalHandlerMixin, disable_signal
from django_comment_client.base import views
from django_comment_client.tests.group_id import CohortedTopicGroupIdTestMixin, NonCohortedTopicGroupIdTestMixin, GroupIdAssertionMixin
from django_comment_client.tests.utils import CohortedTestCase
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles, ThreadContext
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory, CourseAccessRoleFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
# pylint: disable=missing-docstring
class MockRequestSetupMixin(object):
    """Mixin with helpers for stubbing comment-service HTTP responses."""

    def _create_response_mock(self, data):
        # Mimic the pieces of a requests.Response these tests use:
        # ``text`` holds the serialized payload, ``json()`` the parsed data.
        payload = json.dumps(data)
        return Mock(text=payload, json=Mock(return_value=data))

    def _set_mock_request_data(self, mock_request, data):
        # Make the patched request callable hand back the canned response.
        mock_request.return_value = self._create_response_mock(data)
@attr('shard_2')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class CreateThreadGroupIdTestCase(
        MockRequestSetupMixin,
        CohortedTestCase,
        CohortedTopicGroupIdTestMixin,
        NonCohortedTopicGroupIdTestMixin
):
    """Tests that create_thread handles group_id correctly for cohorted and
    non-cohorted topics.  The concrete assertions live in the group-id mixins;
    they drive the view through call_view below."""
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke the create_thread view with a stubbed comment service and
        return its response.  ``pass_group_id=False`` omits group_id from the
        POST data entirely (as opposed to sending None)."""
        self._set_mock_request_data(mock_request, {})
        mock_request.return_value.status_code = 200
        request_data = {"body": "body", "title": "title", "thread_type": "discussion"}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().post("dummy_url", request_data)
        request.user = user
        request.view_name = "create_thread"

        return views.create_thread(
            request,
            course_id=unicode(self.course.id),
            commentable_id=commentable_id
        )

    def test_group_info_in_response(self, mock_request):
        # A student posting to a cohorted topic with no explicit group should
        # still get group info echoed back in the JSON response.
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            None
        )
        self._assert_json_response_contains_group_info(response)
@attr('shard_2')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
@disable_signal(views, 'thread_edited')
@disable_signal(views, 'thread_voted')
@disable_signal(views, 'thread_deleted')
class ThreadActionGroupIdTestCase(
        MockRequestSetupMixin,
        CohortedTestCase,
        GroupIdAssertionMixin
):
    """Verifies that every thread action view (update/delete/vote/flag/pin/
    openclose) preserves group info in its JSON response for a cohorted
    thread."""

    def call_view(
            self,
            view_name,
            mock_request,
            user=None,
            post_params=None,
            view_args=None
    ):
        """Invoke the named thread-action view against a canned cohorted
        thread owned by self.student; defaults to posting as the student."""
        self._set_mock_request_data(
            mock_request,
            {
                "user_id": str(self.student.id),
                "group_id": self.student_cohort.id,
                "closed": False,
                "type": "thread",
                "commentable_id": "non_team_dummy_id"
            }
        )
        mock_request.return_value.status_code = 200
        request = RequestFactory().post("dummy_url", post_params or {})
        request.user = user or self.student
        request.view_name = view_name

        return getattr(views, view_name)(
            request,
            course_id=unicode(self.course.id),
            thread_id="dummy",
            **(view_args or {})
        )

    def test_update(self, mock_request):
        response = self.call_view(
            "update_thread",
            mock_request,
            post_params={"body": "body", "title": "title"}
        )
        self._assert_json_response_contains_group_info(response)

    def test_delete(self, mock_request):
        response = self.call_view("delete_thread", mock_request)
        self._assert_json_response_contains_group_info(response)

    def test_vote(self, mock_request):
        # Exercise both the vote and the undo-vote endpoints.
        response = self.call_view(
            "vote_for_thread",
            mock_request,
            view_args={"value": "up"}
        )
        self._assert_json_response_contains_group_info(response)
        response = self.call_view("undo_vote_for_thread", mock_request)
        self._assert_json_response_contains_group_info(response)

    def test_flag(self, mock_request):
        response = self.call_view("flag_abuse_for_thread", mock_request)
        self._assert_json_response_contains_group_info(response)
        response = self.call_view("un_flag_abuse_for_thread", mock_request)
        self._assert_json_response_contains_group_info(response)

    def test_pin(self, mock_request):
        # Pinning requires moderator privileges.
        response = self.call_view(
            "pin_thread",
            mock_request,
            user=self.moderator
        )
        self._assert_json_response_contains_group_info(response)
        response = self.call_view(
            "un_pin_thread",
            mock_request,
            user=self.moderator
        )
        self._assert_json_response_contains_group_info(response)

    def test_openclose(self, mock_request):
        # openclose wraps its payload in a 'content' key, hence the extractor.
        response = self.call_view(
            "openclose_thread",
            mock_request,
            user=self.moderator
        )
        self._assert_json_response_contains_group_info(
            response,
            lambda d: d['content']
        )
class ViewsTestCaseMixin(object):
    """Shared fixture and helper methods for discussion-view tests: course
    setup, comment-service request stubbing, and thread create/update
    round-trips."""

    def set_up_course(self, module_count=0):
        """
        Creates a course, optionally with module_count discussion modules, and
        a user with appropriate permissions.
        """
        # create a course
        self.course = CourseFactory.create(
            org='MITx', course='999',
            discussion_topics={"Some Topic": {"id": "some_topic"}},
            display_name='Robot Super Course',
        )
        self.course_id = self.course.id

        # add some discussion modules
        for i in range(module_count):
            ItemFactory.create(
                parent_location=self.course.location,
                category='discussion',
                discussion_id='id_module_{}'.format(i),
                discussion_category='Category {}'.format(i),
                discussion_target='Discussion {}'.format(i)
            )

        # seed the forums permissions and roles
        call_command('seed_permissions_roles', unicode(self.course_id))

        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            self.password = 'test'  # pylint: disable=attribute-defined-outside-init

            # Create the user and make them active so we can log them in.
            self.student = User.objects.create_user(uname, email, self.password)  # pylint: disable=attribute-defined-outside-init
            self.student.is_active = True
            self.student.save()

            # Add a discussion moderator
            self.moderator = UserFactory.create(password=self.password)  # pylint: disable=attribute-defined-outside-init

            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student,
                                    course_id=self.course_id)

            # Enroll the moderator and give them the appropriate roles
            CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
            self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))

            assert_true(self.client.login(username='student', password=self.password))

    def _setup_mock_request(self, mock_request, include_depth=False):
        """
        Ensure that mock_request returns the data necessary to make views
        function correctly
        """
        mock_request.return_value.status_code = 200
        data = {
            "user_id": str(self.student.id),
            "closed": False,
            "commentable_id": "non_team_dummy_id"
        }
        if include_depth:
            data["depth"] = 0
        self._set_mock_request_data(mock_request, data)

    def create_thread_helper(self, mock_request, extra_request_data=None, extra_response_data=None):
        """
        Issues a request to create a thread and verifies the result.

        ``extra_request_data`` is merged into the POSTed form data;
        ``extra_response_data`` into the payload expected to reach the
        comment service.
        """
        mock_request.return_value.status_code = 200
        # Canned comment-service response for the newly created thread.
        self._set_mock_request_data(mock_request, {
            "thread_type": "discussion",
            "title": "Hello",
            "body": "this is a post",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": False,
            "id": "518d4237b023791dca00000d",
            "user_id": "1",
            "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [],
            "type": "thread",
            "group_id": None,
            "pinned": False,
            "endorsed": False,
            "unread_comments_count": 0,
            "read": False,
            "comments_count": 0,
        })
        thread = {
            "thread_type": "discussion",
            "body": ["this is a post"],
            "anonymous_to_peers": ["false"],
            "auto_subscribe": ["false"],
            "anonymous": ["false"],
            "title": ["Hello"],
        }
        if extra_request_data:
            thread.update(extra_request_data)
        url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
                                               'course_id': unicode(self.course_id)})
        response = self.client.post(url, data=thread)
        assert_true(mock_request.called)
        # This is what the view should have forwarded to the comment service.
        expected_data = {
            'thread_type': 'discussion',
            'body': u'this is a post',
            'context': ThreadContext.COURSE,
            'anonymous_to_peers': False, 'user_id': 1,
            'title': u'Hello',
            'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
            'anonymous': False,
            'course_id': unicode(self.course_id),
        }
        if extra_response_data:
            expected_data.update(extra_response_data)
        mock_request.assert_called_with(
            'post',
            '{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
            data=expected_data,
            params={'request_id': ANY},
            headers=ANY,
            timeout=5
        )
        assert_equal(response.status_code, 200)

    def update_thread_helper(self, mock_request):
        """
        Issues a request to update a thread and verifies the result.
        """
        self._setup_mock_request(mock_request)
        # Mock out saving in order to test that content is correctly
        # updated. Otherwise, the call to thread.save() receives the
        # same mocked request data that the original call to retrieve
        # the thread did, overwriting any changes.
        with patch.object(Thread, 'save'):
            response = self.client.post(
                reverse("update_thread", kwargs={
                    "thread_id": "dummy",
                    "course_id": unicode(self.course_id)
                }),
                data={"body": "foo", "title": "foo", "commentable_id": "some_topic"}
            )
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data['body'], 'foo')
        self.assertEqual(data['title'], 'foo')
        self.assertEqual(data['commentable_id'], 'some_topic')
@attr('shard_2')
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
@disable_signal(views, 'thread_created')
@disable_signal(views, 'thread_edited')
class ViewsQueryCountTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin, ViewsTestCaseMixin):
    """Pins the number of mongo calls and SQL queries issued by the thread
    create/update views, per modulestore backend."""

    CREATE_USER = False
    ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ViewsQueryCountTestCase, self).setUp()

    def count_queries(func):  # pylint: disable=no-self-argument
        """
        Decorates test methods to count mongo and SQL calls for a
        particular modulestore.
        """
        def inner(self, default_store, module_count, mongo_calls, sql_queries, *args, **kwargs):
            with modulestore().default_store(default_store):
                self.set_up_course(module_count=module_count)
                self.clear_caches()
                with self.assertNumQueries(sql_queries):
                    with check_mongo_calls(mongo_calls):
                        func(self, *args, **kwargs)
        return inner

    # ddt tuples: (store type, module_count, expected mongo calls, expected SQL queries)
    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 3, 4, 30),
        (ModuleStoreEnum.Type.split, 3, 13, 30),
    )
    @ddt.unpack
    @count_queries
    def test_create_thread(self, mock_request):
        self.create_thread_helper(mock_request)

    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 3, 3, 24),
        (ModuleStoreEnum.Type.split, 3, 10, 24),
    )
    @ddt.unpack
    @count_queries
    def test_update_thread(self, mock_request):
        self.update_thread_helper(mock_request)
@attr('shard_2')
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ViewsTestCase(
        UrlResetMixin,
        SharedModuleStoreTestCase,
        MockRequestSetupMixin,
        ViewsTestCaseMixin,
        MockSignalHandlerMixin
):
    """
    Tests for the discussion view endpoints (thread/comment create, update,
    delete, flag, vote, endorse).  The class-level @patch injects
    ``mock_request`` (the comments-service HTTP layer) as the last positional
    argument of every test method.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ViewsTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create(
                org='MITx', course='999',
                discussion_topics={"Some Topic": {"id": "some_topic"}},
                display_name='Robot Super Course',
            )
    @classmethod
    def setUpTestData(cls):
        super(ViewsTestCase, cls).setUpTestData()
        cls.course_id = cls.course.id
        # seed the forums permissions and roles
        call_command('seed_permissions_roles', unicode(cls.course_id))
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(ViewsTestCase, self).setUp()
        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            self.password = 'test'  # pylint: disable=attribute-defined-outside-init
            # Create the user and make them active so we can log them in.
            self.student = User.objects.create_user(uname, email, self.password)  # pylint: disable=attribute-defined-outside-init
            self.student.is_active = True
            self.student.save()
            # Add a discussion moderator
            self.moderator = UserFactory.create(password=self.password)  # pylint: disable=attribute-defined-outside-init
            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student,
                                    course_id=self.course_id)
            # Enroll the moderator and give them the appropriate roles
            CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
            self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
            assert_true(self.client.login(username='student', password=self.password))
    @contextmanager
    def assert_discussion_signals(self, signal, user=None):
        """
        Assert that the named discussion signal is sent exactly once from the
        views module during the enclosed block, attributed to `user`
        (defaults to self.student).
        """
        if user is None:
            user = self.student
        with self.assert_signal_sent(views, signal, sender=None, user=user, exclude_args=('post',)):
            yield
    def test_create_thread(self, mock_request):
        with self.assert_discussion_signals('thread_created'):
            self.create_thread_helper(mock_request)
    def test_create_thread_standalone(self, mock_request):
        """Creating a thread in a team commentable uses STANDALONE context."""
        team = CourseTeamFactory.create(
            name="A Team",
            course_id=self.course_id,
            topic_id='topic_id',
            discussion_topic_id="i4x-MITx-999-course-Robot_Super_Course"
        )
        # Add the student to the team so they can post to the commentable.
        team.add_user(self.student)
        # create_thread_helper verifies that extra data are passed through to the comments service
        self.create_thread_helper(mock_request, extra_response_data={'context': ThreadContext.STANDALONE})
    def test_delete_thread(self, mock_request):
        self._set_mock_request_data(mock_request, {
            "user_id": str(self.student.id),
            "closed": False,
        })
        test_thread_id = "test_thread_id"
        request = RequestFactory().post("dummy_url", {"id": test_thread_id})
        request.user = self.student
        request.view_name = "delete_thread"
        with self.assert_discussion_signals('thread_deleted'):
            response = views.delete_thread(
                request,
                course_id=unicode(self.course.id),
                thread_id=test_thread_id
            )
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_request.called)
    def test_delete_comment(self, mock_request):
        self._set_mock_request_data(mock_request, {
            "user_id": str(self.student.id),
            "closed": False,
        })
        test_comment_id = "test_comment_id"
        request = RequestFactory().post("dummy_url", {"id": test_comment_id})
        request.user = self.student
        request.view_name = "delete_comment"
        with self.assert_discussion_signals('comment_deleted'):
            response = views.delete_comment(
                request,
                course_id=unicode(self.course.id),
                comment_id=test_comment_id
            )
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_request.called)
        # Verify the comments service received a DELETE for this comment.
        args = mock_request.call_args[0]
        self.assertEqual(args[0], "delete")
        self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
    def _test_request_error(self, view_name, view_kwargs, data, mock_request):
        """
        Submit a request against the given view with the given data and ensure
        that the result is a 400 error and that no data was posted using
        mock_request
        """
        self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
        response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
        self.assertEqual(response.status_code, 400)
        # Only GETs (reads) may have reached the comments service.
        for call in mock_request.call_args_list:
            self.assertEqual(call[0][0].lower(), "get")
    def test_create_thread_no_title(self, mock_request):
        self._test_request_error(
            "create_thread",
            {"commentable_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": "foo"},
            mock_request
        )
    def test_create_thread_empty_title(self, mock_request):
        self._test_request_error(
            "create_thread",
            {"commentable_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": "foo", "title": " "},
            mock_request
        )
    def test_create_thread_no_body(self, mock_request):
        self._test_request_error(
            "create_thread",
            {"commentable_id": "dummy", "course_id": unicode(self.course_id)},
            {"title": "foo"},
            mock_request
        )
    def test_create_thread_empty_body(self, mock_request):
        self._test_request_error(
            "create_thread",
            {"commentable_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": " ", "title": "foo"},
            mock_request
        )
    def test_update_thread_no_title(self, mock_request):
        self._test_request_error(
            "update_thread",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": "foo"},
            mock_request
        )
    def test_update_thread_empty_title(self, mock_request):
        self._test_request_error(
            "update_thread",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": "foo", "title": " "},
            mock_request
        )
    def test_update_thread_no_body(self, mock_request):
        self._test_request_error(
            "update_thread",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {"title": "foo"},
            mock_request
        )
    def test_update_thread_empty_body(self, mock_request):
        self._test_request_error(
            "update_thread",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": " ", "title": "foo"},
            mock_request
        )
    def test_update_thread_course_topic(self, mock_request):
        with self.assert_discussion_signals('thread_edited'):
            self.update_thread_helper(mock_request)
    @patch('django_comment_client.utils.get_discussion_categories_ids', return_value=["test_commentable"])
    def test_update_thread_wrong_commentable_id(self, mock_get_discussion_id_map, mock_request):
        """Updating a thread into a commentable not in the course is a 400."""
        self._test_request_error(
            "update_thread",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": "foo", "title": "foo", "commentable_id": "wrong_commentable"},
            mock_request
        )
    def test_create_comment(self, mock_request):
        self._setup_mock_request(mock_request)
        with self.assert_discussion_signals('comment_created'):
            response = self.client.post(
                reverse(
                    "create_comment",
                    kwargs={"course_id": unicode(self.course_id), "thread_id": "dummy"}
                ),
                data={"body": "body"}
            )
        self.assertEqual(response.status_code, 200)
    def test_create_comment_no_body(self, mock_request):
        self._test_request_error(
            "create_comment",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {},
            mock_request
        )
    def test_create_comment_empty_body(self, mock_request):
        self._test_request_error(
            "create_comment",
            {"thread_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": " "},
            mock_request
        )
    def test_create_sub_comment_no_body(self, mock_request):
        self._test_request_error(
            "create_sub_comment",
            {"comment_id": "dummy", "course_id": unicode(self.course_id)},
            {},
            mock_request
        )
    def test_create_sub_comment_empty_body(self, mock_request):
        self._test_request_error(
            "create_sub_comment",
            {"comment_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": " "},
            mock_request
        )
    def test_update_comment_no_body(self, mock_request):
        self._test_request_error(
            "update_comment",
            {"comment_id": "dummy", "course_id": unicode(self.course_id)},
            {},
            mock_request
        )
    def test_update_comment_empty_body(self, mock_request):
        self._test_request_error(
            "update_comment",
            {"comment_id": "dummy", "course_id": unicode(self.course_id)},
            {"body": " "},
            mock_request
        )
    def test_update_comment_basic(self, mock_request):
        self._setup_mock_request(mock_request)
        comment_id = "test_comment_id"
        updated_body = "updated body"
        with self.assert_discussion_signals('comment_edited'):
            response = self.client.post(
                reverse(
                    "update_comment",
                    kwargs={"course_id": unicode(self.course_id), "comment_id": comment_id}
                ),
                data={"body": updated_body}
            )
        self.assertEqual(response.status_code, 200)
        # The edit must have been forwarded to the comments service as a PUT.
        mock_request.assert_called_with(
            "put",
            "{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
            headers=ANY,
            params=ANY,
            timeout=ANY,
            data={"body": updated_body}
        )
    def test_flag_thread_open(self, mock_request):
        self.flag_thread(mock_request, False)
    def test_flag_thread_close(self, mock_request):
        self.flag_thread(mock_request, True)
    def flag_thread(self, mock_request, is_closed):
        """
        Flag a thread as abusive and verify the exact sequence of
        comments-service calls (GET, PUT abuse_flag, GET).
        """
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "title": "Hello",
            "body": "this is a post",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1", "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [1],
            "type": "thread",
            "group_id": None,
            "pinned": False,
            "endorsed": False,
            "unread_comments_count": 0,
            "read": False,
            "comments_count": 0,
        })
        url = reverse('flag_abuse_for_thread', kwargs={
            'thread_id': '518d4237b023791dca00000d',
            'course_id': unicode(self.course_id)
        })
        response = self.client.post(url)
        assert_true(mock_request.called)
        call_list = [
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]
        assert_equal(call_list, mock_request.call_args_list)
        assert_equal(response.status_code, 200)
    def test_un_flag_thread_open(self, mock_request):
        self.un_flag_thread(mock_request, False)
    def test_un_flag_thread_close(self, mock_request):
        self.un_flag_thread(mock_request, True)
    def un_flag_thread(self, mock_request, is_closed):
        """
        Un-flag a thread and verify the exact sequence of comments-service
        calls (GET, PUT abuse_unflag, GET).
        """
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "title": "Hello",
            "body": "this is a post",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1",
            "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [],
            "type": "thread",
            "group_id": None,
            "pinned": False,
            "endorsed": False,
            "unread_comments_count": 0,
            "read": False,
            "comments_count": 0
        })
        url = reverse('un_flag_abuse_for_thread', kwargs={
            'thread_id': '518d4237b023791dca00000d',
            'course_id': unicode(self.course_id)
        })
        response = self.client.post(url)
        assert_true(mock_request.called)
        call_list = [
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]
        assert_equal(call_list, mock_request.call_args_list)
        assert_equal(response.status_code, 200)
    def test_flag_comment_open(self, mock_request):
        self.flag_comment(mock_request, False)
    def test_flag_comment_close(self, mock_request):
        self.flag_comment(mock_request, True)
    def flag_comment(self, mock_request, is_closed):
        """
        Flag a comment as abusive and verify the exact sequence of
        comments-service calls (GET, PUT abuse_flag, GET).
        """
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "body": "this is a comment",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1",
            "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [1],
            "type": "comment",
            "endorsed": False
        })
        url = reverse('flag_abuse_for_comment', kwargs={
            'comment_id': '518d4237b023791dca00000d',
            'course_id': unicode(self.course_id)
        })
        response = self.client.post(url)
        assert_true(mock_request.called)
        call_list = [
            (
                ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]
        assert_equal(call_list, mock_request.call_args_list)
        assert_equal(response.status_code, 200)
    def test_un_flag_comment_open(self, mock_request):
        self.un_flag_comment(mock_request, False)
    def test_un_flag_comment_close(self, mock_request):
        self.un_flag_comment(mock_request, True)
    def un_flag_comment(self, mock_request, is_closed):
        """
        Un-flag a comment and verify the exact sequence of comments-service
        calls (GET, PUT abuse_unflag, GET).
        """
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "body": "this is a comment",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1",
            "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [],
            "type": "comment",
            "endorsed": False
        })
        url = reverse('un_flag_abuse_for_comment', kwargs={
            'comment_id': '518d4237b023791dca00000d',
            'course_id': unicode(self.course_id)
        })
        response = self.client.post(url)
        assert_true(mock_request.called)
        call_list = [
            (
                ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]
        assert_equal(call_list, mock_request.call_args_list)
        assert_equal(response.status_code, 200)
    # Tuples are (view_name, id kwarg name, expected discussion signal).
    @ddt.data(
        ('upvote_thread', 'thread_id', 'thread_voted'),
        ('upvote_comment', 'comment_id', 'comment_voted'),
        ('downvote_thread', 'thread_id', 'thread_voted'),
        ('downvote_comment', 'comment_id', 'comment_voted')
    )
    @ddt.unpack
    def test_voting(self, view_name, item_id, signal, mock_request):
        """Each voting endpoint returns 200 and emits its signal."""
        self._setup_mock_request(mock_request)
        with self.assert_discussion_signals(signal):
            response = self.client.post(
                reverse(
                    view_name,
                    kwargs={item_id: 'dummy', 'course_id': unicode(self.course_id)}
                )
            )
        self.assertEqual(response.status_code, 200)
    def test_endorse_comment(self, mock_request):
        """Moderators can endorse a comment; the signal is attributed to them."""
        self._setup_mock_request(mock_request)
        self.client.login(username=self.moderator.username, password=self.password)
        with self.assert_discussion_signals('comment_endorsed', user=self.moderator):
            response = self.client.post(
                reverse(
                    'endorse_comment',
                    kwargs={'comment_id': 'dummy', 'course_id': unicode(self.course_id)}
                )
            )
        self.assertEqual(response.status_code, 200)
@attr('shard_2')
@patch("lms.lib.comment_client.utils.requests.request", autospec=True)
@disable_signal(views, 'comment_endorsed')
class ViewPermissionsTestCase(UrlResetMixin, SharedModuleStoreTestCase, MockRequestSetupMixin):
    """
    Access-control tests for pinning threads and endorsing comments: students
    may not pin, moderators may; endorsement is allowed for staff and for the
    author of a question thread, but not for other students.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ViewPermissionsTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(ViewPermissionsTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.password = "test password"
        cls.student = UserFactory.create(password=cls.password)
        cls.moderator = UserFactory.create(password=cls.password)
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
        CourseEnrollmentFactory(user=cls.moderator, course_id=cls.course.id)
        cls.moderator.roles.add(Role.objects.get(name="Moderator", course_id=cls.course.id))
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ViewPermissionsTestCase, self).setUp()
    def test_pin_thread_as_student(self, mock_request):
        self._set_mock_request_data(mock_request, {})
        self.client.login(username=self.student.username, password=self.password)
        response = self.client.post(
            reverse("pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
        )
        self.assertEqual(response.status_code, 401)
    def test_pin_thread_as_moderator(self, mock_request):
        self._set_mock_request_data(mock_request, {})
        self.client.login(username=self.moderator.username, password=self.password)
        response = self.client.post(
            reverse("pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
        )
        self.assertEqual(response.status_code, 200)
    def test_un_pin_thread_as_student(self, mock_request):
        self._set_mock_request_data(mock_request, {})
        self.client.login(username=self.student.username, password=self.password)
        response = self.client.post(
            reverse("un_pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
        )
        self.assertEqual(response.status_code, 401)
    def test_un_pin_thread_as_moderator(self, mock_request):
        self._set_mock_request_data(mock_request, {})
        self.client.login(username=self.moderator.username, password=self.password)
        response = self.client.post(
            reverse("un_pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
        )
        self.assertEqual(response.status_code, 200)
    def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
        """
        Make mock_request answer thread URLs with `thread_data` and comment
        URLs with `comment_data`; any other URL is a test-setup error.
        """
        def handle_request(*args, **kwargs):
            url = args[1]
            if "/threads/" in url:
                return self._create_response_mock(thread_data)
            elif "/comments/" in url:
                return self._create_response_mock(comment_data)
            else:
                # Fix: this previously raised `ArgumentError`, an undefined
                # name, so an unexpected URL surfaced as a NameError instead
                # of this message. ValueError carries the intended report.
                raise ValueError("Bad url to mock request")
        mock_request.side_effect = handle_request
    def test_endorse_response_as_staff(self, mock_request):
        self._set_mock_request_thread_and_comment(
            mock_request,
            {"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
            {"type": "comment", "thread_id": "dummy"}
        )
        self.client.login(username=self.moderator.username, password=self.password)
        response = self.client.post(
            reverse("endorse_comment", kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy"})
        )
        self.assertEqual(response.status_code, 200)
    def test_endorse_response_as_student(self, mock_request):
        self._set_mock_request_thread_and_comment(
            mock_request,
            {"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
            {"type": "comment", "thread_id": "dummy"}
        )
        self.client.login(username=self.student.username, password=self.password)
        response = self.client.post(
            reverse("endorse_comment", kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy"})
        )
        self.assertEqual(response.status_code, 401)
    def test_endorse_response_as_student_question_author(self, mock_request):
        self._set_mock_request_thread_and_comment(
            mock_request,
            {"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
            {"type": "comment", "thread_id": "dummy"}
        )
        self.client.login(username=self.student.username, password=self.password)
        response = self.client.post(
            reverse("endorse_comment", kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy"})
        )
        self.assertEqual(response.status_code, 200)
@attr('shard_2')
class CreateThreadUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that thread creation round-trips unicode titles and bodies."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(CreateThreadUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(CreateThreadUnicodeTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """
        Create a thread whose title and body are `text` and check that the
        unicode payload reaches the comments service unchanged.
        """
        self._set_mock_request_data(mock_request, {})
        payload = {"thread_type": "discussion", "body": text, "title": text}
        req = RequestFactory().post("dummy_url", payload)
        req.user = self.student
        req.view_name = "create_thread"
        resp = views.create_thread(
            req,
            course_id=unicode(self.course.id),
            commentable_id="non_team_dummy_id",
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(mock_request.called)
        forwarded = mock_request.call_args[1]["data"]
        self.assertEqual(forwarded["body"], text)
        self.assertEqual(forwarded["title"], text)
@attr('shard_2')
@disable_signal(views, 'thread_edited')
class UpdateThreadUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that thread updates round-trip unicode titles and bodies."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UpdateThreadUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(UpdateThreadUnicodeTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    # Decorators apply bottom-up, so mock_request (requests.request) comes
    # before mock_get_discussion_id_map in the argument list.
    @patch('django_comment_client.utils.get_discussion_categories_ids', return_value=["test_commentable"])
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request, mock_get_discussion_id_map):
        """
        Update a thread with unicode title/body and check that all fields are
        forwarded to the comments service unchanged.
        """
        self._set_mock_request_data(mock_request, {
            "user_id": str(self.student.id),
            "closed": False,
        })
        request = RequestFactory().post("dummy_url", {"body": text, "title": text, "thread_type": "question", "commentable_id": "test_commentable"})
        request.user = self.student
        request.view_name = "update_thread"
        response = views.update_thread(request, course_id=unicode(self.course.id), thread_id="dummy_thread_id")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(mock_request.called)
        self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
        self.assertEqual(mock_request.call_args[1]["data"]["thread_type"], "question")
        self.assertEqual(mock_request.call_args[1]["data"]["commentable_id"], "test_commentable")
@attr('shard_2')
@disable_signal(views, 'comment_created')
class CreateCommentUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that comment creation round-trips a unicode body."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(CreateCommentUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(CreateCommentUnicodeTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """
        Post a comment whose body is `text` and check that the unicode body
        reaches the comments service unchanged.
        """
        commentable_id = "non_team_dummy_id"
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "commentable_id": commentable_id
        })
        # We have to get clever here due to Thread's setters and getters.
        # Patch won't work with it.
        # The class attribute is removed in the finally block so the
        # monkey-patch cannot leak into other tests.
        try:
            Thread.commentable_id = commentable_id
            request = RequestFactory().post("dummy_url", {"body": text})
            request.user = self.student
            request.view_name = "create_comment"
            response = views.create_comment(
                request, course_id=unicode(self.course.id), thread_id="dummy_thread_id"
            )
            self.assertEqual(response.status_code, 200)
            self.assertTrue(mock_request.called)
            self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        finally:
            del Thread.commentable_id
@attr('shard_2')
@disable_signal(views, 'comment_edited')
class UpdateCommentUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """Verify that comment edits round-trip a unicode body."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UpdateCommentUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(UpdateCommentUnicodeTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """
        Edit a comment so its body becomes `text` and check that the unicode
        body is forwarded to the comments service unchanged.
        """
        self._set_mock_request_data(mock_request, {
            "user_id": str(self.student.id),
            "closed": False,
        })
        req = RequestFactory().post("dummy_url", {"body": text})
        req.user = self.student
        req.view_name = "update_comment"
        resp = views.update_comment(
            req,
            course_id=unicode(self.course.id),
            comment_id="dummy_comment_id",
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(mock_request.called)
        forwarded = mock_request.call_args[1]["data"]
        self.assertEqual(forwarded["body"], text)
@attr('shard_2')
@disable_signal(views, 'comment_created')
class CreateSubCommentUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
    """
    Make sure comments under a response can handle unicode.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(CreateSubCommentUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(CreateSubCommentUnicodeTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """
        Create a comment with unicode in it.
        """
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "depth": 1,
            "thread_id": "test_thread",
            "commentable_id": "non_team_dummy_id"
        })
        request = RequestFactory().post("dummy_url", {"body": text})
        request.user = self.student
        request.view_name = "create_sub_comment"
        # Monkey-patch the Thread class attribute (see the sibling
        # CreateCommentUnicodeTestCase for why patch() can't be used here);
        # removed in the finally block so it cannot leak into other tests.
        Thread.commentable_id = "test_commentable"
        try:
            response = views.create_sub_comment(
                request, course_id=unicode(self.course.id), comment_id="dummy_comment_id"
            )
            self.assertEqual(response.status_code, 200)
            self.assertTrue(mock_request.called)
            self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
        finally:
            del Thread.commentable_id
@attr('shard_2')
@ddt.ddt
@patch("lms.lib.comment_client.utils.requests.request", autospec=True)
@disable_signal(views, 'thread_voted')
@disable_signal(views, 'thread_edited')
@disable_signal(views, 'comment_created')
@disable_signal(views, 'comment_voted')
@disable_signal(views, 'comment_deleted')
class TeamsPermissionsTestCase(UrlResetMixin, SharedModuleStoreTestCase, MockRequestSetupMixin):
# Most of the test points use the same ddt data.
# args: user, commentable_id, status_code
ddt_permissions_args = [
# Student in team can do operations on threads/comments within the team commentable.
('student_in_team', 'team_commentable_id', 200),
# Non-team commentables can be edited by any student.
('student_in_team', 'course_commentable_id', 200),
# Student not in team cannot do operations within the team commentable.
('student_not_in_team', 'team_commentable_id', 401),
# Non-team commentables can be edited by any student.
('student_not_in_team', 'course_commentable_id', 200),
# Moderators can always operator on threads within a team, regardless of team membership.
('moderator', 'team_commentable_id', 200)
]
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(TeamsPermissionsTestCase, cls).setUpClassAndTestData():
teams_configuration = {
'topics': [{'id': "topic_id", 'name': 'Solar Power', 'description': 'Solar power is hot'}]
}
cls.course = CourseFactory.create(teams_configuration=teams_configuration)
@classmethod
def setUpTestData(cls):
super(TeamsPermissionsTestCase, cls).setUpTestData()
cls.password = "test password"
seed_permissions_roles(cls.course.id)
# Create 3 users-- student in team, student not in team, discussion moderator
cls.student_in_team = UserFactory.create(password=cls.password)
cls.student_not_in_team = UserFactory.create(password=cls.password)
cls.moderator = UserFactory.create(password=cls.password)
CourseEnrollmentFactory(user=cls.student_in_team, course_id=cls.course.id)
CourseEnrollmentFactory(user=cls.student_not_in_team, course_id=cls.course.id)
CourseEnrollmentFactory(user=cls.moderator, course_id=cls.course.id)
cls.moderator.roles.add(Role.objects.get(name="Moderator", course_id=cls.course.id))
# Create a team.
cls.team_commentable_id = "team_discussion_id"
cls.team = CourseTeamFactory.create(
name=u'The Only Team',
course_id=cls.course.id,
topic_id='topic_id',
discussion_topic_id=cls.team_commentable_id
)
cls.team.add_user(cls.student_in_team)
# Dummy commentable ID not linked to a team
cls.course_commentable_id = "course_level_commentable"
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(TeamsPermissionsTestCase, self).setUp()
def _setup_mock(self, user, mock_request, data):
user = getattr(self, user)
self._set_mock_request_data(mock_request, data)
self.client.login(username=user.username, password=self.password)
@ddt.data(
# student_in_team will be able to update his own post, regardless of team membership
('student_in_team', 'student_in_team', 'team_commentable_id', 200),
('student_in_team', 'student_in_team', 'course_commentable_id', 200),
# students can only update their own posts
('student_in_team', 'moderator', 'team_commentable_id', 401),
# Even though student_not_in_team is not in the team, he can still modify posts he created while in the team.
('student_not_in_team', 'student_not_in_team', 'team_commentable_id', 200),
# Moderators can change their own posts and other people's posts.
('moderator', 'moderator', 'team_commentable_id', 200),
('moderator', 'student_in_team', 'team_commentable_id', 200),
)
@ddt.unpack
def test_update_thread(self, user, thread_author, commentable_id, status_code, mock_request):
"""
Verify that update_thread is limited to thread authors and privileged users (team membership does not matter).
"""
commentable_id = getattr(self, commentable_id)
# thread_author is who is marked as the author of the thread being updated.
thread_author = getattr(self, thread_author)
self._setup_mock(
user, mock_request, # user is the person making the request.
{
"user_id": str(thread_author.id),
"closed": False, "commentable_id": commentable_id,
"context": "standalone"
}
)
response = self.client.post(
reverse(
"update_thread",
kwargs={
"course_id": unicode(self.course.id),
"thread_id": "dummy"
}
),
data={"body": "foo", "title": "foo", "commentable_id": commentable_id}
)
self.assertEqual(response.status_code, status_code)
    @ddt.data(
        # Students can delete their own posts
        ('student_in_team', 'student_in_team', 'team_commentable_id', 200),
        # Moderators can delete any post
        ('moderator', 'student_in_team', 'team_commentable_id', 200),
        # Others cannot delete posts
        ('student_in_team', 'moderator', 'team_commentable_id', 401),
        ('student_not_in_team', 'student_in_team', 'team_commentable_id', 401)
    )
    @ddt.unpack
    def test_delete_comment(self, user, comment_author, commentable_id, status_code, mock_request):
        """
        Verify that delete_comment is limited to comment authors and privileged (moderator) users.
        """
        commentable_id = getattr(self, commentable_id)
        comment_author = getattr(self, comment_author)
        # The mocked comment reports comment_author as its owner.
        self._setup_mock(user, mock_request, {
            "closed": False,
            "commentable_id": commentable_id,
            "user_id": str(comment_author.id)
        })
        response = self.client.post(
            reverse(
                "delete_comment",
                kwargs={
                    "course_id": unicode(self.course.id),
                    "comment_id": "dummy"
                }
            ),
            data={"body": "foo", "title": "foo"}
        )
        self.assertEqual(response.status_code, status_code)
    @ddt.data(*ddt_permissions_args)
    @ddt.unpack
    def test_create_comment(self, user, commentable_id, status_code, mock_request):
        """
        Verify that create_comment is limited to members of the team or users with 'edit_content' permission.
        """
        commentable_id = getattr(self, commentable_id)
        # The mocked thread belongs to the given commentable, which drives the permission check.
        self._setup_mock(user, mock_request, {"closed": False, "commentable_id": commentable_id})
        response = self.client.post(
            reverse(
                "create_comment",
                kwargs={
                    "course_id": unicode(self.course.id),
                    "thread_id": "dummy"
                }
            ),
            data={"body": "foo", "title": "foo"}
        )
        self.assertEqual(response.status_code, status_code)
    @ddt.data(*ddt_permissions_args)
    @ddt.unpack
    def test_create_sub_comment(self, user, commentable_id, status_code, mock_request):
        """
        Verify that create_subcomment is limited to members of the team or users with 'edit_content' permission.
        """
        commentable_id = getattr(self, commentable_id)
        # The mocked parent comment belongs to the given commentable and thread.
        self._setup_mock(
            user, mock_request,
            {"closed": False, "commentable_id": commentable_id, "thread_id": "dummy_thread"},
        )
        response = self.client.post(
            reverse(
                "create_sub_comment",
                kwargs={
                    "course_id": unicode(self.course.id),
                    "comment_id": "dummy_comment"
                }
            ),
            data={"body": "foo", "title": "foo"}
        )
        self.assertEqual(response.status_code, status_code)
    @ddt.data(*ddt_permissions_args)
    @ddt.unpack
    def test_comment_actions(self, user, commentable_id, status_code, mock_request):
        """
        Verify that voting and flagging of comments is limited to members of the team or users with
        'edit_content' permission.
        """
        commentable_id = getattr(self, commentable_id)
        self._setup_mock(
            user, mock_request,
            {"closed": False, "commentable_id": commentable_id, "thread_id": "dummy_thread"},
        )
        # All four endpoints are expected to enforce the same permission result.
        for action in ["upvote_comment", "downvote_comment", "un_flag_abuse_for_comment", "flag_abuse_for_comment"]:
            response = self.client.post(
                reverse(
                    action,
                    kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy_comment"}
                )
            )
            self.assertEqual(response.status_code, status_code)
    @ddt.data(*ddt_permissions_args)
    @ddt.unpack
    def test_threads_actions(self, user, commentable_id, status_code, mock_request):
        """
        Verify that voting, flagging, and following of threads is limited to members of the team or users with
        'edit_content' permission.
        """
        commentable_id = getattr(self, commentable_id)
        self._setup_mock(
            user, mock_request,
            {"closed": False, "commentable_id": commentable_id},
        )
        # Every thread-level endpoint is expected to enforce the same permission result.
        for action in ["upvote_thread", "downvote_thread", "un_flag_abuse_for_thread", "flag_abuse_for_thread",
                       "follow_thread", "unfollow_thread"]:
            response = self.client.post(
                reverse(
                    action,
                    kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy_thread"}
                )
            )
            self.assertEqual(response.status_code, status_code)
    @ddt.data(*ddt_permissions_args)
    @ddt.unpack
    def test_create_thread(self, user, commentable_id, status_code, __):
        """
        Verify that creation of threads is limited to members of the team or users with 'edit_content' permission.
        """
        commentable_id = getattr(self, commentable_id)
        # mock_request is not used because Commentables don't exist in comment service.
        # Log in directly instead of going through _setup_mock (which also primes the mock).
        self.client.login(username=getattr(self, user).username, password=self.password)
        response = self.client.post(
            reverse(
                "create_thread",
                kwargs={"course_id": unicode(self.course.id), "commentable_id": commentable_id}
            ),
            data={"body": "foo", "title": "foo", "thread_type": "discussion"}
        )
        self.assertEqual(response.status_code, status_code)
    @ddt.data(*ddt_permissions_args)
    @ddt.unpack
    def test_commentable_actions(self, user, commentable_id, status_code, __):
        """
        Verify that following of commentables is limited to members of the team or users with
        'edit_content' permission.
        """
        commentable_id = getattr(self, commentable_id)
        # mock_request is not used because Commentables don't exist in comment service.
        self.client.login(username=getattr(self, user).username, password=self.password)
        # Both endpoints are expected to enforce the same permission result.
        for action in ["follow_commentable", "unfollow_commentable"]:
            response = self.client.post(
                reverse(
                    action,
                    kwargs={"course_id": unicode(self.course.id), "commentable_id": commentable_id}
                )
            )
            self.assertEqual(response.status_code, status_code)
# Discussion topic id used by the team-scoped forum event tests.
TEAM_COMMENTABLE_ID = 'test-team-discussion'
@attr('shard_2')
@disable_signal(views, 'comment_created')
@ddt.ddt
class ForumEventTestCase(SharedModuleStoreTestCase, MockRequestSetupMixin):
    """
    Forum actions are expected to launch analytics events. Test these here.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumEventTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        """Create a student enrolled in the course with forum and course roles."""
        super(ForumEventTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
        cls.student.roles.add(Role.objects.get(name="Student", course_id=cls.course.id))
        CourseAccessRoleFactory(course_id=cls.course.id, user=cls.student, role='Wizard')
    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_thread_event(self, __, mock_emit):
        """Verify the edx.forum.thread.created event payload emitted by create_thread."""
        request = RequestFactory().post(
            "dummy_url", {
                "thread_type": "discussion",
                "body": "Test text",
                "title": "Test",
                "auto_subscribe": True
            }
        )
        request.user = self.student
        request.view_name = "create_thread"
        views.create_thread(request, course_id=unicode(self.course.id), commentable_id="test_commentable")
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, 'edx.forum.thread.created')
        self.assertEqual(event['body'], 'Test text')
        self.assertEqual(event['title'], 'Test')
        self.assertEqual(event['commentable_id'], 'test_commentable')
        self.assertEqual(event['user_forums_roles'], ['Student'])
        self.assertEqual(event['options']['followed'], True)
        self.assertEqual(event['user_course_roles'], ['Wizard'])
        self.assertEqual(event['anonymous'], False)
        self.assertEqual(event['group_id'], None)
        self.assertEqual(event['thread_type'], 'discussion')
        # Fixed: was the deprecated assertEquals alias, inconsistent with the
        # assertEqual used everywhere else in this class.
        self.assertEqual(event['anonymous_to_peers'], False)
    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_response_event(self, mock_request, mock_emit):
        """
        Check to make sure an event is fired when a user responds to a thread.
        """
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "commentable_id": 'test_commentable_id',
            'thread_id': 'test_thread_id',
        })
        request = RequestFactory().post("dummy_url", {"body": "Test comment", 'auto_subscribe': True})
        request.user = self.student
        request.view_name = "create_comment"
        views.create_comment(request, course_id=unicode(self.course.id), thread_id='test_thread_id')
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, 'edx.forum.response.created')
        self.assertEqual(event['body'], "Test comment")
        self.assertEqual(event['commentable_id'], 'test_commentable_id')
        self.assertEqual(event['user_forums_roles'], ['Student'])
        self.assertEqual(event['user_course_roles'], ['Wizard'])
        self.assertEqual(event['discussion']['id'], 'test_thread_id')
        self.assertEqual(event['options']['followed'], True)
    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_comment_event(self, mock_request, mock_emit):
        """
        Ensure an event is fired when someone comments on a response.
        """
        self._set_mock_request_data(mock_request, {
            "closed": False,
            "depth": 1,
            "thread_id": "test_thread_id",
            "commentable_id": "test_commentable_id",
            "parent_id": "test_response_id"
        })
        request = RequestFactory().post("dummy_url", {"body": "Another comment"})
        request.user = self.student
        request.view_name = "create_sub_comment"
        views.create_sub_comment(request, course_id=unicode(self.course.id), comment_id="dummy_comment_id")
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, "edx.forum.comment.created")
        self.assertEqual(event['body'], 'Another comment')
        self.assertEqual(event['discussion']['id'], 'test_thread_id')
        self.assertEqual(event['response']['id'], 'test_response_id')
        self.assertEqual(event['user_forums_roles'], ['Student'])
        self.assertEqual(event['user_course_roles'], ['Wizard'])
        self.assertEqual(event['options']['followed'], False)
    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    @ddt.data((
        'create_thread',
        'edx.forum.thread.created', {
            'thread_type': 'discussion',
            'body': 'Test text',
            'title': 'Test',
            'auto_subscribe': True
        },
        {'commentable_id': TEAM_COMMENTABLE_ID}
    ), (
        'create_comment',
        'edx.forum.response.created',
        {'body': 'Test comment', 'auto_subscribe': True},
        {'thread_id': 'test_thread_id'}
    ), (
        'create_sub_comment',
        'edx.forum.comment.created',
        {'body': 'Another comment'},
        {'comment_id': 'dummy_comment_id'}
    ))
    @ddt.unpack
    def test_team_events(self, view_name, event_name, view_data, view_kwargs, mock_request, mock_emit):
        """Forum events fired from a team discussion should carry the team_id."""
        user = self.student
        team = CourseTeamFactory.create(discussion_topic_id=TEAM_COMMENTABLE_ID)
        CourseTeamMembershipFactory.create(team=team, user=user)
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            'closed': False,
            'commentable_id': TEAM_COMMENTABLE_ID,
            'thread_id': 'test_thread_id',
        })
        request = RequestFactory().post('dummy_url', view_data)
        request.user = user
        request.view_name = view_name
        getattr(views, view_name)(request, course_id=unicode(self.course.id), **view_kwargs)
        name, event = mock_emit.call_args[0]
        self.assertEqual(name, event_name)
        self.assertEqual(event['team_id'], team.team_id)
    @ddt.data(
        ('vote_for_thread', 'thread_id', 'thread'),
        ('undo_vote_for_thread', 'thread_id', 'thread'),
        ('vote_for_comment', 'comment_id', 'response'),
        ('undo_vote_for_comment', 'comment_id', 'response'),
    )
    @ddt.unpack
    @patch('eventtracking.tracker.emit')
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_thread_voted_event(self, view_name, obj_id_name, obj_type, mock_request, mock_emit):
        """Voting (and un-voting) on threads and responses emits a .voted event."""
        undo = view_name.startswith('undo')
        self._set_mock_request_data(mock_request, {
            'closed': False,
            'commentable_id': 'test_commentable_id',
            'username': 'gumprecht',
        })
        request = RequestFactory().post('dummy_url', {})
        request.user = self.student
        request.view_name = view_name
        view_function = getattr(views, view_name)
        kwargs = dict(course_id=unicode(self.course.id))
        # NOTE(review): the object id passed is the literal parameter name
        # ('thread_id'/'comment_id'); this works because the comments service
        # request is mocked — confirm that is intentional.
        kwargs[obj_id_name] = obj_id_name
        if not undo:
            kwargs.update(value='up')
        view_function(request, **kwargs)
        self.assertTrue(mock_emit.called)
        event_name, event = mock_emit.call_args[0]
        self.assertEqual(event_name, 'edx.forum.{}.voted'.format(obj_type))
        self.assertEqual(event['target_username'], 'gumprecht')
        self.assertEqual(event['undo_vote'], undo)
        self.assertEqual(event['vote_value'], 'up')
@attr('shard_2')
class UsersEndpointTestCase(SharedModuleStoreTestCase, MockRequestSetupMixin):
    """Tests for the forum 'users' view: exact-username lookup of enrolled
    users, filtered to those with forum activity."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UsersEndpointTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        """Create a requesting student and an 'other' user, both enrolled."""
        super(UsersEndpointTestCase, cls).setUpTestData()
        seed_permissions_roles(cls.course.id)
        cls.student = UserFactory.create()
        cls.enrollment = CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
        cls.other_user = UserFactory.create(username="other")
        CourseEnrollmentFactory(user=cls.other_user, course_id=cls.course.id)
    def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
        """
        sets up a mock response from the comments service for getting post counts for our other_user
        """
        self._set_mock_request_data(mock_request, {
            "threads_count": threads_count,
            "comments_count": comments_count,
        })
    def make_request(self, method='get', course_id=None, **kwargs):
        """Call the users view as self.student; kwargs become request parameters."""
        course_id = course_id or self.course.id
        request = getattr(RequestFactory(), method)("dummy_url", kwargs)
        request.user = self.student
        request.view_name = "users"
        return views.users(request, course_id=course_id.to_deprecated_string())
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_finds_exact_match(self, mock_request):
        """An exact username match is returned with its id and username."""
        self.set_post_counts(mock_request)
        response = self.make_request(username="other")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            json.loads(response.content)["users"],
            [{"id": self.other_user.id, "username": self.other_user.username}]
        )
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_finds_no_match(self, mock_request):
        """A near-miss username ('othor') yields an empty user list."""
        self.set_post_counts(mock_request)
        response = self.make_request(username="othor")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)["users"], [])
    def test_requires_GET(self):
        """Non-GET methods are rejected with 405."""
        response = self.make_request(method='post', username="other")
        self.assertEqual(response.status_code, 405)
    def test_requires_username_param(self):
        """Omitting the username parameter yields 400 with errors and no users."""
        response = self.make_request()
        self.assertEqual(response.status_code, 400)
        content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)
    def test_course_does_not_exist(self):
        """An unknown course key yields 404 with errors and no users."""
        course_id = CourseKey.from_string("does/not/exist")
        response = self.make_request(course_id=course_id, username="other")
        self.assertEqual(response.status_code, 404)
        content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)
    def test_requires_requestor_enrolled_in_course(self):
        """Requests from users no longer enrolled in the course yield 404."""
        # unenroll self.student from the course.
        self.enrollment.delete()
        response = self.make_request(username="other")
        self.assertEqual(response.status_code, 404)
        content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_requires_matched_user_has_forum_content(self, mock_request):
        """A matched user with zero threads and comments is excluded from results."""
        self.set_post_counts(mock_request, 0, 0)
        response = self.make_request(username="other")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)["users"], [])
| agpl-3.0 |
kprkpr/kernel-e400 | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/_base.py | 915 | 13711 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
# None is sufficient as the marker sentinel because real entries in the
# active formatting elements list are always node objects.
Marker = None
# Maps an elementInScope() variant name to (scoping element set, invert flag).
# With invert=True (the "select" variant) the scope search continues only
# through the listed elements instead of stopping at them.
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
                                              (namespaces["html"], "ul")])), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}
class Node(object):
    """Abstract base for items in the tree (elements, text, comments, ...)."""
    def __init__(self, name):
        """Create a tree item.

        name - the tag name associated with the node
        parent - the parent node, or None for the document node
        value - the node's value (used by text nodes and comments)
        attributes - dict of attribute name/value pairs for the node
        childNodes - list of children; must include all elements but not
            necessarily other node types
        _flags - list of miscellaneous flags that can be set on the node
        """
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __str__(self):
        rendered_attrs = " ".join("%s=\"%s\"" % (attr_name, attr_value)
                                  for attr_name, attr_value in
                                  self.attributes.items())
        if rendered_attrs:
            return "<%s %s>" % (self.name, rendered_attrs)
        return "<%s>" % (self.name,)

    def __repr__(self):
        return "<%s>" % (self.name,)

    def appendChild(self, node):
        """Add node as the last child of this node (subclass responsibility)."""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in this node, before the start of insertBefore
        if given, otherwise appended to the node's text (subclass
        responsibility).
        """
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of this node, positioned before refNode.
        Raises ValueError if refNode is not a child of the current node.
        """
        raise NotImplementedError

    def removeChild(self, node):
        """Detach node from this node's children (subclass responsibility)."""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all children of this node onto newParent.

        Needed so that trees that don't store text as nodes still move the
        text in the correct way.
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy: same name and attributes, but no parent or
        child nodes (subclass responsibility).
        """
        raise NotImplementedError

    def hasContent(self):
        """Return True if the node has children or text (subclass responsibility)."""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements that enforces the spec's "Noah's
    Ark" clause: at most three matching entries since the last Marker."""
    def append(self, node):
        if node != Marker:
            matches = 0
            # Walk backwards to the most recent Marker, counting entries that
            # match the incoming node; evict the oldest-seen match once three
            # have accumulated.
            for existing in self[::-1]:
                if existing == Marker:
                    break
                if self.nodesEqual(existing, node):
                    matches += 1
                    if matches == 3:
                        self.remove(existing)
                        break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        """Two nodes match when both their name tuples and attributes agree."""
        return (node1.nameTuple == node2.nameTuple and
                node1.attributes == node2.attributes)
class TreeBuilder(object):
    """Base treebuilder implementation
    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes
    """
    # Document class
    documentClass = None
    # The class to use for creating a node
    elementClass = None
    # The class to use for creating comments
    commentClass = None
    # The class to use for creating doctypes
    doctypeClass = None
    # Fragment class
    fragmentClass = None
    def __init__(self, namespaceHTMLElements):
        """namespaceHTMLElements - if true, elements default to the XHTML
        namespace; otherwise the default namespace is None."""
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()
    def reset(self):
        """Clear all per-parse state so the builder can be reused."""
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()
        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None
        self.insertFromTable = False
        self.document = self.documentClass()
    def elementInScope(self, target, variant=None):
        """Return True if target is in scope on the stack of open elements,
        using the scoping-element set selected by variant (see
        listElementsMap)."""
        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")
        listElements, invert = listElementsMap[variant]
        for node in reversed(self.openElements):
            if (node.name == target and not exactNode or
                    node == target and exactNode):
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                return False
        assert False  # We should never reach this point
    def reconstructActiveFormattingElements(self):
        """Reopen formatting elements that were implicitly closed (the spec's
        "reconstruct the active formatting elements" algorithm)."""
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.
        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return
        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return
        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]
        while True:
            # Step 7
            i += 1
            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes
            # Step 9
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})
            # Step 10
            self.activeFormattingElements[i] = element
            # Step 11
            if element == self.activeFormattingElements[-1]:
                break
    def clearActiveFormattingElements(self):
        """Pop entries up to and including the most recent Marker."""
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()
    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""
        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False
    def insertRoot(self, token):
        """Create the root element from token, push it on the open element
        stack, and attach it to the document."""
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)
    def insertDoctype(self, token):
        """Append a doctype node built from token to the document."""
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)
    def insertComment(self, token, parent=None):
        """Append a comment node to parent (default: the current open element)."""
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))
    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element
    def _getInsertFromTable(self):
        # Getter for the insertFromTable property defined below.
        return self._insertFromTable
    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal
    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
    def insertElementNormal(self, token):
        """Create an element from token, append it to the current open
        element, and push it onto the open element stack."""
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element
    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element
    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]
        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)
    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore
    def generateImpliedEndTags(self, exclude=None):
        """Recursively pop open elements whose end tags may be implied,
        stopping at (and keeping) any element named exclude."""
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
                and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)
    def getDocument(self):
        "Return the final tree"
        return self.document
    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment
    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests
        node - the node from which to start serializing"""
        raise NotImplementedError
| gpl-2.0 |
leki75/ansible | lib/ansible/plugins/action/include_vars.py | 18 | 10206 | # (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path, walk
import re
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']
VALID_FILE_ARGUMENTS = ['file', '_raw_params']
VALID_ALL = ['name']
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, str):
self.ignore_files = self.ignore_files.split()
elif isinstance(self.ignore_files, dict):
return {
'failed': True,
'message': '{0} must be a list'.format(self.ignore_files)
}
def _set_args(self):
""" Set instance variables based on the arguments that were passed """
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_files = self._task.args.get('ignore_files', None)
self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
# convert/validate extensions list
if isinstance(self.valid_extensions, string_types):
self.valid_extensions = list(self.valid_extensions)
if not isinstance(self.valid_extensions, list):
raise AnsibleError('Invalid type for "extensions" option, it must be a list')
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
if task_vars is None:
task_vars = dict()
self.show_content = True
self.included_files = []
# Validate arguments
dirs = 0
files = 0
for arg in self._task.args:
if arg in self.VALID_DIR_ARGUMENTS:
dirs += 1
elif arg in self.VALID_FILE_ARGUMENTS:
files += 1
elif arg in self.VALID_ALL:
pass
else:
raise AnsibleError('{0} is not a valid option in debug'.format(arg))
if dirs and files:
raise AnsibleError("Your are mixing file only and dir only arguments, these are incompatible")
# set internal vars from args
self._set_args()
results = dict()
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if path.exists(self.source_dir):
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
if failed:
break
results.update(updated_results)
else:
failed = True
err_msg = ('{0} directory does not exist'.format(self.source_dir))
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(tmp, task_vars)
if failed:
result['failed'] = failed
result['message'] = err_msg
result['ansible_included_var_files'] = self.included_files
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
current_dir = (
"/".join(self._task._ds._data_source.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
alphabetical order. Do not iterate pass the set depth.
The default depth is unlimited.
"""
current_depth = 0
sorted_walk = list(walk(self.source_dir))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
current_depth += 1
if current_depth <= self.depth or self.depth == 0:
current_files.sort()
yield (current_root, current_files)
else:
break
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception:
err_msg = 'Invalid regular expression: {0}'.format(file_type)
raise AnsibleError(err_msg)
return False
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
file_ext = path.splitext(source_file)
print(file_ext[-1][2:])
return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
    def _load_files(self, filename, validate_extensions=False):
        """Load one file and convert its content into a Python dict.

        Args:
            filename (str): the source file.
            validate_extensions (bool): when True, reject files whose
                extension is not in ``self.valid_extensions``.

        Returns:
            Tuple (bool, str, dict): (failed flag, error message, loaded vars).
        """
        results = dict()
        failed = False
        err_msg = ''
        if validate_extensions and not self._is_valid_file_ext(filename):
            failed = True
            err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))
        else:
            # Read raw bytes through the loader (handles vault decryption);
            # show_content is False for vaulted files so they are not logged.
            b_data, show_content = self._loader._get_file_contents(filename)
            data = to_text(b_data, errors='surrogate_or_strict')
            self.show_content = show_content
            data = self._loader.load(data, show_content)
            # Treat an empty file as an empty dict rather than a failure.
            if not data:
                data = dict()
            if not isinstance(data, dict):
                failed = True
                err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))
            else:
                self.included_files.append(filename)
                results.update(data)
        return failed, err_msg, results
    def _load_files_in_dir(self, root_dir, var_files):
        """Load the given yml files, later files overwriting earlier keys.

        Args:
            root_dir (str): base directory of the files being passed.
            var_files (list): file names to iterate over and merge into one dict.

        Returns:
            Tuple (bool, str, dict): (failed flag, error message, merged vars).
        """
        results = dict()
        failed = False
        err_msg = ''
        for filename in var_files:
            stop_iter = False
            # Never include main.yml from a role, as that is the default included by the role
            if self._task._role:
                if filename == 'main.yml':
                    stop_iter = True
                    continue
            filepath = path.join(root_dir, filename)
            # Honour the optional files_matching regex filter.
            if self.files_matching:
                if not self.matcher.search(filename):
                    stop_iter = True
            # Once a file has failed to load, remaining files are skipped
            # (failed stays True) and the first error message is reported.
            if not stop_iter and not failed:
                if path.exists(filepath) and not self._ignore_file(filename):
                    failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
                    if not failed:
                        results.update(loaded_data)
        return failed, err_msg, results
| gpl-3.0 |
emedvedev/st2 | st2actions/st2actions/config.py | 3 | 3227 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options registration and useful routines.
"""
from oslo_config import cfg
import st2common.config as common_config
from st2common.constants.system import VERSION_STRING
# Module-level alias for the global oslo.config configuration object.
CONF = cfg.CONF
def parse_args(args=None):
    # Parse CLI/config-file options into CONF; also wires up --version output.
    CONF(args=args, version=VERSION_STRING)
def register_opts():
    """Register all configuration options used by the action runner service."""
    _register_common_opts()
    _register_action_runner_opts()
def _register_common_opts():
    # Options shared by all st2 services (logging, messaging, auth, ...).
    common_config.register_opts()
def _register_action_runner_opts():
    """Register option groups specific to the action runner:

    - [actionrunner] logging config path and dispatcher pool sizes
    - [database]     MongoDB connection settings
    - [ssh_runner]   remote/Paramiko SSH runner behaviour
    - [cloudslang]   CloudSlang runner location
    """
    logging_opts = [
        cfg.StrOpt('logging', default='conf/logging.conf',
                   help='location of the logging.conf file'),
    ]
    CONF.register_opts(logging_opts, group='actionrunner')
    dispatcher_pool_opts = [
        cfg.IntOpt('workflows_pool_size', default=40,
                   help='Internal pool size for dispatcher used by workflow actions.'),
        cfg.IntOpt('actions_pool_size', default=60,
                   help='Internal pool size for dispatcher used by regular actions.')
    ]
    CONF.register_opts(dispatcher_pool_opts, group='actionrunner')
    db_opts = [
        cfg.StrOpt('host', default='0.0.0.0', help='host of db server'),
        cfg.IntOpt('port', default=27017, help='port of db server'),
        cfg.StrOpt('db_name', default='st2', help='name of database')
    ]
    CONF.register_opts(db_opts, group='database')
    ssh_runner_opts = [
        cfg.StrOpt('remote_dir',
                   default='/tmp',
                   help='Location of the script on the remote filesystem.'),
        cfg.BoolOpt('allow_partial_failure',
                    default=False,
                    help='How partial success of actions run on multiple nodes ' +
                         'should be treated.'),
        cfg.IntOpt('max_parallel_actions', default=50,
                   help='Max number of parallel remote SSH actions that should be run. ' +
                        'Works only with Paramiko SSH runner.'),
        cfg.BoolOpt('use_ssh_config',
                    default=False,
                    help='Use the .ssh/config file. Useful to override ports etc.')
    ]
    CONF.register_opts(ssh_runner_opts, group='ssh_runner')
    cloudslang_opts = [
        cfg.StrOpt('home_dir', default='/opt/cslang',
                   help='CloudSlang home directory.'),
    ]
    CONF.register_opts(cloudslang_opts, group='cloudslang')
def get_logging_config_path():
    # Path to the logging.conf file configured for the action runner.
    return CONF.actionrunner.logging
# Options are registered at import time so CONF is fully populated before
# parse_args() is called.
register_opts()
| apache-2.0 |
trabacus-softapps/openerp-8.0-cc | openerp/addons/account_asset/report/__init__.py | 445 | 1074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
radupaul/profitpy | profit/lib/widgets/filterbar.py | 18 | 1069 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
from PyQt4.QtCore import pyqtSignature
from PyQt4.QtGui import QWidget
from profit.lib import Signals
from profit.lib.widgets.ui_filterbar import Ui_FilterBar
class FilterBar(QWidget, Ui_FilterBar):
    """ Widget with filter line edit and clear button.

    The UI (filterEdit line edit + clearButton) comes from the generated
    Ui_FilterBar class; slots are auto-connected by setupUi via their
    on_<object>_<signal> names.
    """
    def __init__(self, parent=None):
        """ Constructor.

        @param parent ancestor of this widget
        """
        QWidget.__init__(self, parent)
        self.setupUi(self)
    @pyqtSignature('')
    def on_clearButton_clicked(self):
        """ signal handler called when clear button is pressed

        Clears the filter text and re-emits editingFinished so listeners
        refresh as if the user had finished typing an empty filter.

        @return None
        """
        self.filterEdit.clear()
        self.filterEdit.emit(Signals.editingFinished)
def on_filterEdit_textChanged(self, text):
""" signal handler called when line edit text changed
@param text current value of line edit as QString instance
@return None
"""
| gpl-2.0 |
ntasfi/PyGame-Learning-Environment | ple/games/monsterkong/onBoard.py | 2 | 1433 | __author__ = 'Batchu Vishal'
import pygame
class OnBoard(pygame.sprite.Sprite):
    '''
    Base sprite for every inanimate object shown on the board
    (coins, ladders, walls, ...).

    Keeps the object's position and prepares the ``image`` and ``rect``
    attributes that pygame's sprite drawing machinery requires.
    '''

    def __init__(self, raw_image, position):
        pygame.sprite.Sprite.__init__(self)
        self.__position = position
        # image and rect are required by the sprite draw() machinery;
        # every board object starts out scaled to 15x15 pixels.
        self.image = pygame.transform.scale(raw_image, (15, 15))
        self.rect = self.image.get_rect()
        self.rect.center = self.__position

    # Getters and setters -------------------------------------------------
    def setCenter(self, position):
        self.rect.center = position

    def getPosition(self):
        return self.__position

    def setPosition(self, position):
        self.__position = position

    def updateImage(self, raw_image):
        # Abstract: each subclass rescales its own image to the size it needs.
        raise NotImplementedError("Subclass must implement this")

    def modifySize(self, raw_image, height, width):
        # Replace the current image with raw_image scaled to (width, height).
        self.image = pygame.transform.scale(raw_image, (width, height))
| mit |
Fiedzia/Django-facebook | docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/download.py | 25 | 16373 | import re
import getpass
import sys
import os
import mimetypes
import shutil
import tempfile
from pip.backwardcompat import (md5, copytree, xmlrpclib, urllib, urllib2,
urlparse, string_types, HTTPError)
from pip.exceptions import InstallationError
from pip.util import (splitext, rmtree,
format_size, display_path, backup_dir, ask,
unpack_file, create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
__all__ = ['xmlrpclib_transport', 'get_file_content', 'urlopen',
'is_url', 'url_to_path', 'path_to_url', 'path_to_url2',
'geturl', 'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
xmlrpclib_transport = xmlrpclib.Transport()
def get_file_content(url, comes_from=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).

    A file: URL is refused when it was referenced from an http(s)
    requirements file, to stop remote files from pulling in local ones.
    """
    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        if (scheme == 'file' and comes_from
            and comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            # Turn the file: URL into a plain filesystem path, normalising
            # Windows-style backslashes and the legacy drive form "c|/...".
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib.unquote(path)
            if path.startswith('/'):
                path = '/' + path.lstrip('/')
            url = path
        else:
            ## FIXME: catch some errors
            resp = urlopen(url)
            return geturl(resp), resp.read()
    try:
        f = open(url)
        content = f.read()
    except IOError:
        e = sys.exc_info()[1]
        raise InstallationError('Could not open requirements file: %s' % str(e))
    else:
        f.close()
    return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
class URLOpener(object):
    """
    pip's own URL helper that adds HTTP basic auth and proxy support.

    Credentials may be embedded in the URL (user:pass@host); otherwise the
    user is prompted interactively on a 401 (when ``prompting`` is enabled
    via setup()).  Successful credentials are cached per netloc in a
    password manager so later requests reuse them.
    """
    def __init__(self):
        self.passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    def __call__(self, url):
        """
        If the given url contains auth info or if a normal request gets a 401
        response, an attempt is made to fetch the resource using basic HTTP
        auth.
        """
        url, username, password = self.extract_credentials(url)
        if username is None:
            try:
                # No credentials in the URL: try anonymously first.
                response = urllib2.urlopen(self.get_request(url))
            except urllib2.HTTPError:
                e = sys.exc_info()[1]
                if e.code != 401:
                    raise
                # 401: retry with stored or prompted credentials.
                response = self.get_response(url)
        else:
            response = self.get_response(url, username, password)
        return response
    def get_request(self, url):
        """
        Wraps the URL to retrieve, to protect against "creative"
        interpretation of the RFC: http://bugs.python.org/issue8732
        """
        if isinstance(url, string_types):
            # Accept-encoding: identity disables transparent gzip so the
            # byte count matches Content-Length for progress reporting.
            url = urllib2.Request(url, headers={'Accept-encoding': 'identity'})
        return url
    def get_response(self, url, username=None, password=None):
        """
        Does the dirty work of actually getting the response object using
        urllib2 and its HTTP auth builtins.
        """
        scheme, netloc, path, query, frag = urlparse.urlsplit(url)
        req = self.get_request(url)
        stored_username, stored_password = self.passman.find_user_password(None, netloc)
        # see if we have a password stored
        if stored_username is None:
            if username is None and self.prompting:
                # Interactive fallback (Python 2 raw_input).
                username = urllib.quote(raw_input('User for %s: ' % netloc))
                password = urllib.quote(getpass.getpass('Password: '))
            if username and password:
                self.passman.add_password(None, netloc, username, password)
            stored_username, stored_password = self.passman.find_user_password(None, netloc)
        authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
        opener = urllib2.build_opener(authhandler)
        # FIXME: should catch a 401 and offer to let the user reenter credentials
        return opener.open(req)
    def setup(self, proxystr='', prompting=True):
        """
        Sets the proxy handler given the option passed on the command
        line.  If an empty string is passed it looks at the HTTP_PROXY
        environment variable.
        """
        self.prompting = prompting
        proxy = self.get_proxy(proxystr)
        if proxy:
            proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy})
            opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler)
            # Installs globally: all later urllib2.urlopen calls use the proxy.
            urllib2.install_opener(opener)
    def parse_credentials(self, netloc):
        # Split "user:pass@host" -> (user, pass); (user, None) when no
        # password; (None, None) when no auth part at all.
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
    def extract_credentials(self, url):
        """
        Extracts user/password from a url.

        Returns a tuple:
            (url-without-auth, username, password)
        """
        if isinstance(url, urllib2.Request):
            result = urlparse.urlsplit(url.get_full_url())
        else:
            result = urlparse.urlsplit(url)
        scheme, netloc, path, query, frag = result
        username, password = self.parse_credentials(netloc)
        if username is None:
            return url, None, None
        elif password is None and self.prompting:
            # remove the auth credentials from the url part
            netloc = netloc.replace('%s@' % username, '', 1)
            # prompt for the password
            prompt = 'Password for %s@%s: ' % (username, netloc)
            password = urllib.quote(getpass.getpass(prompt))
        else:
            # remove the auth credentials from the url part
            netloc = netloc.replace('%s:%s@' % (username, password), '', 1)
        target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
        return target_url, username, password
    def get_proxy(self, proxystr=''):
        """
        Get the proxy given the option passed on the command line.
        If an empty string is passed it looks at the HTTP_PROXY
        environment variable.  Prompts for the proxy password when the
        proxy string has a user but no password.
        """
        if not proxystr:
            proxystr = os.environ.get('HTTP_PROXY', '')
        if proxystr:
            if '@' in proxystr:
                user_password, server_port = proxystr.split('@', 1)
                if ':' in user_password:
                    user, password = user_password.split(':', 1)
                else:
                    user = user_password
                    prompt = 'Password for %s@%s: ' % (user, server_port)
                    password = urllib.quote(getpass.getpass(prompt))
                return '%s:%s@%s' % (user, password, server_port)
            else:
                return proxystr
        else:
            return None
# Module-level singleton used throughout pip for authenticated downloads.
urlopen = URLOpener()
def is_url(name):
    """Returns true if the name looks like a URL"""
    if ':' not in name:
        return False
    # Recognise plain transport schemes plus any VCS scheme (git+https, ...).
    scheme = name.split(':', 1)[0].lower()
    return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
    """
    Convert a file: URL to a local filesystem path.

    Handles both the 'c:' and legacy 'c|' Windows drive spellings and
    unquotes percent-escapes.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)
    path = url[len('file:'):].lstrip('/')
    path = urllib.unquote(path)
    if _url_drive_re.match(path):
        # Windows drive letter: 'c:...' or 'c|...' -> 'c:' + rest.
        path = path[0] + ':' + path[2:]
    else:
        # POSIX: restore the leading slash stripped above.
        path = '/' + path
    return path
# 'c:' at the start of a path (Windows drive).
_drive_re = re.compile('^([a-z]):', re.I)
# 'c:' or the legacy 'c|' drive spelling at the start of a URL path.
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute.

    Uses the legacy 'c|' drive spelling; see path_to_url2 for the variant
    that quotes each path segment separately.
    """
    path = os.path.normcase(os.path.abspath(path))
    if _drive_re.match(path):
        path = path[0] + '|' + path[2:]
    url = urllib.quote(path)
    url = url.replace(os.path.sep, '/')
    url = url.lstrip('/')
    return 'file:///' + url
def path_to_url2(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and have
    quoted path parts.
    """
    path = os.path.normpath(os.path.abspath(path))
    drive, path = os.path.splitdrive(path)
    filepath = path.split(os.path.sep)
    # Quote each segment individually so separators survive unescaped.
    url = '/'.join([urllib.quote(part) for part in filepath])
    if not drive:
        url = url.lstrip('/')
    return 'file:///' + drive + url
def geturl(urllib2_resp):
    """
    Use instead of urllib.addinfourl.geturl(), which appears to have
    some issues with dropping the double slash for certain schemes
    (e.g. file://).  Restores '://' after the scheme whenever it is
    missing, treating scheme:/foo/bar the same as scheme:///foo/bar.

    The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
    """
    url = urllib2_resp.geturl()
    scheme, rest = url.split(':', 1)
    # FIXME: write a good test to cover it
    return url if rest.startswith('//') else '%s://%s' % (scheme, rest)
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file."""
    archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle')
    # pip's splitext (unlike os.path's) keeps multi-part suffixes such as
    # '.tar.gz' together as one extension.
    return splitext(name)[1].lower() in archives
def unpack_vcs_link(link, location, only_download=False):
    # export() produces a bare snapshot (no VCS metadata) for downloads;
    # unpack() keeps the working copy for an actual install.
    vcs_backend = _get_used_vcs_backend(link)
    if only_download:
        vcs_backend.export(location)
    else:
        vcs_backend.unpack(location)
def unpack_file_url(link, location):
    """Copy a local directory, or unpack a local archive, into `location`."""
    source = url_to_path(link.url)
    content_type = mimetypes.guess_type(source)[0]
    if os.path.isdir(source):
        # delete the location since shutil will create it again :(
        if os.path.isdir(location):
            rmtree(location)
        copytree(source, location)
    else:
        unpack_file(source, location, content_type, link)
def _get_used_vcs_backend(link):
    # Returns the matching VCS backend instance, or None (implicitly)
    # when no registered backend handles the link's scheme.
    for backend in vcs.backends:
        if link.scheme in backend.schemes:
            vcs_backend = backend(link.url)
            return vcs_backend
def is_vcs_url(link):
    # True when some registered VCS backend claims the link's scheme.
    return bool(_get_used_vcs_backend(link))
def is_file_url(link):
    """Return True when `link` points at a file: URL (case-insensitive)."""
    url = link.url.lower()
    return url.startswith('file:')
def _check_md5(download_hash, link):
    # Compare the computed hash with the #md5=... fragment of the link and
    # abort the install on mismatch.
    download_hash = download_hash.hexdigest()
    if download_hash != link.md5_hash:
        logger.fatal("MD5 hash of the package %s (%s) doesn't match the expected hash %s!"
                     % (link, download_hash, link.md5_hash))
        raise InstallationError('Bad MD5 hash for package %s' % link)
def _get_md5_from_file(target_file, link):
download_hash = md5()
fp = open(target_file, 'rb')
while 1:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
    """Stream `resp` to `temp_location`, reporting progress.

    Returns the md5 hash object accumulated over the stream when the link
    carries an #md5=... fragment, else None.
    """
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.md5_hash:
        download_hash = md5()
    try:
        total_length = int(resp.info()['content-length'])
    except (ValueError, KeyError):
        # Missing or malformed header: treat the size as unknown.
        total_length = 0
    downloaded = 0
    # Only show a live progress display for files above ~40 kB or of
    # unknown size; tiny files just get a one-line notice.
    show_progress = total_length > 40*1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.debug('Downloading from URL %s' % link)
        while 1:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%%  %s' % (100*downloaded/total_length, format_size(downloaded)))
            if link.md5_hash:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
def _copy_file(filename, location, content_type, link):
    """Copy a downloaded file into `location`, asking what to do when the
    destination already exists (ignore / wipe / back up)."""
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask('The file %s exists. (i)gnore, (w)ipe, (b)ackup '
                       % display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warn('Backing up %s to %s'
                        % (display_path(download_location), display_path(dest_file)))
            shutil.move(download_location, dest_file)
    if copy:
        shutil.copy(filename, download_location)
        # NOTE(review): decrements indentation without a visible matching
        # increment here -- presumably paired with the caller's logging
        # context; confirm before changing.
        logger.indent -= 2
        logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, only_download):
    """Fetch `link` over HTTP (or reuse the download cache) and unpack or
    save it at `location`.

    The cache stores the raw file under a quoted-URL name plus a
    ".content-type" sidecar file; a cached entry is reused only when both
    are present.
    """
    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    # Strip the #md5=... fragment before using the URL as a cache key.
    target_url = link.url.split('#', 1)[0]
    target_file = None
    download_hash = None
    if download_cache:
        target_file = os.path.join(download_cache,
                                   urllib.quote(target_url, ''))
        if not os.path.isdir(download_cache):
            create_download_cache_folder(download_cache)
    if (target_file
        and os.path.exists(target_file)
        and os.path.exists(target_file+'.content-type')):
        # Cache hit: use the cached file directly.
        fp = open(target_file+'.content-type')
        content_type = fp.read().strip()
        fp.close()
        if link.md5_hash:
            download_hash = _get_md5_from_file(target_file, link)
        temp_location = target_file
        logger.notify('Using download cache from %s' % target_file)
    else:
        # Cache miss: download into the temp dir, inferring a file
        # extension from the content type or the final (redirected) URL.
        resp = _get_response_from_url(target_url, link)
        content_type = resp.info()['content-type']
        filename = link.filename
        ext = splitext(filename)[1]
        if not ext:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != geturl(resp):
            ext = os.path.splitext(geturl(resp))[1]
            if ext:
                filename += ext
        temp_location = os.path.join(temp_dir, filename)
        download_hash = _download_url(resp, link, temp_location)
    if link.md5_hash:
        _check_md5(download_hash, link)
    if only_download:
        _copy_file(temp_location, location, content_type, link)
    else:
        unpack_file(temp_location, location, content_type, link)
    # Populate the cache from a fresh download, then clean up the temp file
    # (but never delete a file that lives inside the cache itself).
    if target_file and target_file != temp_location:
        cache_download(target_file, temp_location, content_type)
    if target_file is None:
        os.unlink(temp_location)
    os.rmdir(temp_dir)
def _get_response_from_url(target_url, link):
    # Open the URL, logging a fatal message (with the originating link for
    # context) before re-raising any HTTP or I/O error.
    try:
        resp = urlopen(target_url)
    except urllib2.HTTPError:
        e = sys.exc_info()[1]
        logger.fatal("HTTP error %s while getting %s" % (e.code, link))
        raise
    except IOError:
        e = sys.exc_info()[1]
        # Typically an FTP error
        logger.fatal("Error %s while getting %s" % (e, link))
        raise
    return resp
class Urllib2HeadRequest(urllib2.Request):
    # urllib2.Request always issues GET/POST; overriding get_method is the
    # standard trick to send a HEAD request instead.
    def get_method(self):
        return "HEAD"
lpramuk/robottelo | tests/foreman/ui/test_computeresource_libvirt.py | 2 | 4804 | # -*- encoding: utf-8 -*-
"""Test for Compute Resource UI
:Requirement: Computeresource Libvirt
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: ComputeResources-libvirt
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import choice
from fauxfactory import gen_string
from nailgun import entities
from pytest import skip
from robottelo.config import settings
from robottelo.constants import COMPUTE_PROFILE_SMALL
from robottelo.constants import FOREMAN_PROVIDERS
from robottelo.constants import LIBVIRT_RESOURCE_URL
from robottelo.decorators import fixture
from robottelo.decorators import setting_is_set
from robottelo.decorators import tier2
# These tests need a configured libvirt host; skip the whole module when the
# compute_resources settings section is absent.
if not setting_is_set('compute_resources'):
    skip('skipping tests due to missing compute_resources settings', allow_module_level=True)
@fixture(scope='module')
def module_libvirt_url():
    # qemu+ssh style connection URL built from the configured libvirt host.
    return LIBVIRT_RESOURCE_URL % settings.compute_resources.libvirt_hostname
@tier2
def test_positive_end_to_end(session, module_org, module_loc, module_libvirt_url):
    """Perform end to end testing for compute resource Libvirt component:
    create with org/loc and display settings, read back, update (rename,
    re-describe, add org/loc), check compute-profile integration, update a
    profile's cpus/memory, and finally delete.

    :id: 4f4650c8-32f3-4dab-b3bf-9c54d0cda3b2

    :expectedresults: All expected CRUD actions finished successfully.

    :CaseLevel: Integration

    :CaseImportance: High

    :BZ: 1662164
    """
    cr_name = gen_string('alpha')
    cr_description = gen_string('alpha')
    new_cr_name = gen_string('alpha')
    new_cr_description = gen_string('alpha')
    new_org = entities.Organization().create()
    new_loc = entities.Location().create()
    # Randomise console settings to cover both display types over time.
    display_type = choice(('VNC', 'SPICE'))
    console_passwords = choice((True, False))
    with session:
        # Create, then verify every submitted field round-trips.
        session.computeresource.create(
            {
                'name': cr_name,
                'description': cr_description,
                'provider': FOREMAN_PROVIDERS['libvirt'],
                'provider_content.url': module_libvirt_url,
                'provider_content.display_type': display_type,
                'provider_content.console_passwords': console_passwords,
                'organizations.resources.assigned': [module_org.name],
                'locations.resources.assigned': [module_loc.name],
            }
        )
        cr_values = session.computeresource.read(cr_name)
        assert cr_values['name'] == cr_name
        assert cr_values['description'] == cr_description
        assert cr_values['provider_content']['url'] == module_libvirt_url
        assert cr_values['provider_content']['display_type'] == display_type
        assert cr_values['provider_content']['console_passwords'] == console_passwords
        assert cr_values['organizations']['resources']['assigned'] == [module_org.name]
        assert cr_values['locations']['resources']['assigned'] == [module_loc.name]
        # Update: rename plus additional org/location assignments.
        session.computeresource.edit(
            cr_name,
            {
                'name': new_cr_name,
                'description': new_cr_description,
                'organizations.resources.assigned': [new_org.name],
                'locations.resources.assigned': [new_loc.name],
            },
        )
        assert not session.computeresource.search(cr_name)
        cr_values = session.computeresource.read(new_cr_name)
        assert cr_values['name'] == new_cr_name
        assert cr_values['description'] == new_cr_description
        assert set(cr_values['organizations']['resources']['assigned']) == {
            module_org.name,
            new_org.name,
        }
        assert set(cr_values['locations']['resources']['assigned']) == {
            module_loc.name,
            new_loc.name,
        }
        # check that the compute resource is listed in one of the default compute profiles
        profile_cr_values = session.computeprofile.list_resources(COMPUTE_PROFILE_SMALL)
        profile_cr_names = [cr['Compute Resource'] for cr in profile_cr_values]
        assert '{0} ({1})'.format(new_cr_name, FOREMAN_PROVIDERS['libvirt']) in profile_cr_names
        session.computeresource.update_computeprofile(
            new_cr_name,
            COMPUTE_PROFILE_SMALL,
            {'provider_content.cpus': '16', 'provider_content.memory': '8 GB'},
        )
        cr_profile_values = session.computeresource.read_computeprofile(
            new_cr_name, COMPUTE_PROFILE_SMALL
        )
        assert cr_profile_values['compute_profile'] == COMPUTE_PROFILE_SMALL
        assert cr_profile_values['compute_resource'] == '{0} ({1})'.format(
            new_cr_name, FOREMAN_PROVIDERS['libvirt']
        )
        assert cr_profile_values['provider_content']['cpus'] == '16'
        assert cr_profile_values['provider_content']['memory'] == '8 GB'
        # Delete and confirm it is gone.
        session.computeresource.delete(new_cr_name)
        assert not session.computeresource.search(new_cr_name)
| gpl-3.0 |
h2oai/h2o-dev | h2o-py/tests/testdir_apis/H2OAssembly/pyunit_h2oassembly_static_methods.py | 5 | 3073 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
from tests import pyunit_utils
from h2o.assembly import *
from h2o.utils.typechecks import assert_is_type
def h2oassembly_divide():
    """
    Python API test: test all H2OAssembly static methods and they are:
    H2OAssembly.divide(frame1, frame2)
    H2OAssembly.plus(frame1, frame2)
    H2OAssembly.multiply(frame1, frame2)
    H2OAssembly.minus(frame1, frame2)
    H2OAssembly.less_than(frame1, frame2)
    H2OAssembly.less_than_equal(frame1, frame2)
    H2OAssembly.equal_equal(frame1, frame2)
    H2OAssembly.not_equal(frame1, frame2)
    H2OAssembly.greater_than(frame1, frame2)
    H2OAssembly.greater_than_equal(frame1, frame2)

    Uses two constant frames (all 4s and all 2s) so every elementwise
    result is a single known constant; comparison ops return 1.0/0.0.
    """
    python_list1 = [[4,4,4,4],[4,4,4,4]]
    python_list2 = [[2,2,2,2], [2,2,2,2]]
    frame1 = h2o.H2OFrame(python_obj=python_list1)
    frame2 = h2o.H2OFrame(python_obj=python_list2)
    verify_results(H2OAssembly.divide(frame1, frame2), 2, "H2OAssembly.divide()")     # test H2OAssembly.divide()
    verify_results(H2OAssembly.plus(frame1, frame2), 6, "H2OAssembly.plus()")     # test H2OAssembly.plus()
    verify_results(H2OAssembly.multiply(frame1, frame2), 8, "H2OAssembly.multiply()")     # test H2OAssembly.multiply()
    verify_results(H2OAssembly.minus(frame1, frame2), 2, "H2OAssembly.minus()")     # test H2OAssembly.minus()
    # test H2OAssembly.less_than()
    verify_results(H2OAssembly.less_than(frame2, frame1), 1.0, "H2OAssembly.less_than()")
    verify_results(H2OAssembly.less_than(frame2, frame2), 0.0, "H2OAssembly.less_than()")
    # test H2OAssembly.less_than_equal()
    verify_results(H2OAssembly.less_than_equal(frame2, frame1), 1.0, "H2OAssembly.less_than_equal()")
    verify_results(H2OAssembly.less_than_equal(frame2, frame2), 1.0, "H2OAssembly.less_than_equal()")
    # test H2OAssembly.equal_equal()
    verify_results(H2OAssembly.equal_equal(frame2, frame1), 0.0, "H2OAssembly.equal_equal()")
    verify_results(H2OAssembly.equal_equal(frame2, frame2), 1.0, "H2OAssembly.equal_equal()")
    # test H2OAssembly.not_equal()
    verify_results(H2OAssembly.not_equal(frame2, frame1), 1.0, "H2OAssembly.not_equal()")
    verify_results(H2OAssembly.not_equal(frame2, frame2), 0.0, "H2OAssembly.not_equal()")
    # test H2OAssembly.greater_than()
    verify_results(H2OAssembly.greater_than(frame1, frame2), 1.0, "H2OAssembly.greater_than()")
    verify_results(H2OAssembly.greater_than(frame2, frame2), 0.0, "H2OAssembly.greater_than()")
    # test H2OAssembly.greater_than_equal()
    verify_results(H2OAssembly.greater_than_equal(frame1, frame2), 1.0, "H2OAssembly.greater_than_equal()")
    verify_results(H2OAssembly.greater_than_equal(frame2, frame2), 1.0, "H2OAssembly.greater_than_equal()")
def verify_results(resultFrame, matchValue, commandName):
    # Assert the op returned an H2OFrame and that every cell equals the
    # expected constant.
    assert_is_type(resultFrame, H2OFrame)
    assert (resultFrame==matchValue).all(), commandName+" command is not working."
# When run directly, go through the pyunit harness; when imported by the
# test runner, execute the check immediately.
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2oassembly_divide)
else:
    h2oassembly_divide()
| apache-2.0 |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    # Group prober bundling every single-byte charset model (Cyrillic,
    # Greek, Bulgarian, Hungarian, Thai, Hebrew); the group picks whichever
    # member reports the highest confidence.
    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [
            SingleByteCharSetProber(Win1251CyrillicModel),
            SingleByteCharSetProber(Koi8rModel),
            SingleByteCharSetProber(Latin5CyrillicModel),
            SingleByteCharSetProber(MacCyrillicModel),
            SingleByteCharSetProber(Ibm866Model),
            SingleByteCharSetProber(Ibm855Model),
            SingleByteCharSetProber(Latin7GreekModel),
            SingleByteCharSetProber(Win1253GreekModel),
            SingleByteCharSetProber(Latin5BulgarianModel),
            SingleByteCharSetProber(Win1251BulgarianModel),
            SingleByteCharSetProber(Latin2HungarianModel),
            SingleByteCharSetProber(Win1250HungarianModel),
            SingleByteCharSetProber(TIS620ThaiModel),
        ]
        # Hebrew needs a dedicated prober that arbitrates between logical
        # (windows-1255) and visual ordering, each backed by its own
        # single-byte prober.
        hebrewProber = HebrewProber()
        logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
                                                      False, hebrewProber)
        visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
                                                     hebrewProber)
        hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
        self._mProbers.extend([hebrewProber, logicalHebrewProber,
                               visualHebrewProber])
        self.reset()
| apache-2.0 |
sivasankariit/linux-rl | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return the mean of *n* samples whose sum is *total*."""
    mean = total / n
    return mean
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into one nanosecond count."""
    return nsecs + secs * NSECS_PER_SEC
def nsecs_secs(nsecs):
    # Whole-second part of a nanosecond timestamp.
    # NOTE(review): under Python 2 this is integer (floor) division; under
    # Python 3 it would be true division — this script targets Python 2.
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    # Sub-second remainder (nanoseconds within the current second).
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond timestamp as 'seconds.nanoseconds'.

    Bug fix: the original assignment ended with a stray trailing comma,
    which made the function return a 1-tuple instead of a string (and it
    also shadowed the builtin ``str``).
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Accumulate (min, max, smoothed-average, count) statistics for *key*.

    The stored "average" is the running smoothing ``(avg + value) / 2``,
    not a true arithmetic mean (matches the original behaviour).
    """
    # dict.has_key() only exists on Python 2; the `in` operator is
    # equivalent and also works on Python 3.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    # ANSI escapes: cursor home (ESC[H) then clear the screen (ESC[2J).
    print("\x1b[H\x1b[2J")
# Optional syscall-name support via the audit-libs-python bindings.
audit_package_warned = False
try:
    import audit
    # Map os.uname() machine strings to audit machine ids.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha'	: audit.MACH_ALPHA,
        'ia64'	: audit.MACH_IA64,
        'ppc'	: audit.MACH_PPC,
        'ppc64'	: audit.MACH_PPC64,
        's390'	: audit.MACH_S390,
        's390x'	: audit.MACH_S390X,
        'i386'	: audit.MACH_X86,
        'i586'	: audit.MACH_X86,
        'i686'	: audit.MACH_X86,
    }
    try:
        # MACH_ARMEB only exists in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit is optional: warn once, syscall_name() falls back to numbers.
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    # Resolve a syscall number to its name; falls back to the raw number
    # when the audit module (or machine_id) is unavailable.
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Return the symbolic errno name for *nr* (sign ignored), or an
    'Unknown N errno' placeholder when the value is not a known errno."""
    code = abs(nr)
    if code in errno.errorcode:
        return errno.errorcode[code]
    return "Unknown %d errno" % nr
| gpl-2.0 |
intel-analytics/analytics-zoo | pyzoo/zoo/serving/schema.py | 1 | 4815 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pyarrow as pa
import numpy as np
import cv2
import base64
def get_field_and_data(key, value):
    """Build a (pyarrow.Field, pyarrow.Array) pair for one record entry.

    *value* may be:
      - list of str        -> single '|'-joined string column
      - list of 3 ndarrays -> sparse tensor [indices, values, shape]
      - str                -> treated as an image path, base64-JPEG encoded
      - dict               -> image given by 'path' or already-encoded 'b64'
      - ndarray            -> dense tensor (empty indice fields)

    Raises TypeError for anything else.
    """
    if isinstance(value, list):
        assert len(value) > 0, "empty list is not supported"
        sample = value[0]
        if isinstance(sample, str):
            # list of string will be converted to Tensor of String
            # use | to split
            str_concat = '|'.join(value)
            field = pa.field(key, pa.string())
            data = pa.array([str_concat])
            return field, data
        elif isinstance(sample, np.ndarray):
            assert len(value) == 3, "Sparse Tensor must have list of ndarray" \
                                    "with length 3, which represent indices, " \
                                    "values, shape respectively"
            indices_field = pa.field("indiceData", pa.list_(pa.int32()))
            indices_shape_field = pa.field("indiceShape", pa.list_(pa.int32()))
            value_field = pa.field("data", pa.list_(pa.float32()))
            shape_field = pa.field("shape", pa.list_(pa.int32()))
            sparse_tensor_type = pa.struct(
                [indices_field, indices_shape_field, value_field, shape_field])
            field = pa.field(key, sparse_tensor_type)
            shape = value[2]
            values = value[1]
            # NOTE(review): indices are cast to float32 although the field
            # type is list_(int32) — presumably pyarrow converts back on
            # array construction; confirm this cast is intended.
            indices = value[0].astype("float32").flatten()
            indices_shape = value[0].shape
            data = pa.array([{'indiceData': indices},
                             {'indiceShape': indices_shape},
                             {'data': values},
                             {'shape': shape}], type=sparse_tensor_type)
            return field, data
        else:
            raise TypeError("List of string and ndarray is supported,"
                            "but your input does not match")
    elif isinstance(value, str):
        # str value will be considered as image path
        field = pa.field(key, pa.string())
        data = encode_image(value)
        # b = bytes(data, "utf-8")
        data = pa.array([data])
        # ba = pa.array(b, type=pa.binary())
        return field, data
    elif isinstance(value, dict):
        if "path" in value.keys():
            path = value["path"]
            data = encode_image(path)
        elif "b64" in value.keys():
            # Caller already supplies base64 text; used verbatim.
            data = value["b64"]
        else:
            raise TypeError("Your input dict must contain"
                            " either 'path' or 'b64' key")
        field = pa.field(key, pa.string())
        data = pa.array([data])
        return field, data
    elif isinstance(value, np.ndarray):
        # ndarray value will be considered as tensor
        indices_field = pa.field("indiceData", pa.list_(pa.int32()))
        indices_shape_field = pa.field("indiceShape", pa.list_(pa.int32()))
        data_field = pa.field("data", pa.list_(pa.float32()))
        shape_field = pa.field("shape", pa.list_(pa.int32()))
        tensor_type = pa.struct(
            [indices_field, indices_shape_field, data_field, shape_field])
        field = pa.field(key, tensor_type)
        shape = np.array(value.shape)
        d = value.astype("float32").flatten()
        # data = pa.array([{'data': d}, {'shape': shape}, {}],
        #                 type=tensor_type)
        data = pa.array([{'indiceData': []},
                         {'indiceShape': []},
                         {'data': d},
                         {'shape': shape}], type=tensor_type)
        return field, data
    else:
        raise TypeError("Your request does not match any schema, "
                        "please check.")
def encode_image(img):
    """Encode an image as base64 JPEG text.

    :param img: either a path to an image file (str) or an image ndarray.
    :return: base64-encoded JPEG string, or None if a path could not be read.
    """
    if isinstance(img, str):
        path = img
        img = cv2.imread(path)
        # Bug fix: cv2.imread returns None (not an empty array) for an
        # unreadable path, so the original `img.size` raised AttributeError
        # before the friendly skip message could run — and the message
        # printed the decoded array instead of the offending path.
        if img is None or img.size == 0:
            print("You have pushed an image with path: ",
                  path, "the path is invalid, skipped.")
            return
    # force resize here to avoid input image shape inconsistent
    # if the shape is consistent, it would not affect the data
    data = cv2.imencode(".jpg", img)[1]
    img_encoded = base64.b64encode(data).decode("utf-8")
    return img_encoded
| apache-2.0 |
Chibin/gpdb | src/test/tinc/tincrepo/mpp/models/regress/sql_related/regress_sql_concurrency_test_case/regress_sql_scenario_test_case.py | 12 | 3705 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from time import sleep
import os
import fnmatch
import unittest2 as unittest
from tinctest.models.scenario import ScenarioTestCase
from mpp.models import SQLConcurrencyTestCase
# Dotted-path prefix used to address the mock test cases below by name.
t_prefix = 'mpp.models.regress.sql_related.regress_sql_concurrency_test_case.regress_sql_scenario_test_case.'

@unittest.skip('mock')
class MockSQLScenaioTestCase(ScenarioTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    """
    # NOTE: the docstring above is tinc metadata parsed at runtime — do not
    # reword it. Class name spelling ('Scenaio') is referenced elsewhere.

    def __init__(self, methodName):
        super(MockSQLScenaioTestCase, self).__init__(methodName)
        # Un-skip the mock concurrency case so this scenario can execute it.
        from mpp.models.regress.sql_related.regress_sql_concurrency_test_case.regress_sql_scenario_test_case import MockSQLConcurrencyTestCase as mctc
        mctc.__unittest_skip__ = False

    def test_serial_execution(self):
        # Queue a single concurrency test to run as one scenario step.
        test_case_list = []
        test_case_list.append(t_prefix+'MockSQLConcurrencyTestCase.test_query02')
        self.test_case_scenario.append(test_case_list)
@unittest.skip('mock')
class MockSQLConcurrencyTestCase(SQLConcurrencyTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    @concurrency 2
    @iterations 1
    """
    # NOTE: docstring above is tinc metadata parsed at runtime — keep as-is.

    def __init__(self, methodName):
        super(MockSQLConcurrencyTestCase, self).__init__(methodName)

    def setUp(self):
        # NOTE(review): the leading `pass` is dead code; the statements
        # after it still execute.
        pass
        self.tested = True
        self.assertTrue(self.tested)

    def test_explicit_definition(self):
        # Sleep so that concurrent executions actually overlap.
        sleep(1)

    def test_failure(self):
        # Deliberate failure used to exercise failure reporting.
        self.fail("Just like that !")
class ScenarioTestCaseTests(unittest.TestCase):
    """Meta-tests validating construction and execution of the mock scenario."""

    def test_construction(self):
        # Metadata is parsed out of the mock class's docstring by tinc.
        tinc_test_case = MockSQLScenaioTestCase('test_serial_execution')
        self.assertEqual(tinc_test_case.name, 'MockSQLScenaioTestCase.test_serial_execution')
        self.assertEqual(tinc_test_case.author, "balasr3")
        self.assertEqual(tinc_test_case.description, "test case with metadata")
        self.assertEqual(tinc_test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))
        self.assertEqual(tinc_test_case.modified_datetime, datetime.strptime('2012-07-05 12:00:02', '%Y-%m-%d %H:%M:%S'))
        self.assertEqual(len(tinc_test_case.tags), 2)
        self.assertEqual(len(tinc_test_case.test_case_scenario), 0)
        self.assertTrue(tinc_test_case.fail_fast)

    def test_sanity_run(self):
        tinc_test_case = MockSQLScenaioTestCase('test_serial_execution')
        # Force the skipped mock class to actually run.
        tinc_test_case.__class__.__unittest_skip__ = False
        tinc_test_case.run()
        self.assertEqual(len(tinc_test_case.test_case_scenario), 1)
        self.assertEqual(len(tinc_test_case.test_case_scenario[0][0]), 1)
        # Cleanup: remove query02 artifacts produced by the run.
        # NOTE(review): 'path' is computed but unused below.
        if os.path.exists('output'):
            for file in os.listdir('output'):
                path = os.path.join('output', file)
                if fnmatch.fnmatch(file, 'query02*.*'):
                    os.remove(os.path.join('output', file))
| apache-2.0 |
ShaolongHu/Nitrate | tcms/testcases/urls/case_urls.py | 1 | 2085 | # -*- coding: utf-8 -*-
from django.conf.urls import url, patterns
from tcms.testcases.views import SimpleTestCaseView
from tcms.testcases.views import TestCaseCaseRunDetailPanelView
from tcms.testcases.views import TestCaseCaseRunListPaneView
from tcms.testcases.views import TestCaseReviewPaneView
from tcms.testcases.views import TestCaseSimpleCaseRunView
# Test-case detail endpoints, all keyed by a numeric case_id.
urlpatterns = patterns('tcms.testcases.views',
    url(r'^(?P<case_id>\d+)/$', 'get'),
    url(r'^(?P<case_id>\d+)/edit/$', 'edit'),
    url(r'^(?P<case_id>\d+)/history/$', 'text_history'),
    url(r'^(?P<case_id>\d+)/attachment/$', 'attachment'),
    url(r'^(?P<case_id>\d+)/log/$', 'get_log'),
    url(r'^(?P<case_id>\d+)/bug/$', 'bug'),
    url(r'^(?P<case_id>\d+)/plan/$', 'plan'),
    # Class-based panel/pane views rendered inside the case page.
    url(r'^(?P<case_id>\d+)/readonly-pane/$',
        SimpleTestCaseView.as_view(),
        name='case-readonly-pane'),
    url(r'^(?P<case_id>\d+)/review-pane/$',
        TestCaseReviewPaneView.as_view(),
        name='case-review-pane'),
    url(r'^(?P<case_id>\d+)/caserun-list-pane/$',
        TestCaseCaseRunListPaneView.as_view(),
        name='caserun-list-pane'),
    url(r'^(?P<case_id>\d+)/caserun-simple-pane/$',
        TestCaseSimpleCaseRunView.as_view(),
        name='caserun-simple-pane'),
    url(r'^(?P<case_id>\d+)/caserun-detail-pane/$',
        TestCaseCaseRunDetailPanelView.as_view(),
        name='caserun-detail-pane'),
)

# Run-listing endpoint lives in the testruns app but is mounted here.
urlpatterns += patterns('tcms.testruns.views',
    url(r'^(?P<plan_id>\d+)/runs/$',
        'load_runs_of_one_plan',
        name='load_runs_of_one_plan_url'),
)
| gpl-2.0 |
renhaoqi/gem5-stable | scons-local-2.2.0/SCons/Scanner/Dir.py | 14 | 3838 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
    """Return the subset of *nodes* that disambiguate to directory Nodes."""
    return [node for node in nodes
            if isinstance(node.disambiguate(), SCons.Node.FS.Dir)]
def DirScanner(**kw):
    """Build a prototype Scanner that walks directories for on-disk files,
    recursing only into entries that are themselves directories."""
    kw.update(node_factory=SCons.Node.FS.Entry, recursive=only_dirs)
    return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
    """Build a prototype Scanner that only lists a directory Node's
    in-memory entries, without recursing."""
    kw.update(node_factory=SCons.Node.FS.Entry, recursive=None)
    return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
# Entries never scanned: directory control entries plus every .sconsign
# database variant; each is also stored under its case-normalized form.
skip_entry = {}

skip_entry_list = [
   '.',
   '..',
   '.sconsign',
   # Used by the native dblite.py module.
   '.sconsign.dblite',
   # Used by dbm and dumbdbm.
   '.sconsign.dir',
   # Used by dbm.
   '.sconsign.pag',
   # Used by dumbdbm.
   '.sconsign.dat',
   '.sconsign.bak',
   # Used by some dbm emulations using Berkeley DB.
   '.sconsign.db',
]

for skip in skip_entry_list:
    skip_entry[skip] = 1
    skip_entry[SCons.Node.FS._my_normcase(skip)] = 1

# Predicate: True for names that should be scanned.
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
    """
    Scan a directory for its on-disk files and subdirectories.

    Looking each name up as an Entry adds it to the in-memory Node tree,
    after which scan_in_memory() does the actual collection.
    """
    try:
        names = node.fs.listdir(node.abspath)
    except (IOError, OSError):
        return []
    entry = node.Entry
    for name in names:
        if do_not_scan(name):
            # The './' prefix keeps names starting with '#' from being
            # looked up relative to the top-level directory.
            entry('./' + name)
    return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
    """
    "Scan" a Node.FS.Dir by returning its in-memory entries, sorted by name.
    """
    try:
        entries = node.entries
    except AttributeError:
        # Not a Node.FS.Dir (or not enough like one) — this happens when a
        # mixed target list (Dirs and Files) has a Dir as its first entry.
        return []
    wanted = sorted(name for name in entries.keys() if do_not_scan(name))
    return [entries[name] for name in wanted]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
davidavdav/SprekendNederland | bin/answers.py | 1 | 3284 | #!/usr/bin/env python
## (c) 2015 David A. van Leeuwen
from sn.db import *
from sqlalchemy import or_
import logging
def norm(s):
    """Strip *s* and force it down to plain ASCII, dropping what won't fit."""
    stripped = s.strip()
    return stripped.decode("unicode_escape").encode("ascii", "ignore")
def addto(d, key, value):
    """Append *value* to the list at d[key], creating the list on first use.

    Uses dict.setdefault, the idiomatic one-step form of the original
    "create-if-missing then append" pattern.
    """
    d.setdefault(key, []).append(value)
def string(x):
    """Render *x* as a CSV-ready token: NA for missing, quoted for strings,
    plain str() otherwise."""
    # `is` is the correct identity test for None (== can be overridden).
    if x is None:
        return "NA"
    # isinstance is the idiomatic type test (also accepts str subclasses).
    elif isinstance(x, str):
        return '"' + x + '"'
    else:
        return str(x)
def prompt(task):
    """Return the '/'-joined prompt text(s) for a task's question recording,
    or None when no text is known for that recording."""
    texts = text.get(task.question_recording_id)
    return "/".join(texts) if texts else None
def textinfo(rid):
    """Return [key, type] of the text group for recording *rid*, or
    [None, None] when the recording has no known group."""
    group = texttype.get(rid)
    return [group.key, group.type] if group else [None, None]
logging.basicConfig(level=logging.INFO)
#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

## find text with recording_id
# Build per-recording lookups: prompt texts, speaker profile, text group.
text = dict()
speaker = dict()
texttype= dict()
qq = session.query(Tasks, Texts, TextGroups).join(TaskText).filter(TaskText.text_id == Texts.id, TextGroups.id == Texts.text_group_id).order_by(Texts.id)
for ta, te, tg in qq:
    if not ta.recording_id:
        continue
    addto(text, ta.recording_id, norm(te.text))
    # A recording should only ever belong to one speaker profile.
    if ta.recording_id in speaker:
        if speaker[ta.recording_id] != ta.profile_id:
            print "Inconsistent", speaker[ta.recording_id], ta.profile_id
    else:
        speaker[ta.recording_id] = ta.profile_id
    texttype[ta.recording_id] = tg
logging.info("Number of recordings: %d", len(text))
## main loop over questions
# Emit one CSV row per answer, handled per component type below.
qq = session.query(Questions).join(QuestionGroups, Components).filter(QuestionGroups.name.like("Vragenlijst%"))
print ", ".join([string(x) for x in ["qid", "atype", "qlist", "utype", "lid", "sid", "value", "prompt"]])

# Numeric slider answers.
for q in qq.filter(Components.type == "slider"):
    logging.info("Question %d: %s", q.id, q.question)
    qid = "q%02d" % q.id
    aq = session.query(Tasks, Answers).filter(Tasks.question_id == q.id, Answers.id == Tasks.answer_id)
    for t, a in aq.order_by(Answers.created_at):
        rid = t.question_recording_id
        print ",".join([string(x) for x in [qid, q.components.type] + textinfo(rid) + [t.profile_id, speaker.get(rid), a.answer_numeric, prompt(t)]])

# Yes/no and multiple-choice answers (stored as selected Options).
for q in qq.filter(or_(Components.type == "yesno", Components.type == "option")):
    logging.info("Question %d: %s", q.id, q.question)
    qid = "q%02d" % q.id
    aq = session.query(Tasks, Options).join(Answers, AnswerOption).filter(Tasks.question_id == q.id, AnswerOption.option_id == Options.id)
    for t, o in aq.order_by(Answers.created_at):
        rid = t.question_recording_id
        print ",".join([string(x) for x in [qid, q.components.type] + textinfo(rid) + [t.profile_id, speaker.get(rid), o.value, prompt(t)]])

# Map-location answers, rendered as "longitude/latitude/zoom".
for q in qq.filter(Components.type == "location"):
    logging.info("Question %d: %s", q.id, q.question)
    qid = "q%02d" % q.id
    aq = session.query(Tasks, Locations).join(Answers, AnswerLocation).filter(Tasks.question_id == q.id, AnswerLocation.location_id == Locations.id)
    for t, l in aq:
        loc = "%8.6f/%8.6f/%d" % (l.longitude, l.latitude, l.mapzoom)
        rid = t.question_recording_id
        print ",".join([string(x) for x in [qid, q.components.type] + textinfo(rid) + [t.profile_id, speaker.get(rid), loc, prompt(t)]])
| gpl-2.0 |
8191/ansible | lib/ansible/runner/lookup_plugins/together.py | 174 | 2135 | # (c) 2013, Bradley Young <young.bradley@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.utils as utils
from ansible.utils import safe_eval
import ansible.errors as errors
from itertools import izip_longest
def flatten(terms):
    """Flatten exactly one level: splice list/tuple members into the result,
    keep every other term as-is."""
    flat = []
    for item in terms:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
class LookupModule(object):
    """
    Transpose a list of arrays:
        [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
    Replace any empty spots in 2nd array with None:
        [1, 2], [3] -> [1, 3], [2, None]
    """

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def __lookup_injects(self, terms, inject):
        # Expand each term through the lookup-plugin listifier (resolves
        # templated/looked-up values against the inject variables).
        results = []
        for x in terms:
            intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
            results.append(intermediate)
        return results

    def run(self, terms, inject=None, **kwargs):
        # this code is common with 'items.py' consider moving to utils if we need it again
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        terms = self.__lookup_injects(terms, inject)

        my_list = terms[:]
        if len(my_list) == 0:
            raise errors.AnsibleError("with_together requires at least one element in each list")
        # izip_longest pads the shorter lists with None (Python 2 itertools).
        return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
| gpl-3.0 |
py-geek/City-Air | venv/lib/python2.7/site-packages/pymysql/tests/test_nextset.py | 4 | 1603 | import unittest2
from pymysql.tests import base
from pymysql import util
class TestNextset(base.PyMySQLTestCase):
    """Exercise cursor.nextset() across multi-statement queries."""

    def setUp(self):
        super(TestNextset, self).setUp()
        # Reuse the first shared test connection.
        self.con = self.connections[0]

    def test_nextset(self):
        # Two result sets; nextset() advances to the second, then None.
        cur = self.con.cursor()
        cur.execute("SELECT 1; SELECT 2;")
        self.assertEqual([(1,)], list(cur))
        r = cur.nextset()
        self.assertTrue(r)
        self.assertEqual([(2,)], list(cur))
        self.assertIsNone(cur.nextset())

    def test_skip_nextset(self):
        # A new execute() must discard any unread remaining result sets.
        cur = self.con.cursor()
        cur.execute("SELECT 1; SELECT 2;")
        self.assertEqual([(1,)], list(cur))
        cur.execute("SELECT 42")
        self.assertEqual([(42,)], list(cur))

    def test_ok_and_next(self):
        # OK packets (from `commit`) count as result sets too.
        cur = self.con.cursor()
        cur.execute("SELECT 1; commit; SELECT 2;")
        self.assertEqual([(1,)], list(cur))
        self.assertTrue(cur.nextset())
        self.assertTrue(cur.nextset())
        self.assertEqual([(2,)], list(cur))
        self.assertFalse(bool(cur.nextset()))

    @unittest2.expectedFailure
    def test_multi_cursor(self):
        # Interleaving two cursors on one connection is not supported yet.
        cur1 = self.con.cursor()
        cur2 = self.con.cursor()
        cur1.execute("SELECT 1; SELECT 2;")
        cur2.execute("SELECT 42")
        self.assertEqual([(1,)], list(cur1))
        self.assertEqual([(42,)], list(cur2))
        r = cur1.nextset()
        self.assertTrue(r)
        self.assertEqual([(2,)], list(cur1))
        self.assertIsNone(cur1.nextset())
#TODO: How about SSCursor and nextset?
# It's very hard to implement correctly...
| mit |
squilter/mavlink | scripts/xmlpretty.py | 113 | 2740 | #!/usr/bin/python
import xml.dom.minidom as minidom
from sys import exit, argv, stderr, stdout
import re
import argparse
# Parse the command line: a required input file, an optional output file
# (stdout when omitted), then load the whole document into a DOM.
parser = argparse.ArgumentParser(description="Format XML")
parser.add_argument('infile', nargs=1)
parser.add_argument('outfile', nargs='?')
args = parser.parse_args()

f = open(args.infile[0],'r')
text = f.read()
f.close()

dom = minidom.parseString(text)
def contains_only_text(node):
    """True when every child of *node* is a text node (vacuously true for
    a node with no children)."""
    return all(child.nodeType == child.TEXT_NODE
               for child in node.childNodes)
def foreach_tree(doc, root, func, level=0):
    """Apply func(doc, node, level) to *root*, then to its subtree depth-first.

    Children are snapshotted before recursing, so *func* may safely mutate
    the tree (remove/insert siblings) while we walk it.
    """
    func(doc, root, level)
    for child in root.childNodes[:]:
        foreach_tree(doc, child, func, level + 1)
def strip_indent(doc, node, level):
    """Remove pure-whitespace text nodes (i.e. the document's old indentation)."""
    is_blank_text = (node.nodeType == node.TEXT_NODE
                     and re.match(r"^\s+$", node.nodeValue))
    if is_blank_text:
        node.parentNode.removeChild(node)
        node.unlink()
def strip_comment_whitespace(doc, node, level):
    """Collapse each whitespace run inside a comment to a single space."""
    if node.nodeType != node.COMMENT_NODE:
        return
    node.nodeValue = re.sub(r"\s+", " ", node.nodeValue)
def strip_comments_completely(doc, node, level):
    """Delete comment nodes from the tree outright."""
    if node.nodeType != node.COMMENT_NODE:
        return
    node.parentNode.removeChild(node)
    node.unlink()
def strip_text_whitespace(doc, node, level):
    """Collapse internal whitespace runs and trim both ends of text nodes."""
    if node.nodeType == node.TEXT_NODE:
        collapsed = re.sub(r"\s+", " ", node.nodeValue)
        node.nodeValue = collapsed.strip()
def strip_text_completely(doc, node, level):
    """Delete text nodes from the tree outright."""
    if node.nodeType == node.TEXT_NODE:
        node.parentNode.removeChild(node)
        node.unlink()
def auto_indent(doc, node, level):
    # Insert fresh indentation: a newline + 4 spaces per nesting level
    # before each node whose parent holds more than just text. The check
    # against contains_only_text keeps text-only elements on one line.
    if level > 0 and not contains_only_text(node.parentNode):
        node.parentNode.insertBefore(doc.createTextNode("\n%s" % (" "*4*level)), node)
        # For the last child, also add the closing-tag indentation so the
        # parent's end tag lines up one level out.
        if node.nextSibling is None:
            node.parentNode.appendChild(doc.createTextNode("\n%s" % (" "*4*(level-1))))
def next_non_text_sibling(node):
    """Return the next sibling of *node* that is not a text node, or None."""
    sibling = node.nextSibling
    while sibling is not None and sibling.nodeType == node.TEXT_NODE:
        sibling = sibling.nextSibling
    return sibling
def auto_space(doc, node, level):
    """Insert a blank line after multi-child elements that are followed by
    another (non-text) element, separating sibling blocks visually."""
    has_multiple_children = (node.childNodes is not None
                             and len(node.childNodes) > 1)
    if level > 0 and has_multiple_children \
            and next_non_text_sibling(node) is not None:
        node.parentNode.insertBefore(doc.createTextNode("\n"), node.nextSibling)
# Reformat in five passes: drop old indentation, normalize comment and
# text whitespace, then lay down fresh indentation and blank lines.
foreach_tree(dom, dom.documentElement, strip_indent)
foreach_tree(dom, dom.documentElement, strip_comment_whitespace)
foreach_tree(dom, dom.documentElement, strip_text_whitespace)
foreach_tree(dom, dom.documentElement, auto_indent)
foreach_tree(dom, dom.documentElement, auto_space)

# Write to the requested output file, or stdout when none was given.
if args.outfile is not None:
    f = open(args.outfile, 'w')
    f.truncate()
else:
    f = stdout

f.write("<?xml version='1.0'?>\n")
f.write(dom.documentElement.toxml())
f.write("\n")
f.close()
hcpss-banderson/py-tasc | optionresolver.py | 1 | 1993 | import optparse, yaml, json
class OptionResolver(object):
    """Resolve user supplied command line options and the manifest file."""

    def __init__(self):
        self.parser = optparse.OptionParser()
        self.set_options()

    def set_options(self):
        """Register the supported options on the optparse parser."""
        self.parser.add_option(
            "--manifest", "-m",
            help = "The location of the manifest file.",
            default = "./manifest.yml")

        self.parser.add_option(
            "--destination", "-d",
            help = "Where to assemble the code.",
            default = ".")

        self.parser.add_option(
            "--extra-parameters", "-e",
            help = "A JSON encoded string with extra parameters.")

    def parse(self):
        """Return the raw parsed user supplied values

        :rtype: dict[str, str]
        """
        return self.parser.parse_args()[0]

    def manifest_location(self):
        """Return the location of the manifest file

        :rtype: str
        """
        return self.parse().manifest

    def manifest(self):
        """Get the parsed values from the manifest

        :rtype: dict[str, mixed]
        """
        with open(self.manifest_location(), "r") as stream:
            yamlstring = stream.read()

        # Allow token replacements
        params = self.extra_parameters()
        if params:
            yamlstring = yamlstring.format(**params)

        # safe_load parses plain YAML data only: yaml.load() without an
        # explicit Loader can execute arbitrary object constructors and is
        # rejected outright by PyYAML >= 6.
        return yaml.safe_load(yamlstring)

    def destination(self):
        """Get the assembly location

        :rtype: str
        """
        return self.parse().destination

    def extra_parameters(self):
        """Get extra parameters

        :rtype: dict[str, str]
        """
        params_string = self.parse().extra_parameters
        if params_string:
            return json.loads(self.parse().extra_parameters)
| mit |
nazarov-tech/frequency | run.py | 1 | 2145 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
# Integer symbol sequence loaded from disk, one value per token.
DATA = np.genfromtxt("data.txt", dtype=int)
# Alphabet size: patterns are built over the symbol values 1..SIZE.
SIZE = 4
def count_occurrences(data, pattern):
    """Count (possibly overlapping) occurrences of *pattern* in *data*.

    :param data: 1-D integer array-like to scan.
    :param pattern: whitespace-separated integers, e.g. "1 2".
    :return: number of start positions where the full pattern matches.
    """
    # np.fromstring(..., sep=' ') is deprecated; parsing the tokens with
    # np.array is the documented replacement and behaves identically here.
    pattern = np.array(pattern.split(), dtype=int)
    pattern_len = np.shape(pattern)[0]
    data_len = np.shape(data)[0]

    occurrences_amount = 0
    for start, value in enumerate(data):
        if value == pattern[0]:  # possible start of pattern
            # Bug fix: a match occupying indices start..start+pattern_len-1
            # is valid whenever start + pattern_len <= data_len. The
            # original tested `>=` and silently dropped every occurrence
            # that ends flush with the end of the sequence.
            if start + pattern_len > data_len:
                break  # no later start can fit either
            if all(data[start + p] == pattern[p] for p in range(pattern_len)):
                occurrences_amount += 1
    return occurrences_amount
def gen_2d_frequency_matrix(data, size):
    """Generate the size x size matrix of pair frequencies.

    Entry (i, j) counts occurrences of the pair (i + 1, j + 1) in *data*.
    """
    frequency_matrix = np.empty((size, size))
    # range() works on both Python 2 and 3; xrange was Python-2-only.
    for i in range(size):
        for j in range(size):
            pattern = ' '.join(str(x) for x in [i + 1, j + 1])
            frequency_matrix[i, j] = count_occurrences(data, pattern)
    return frequency_matrix
def gen_3d_frequency_matrix(data, size):
    """Generate the (size**2) x size matrix of triple frequencies.

    Patterns are the triples (i + 1, j + 1, k + 1) in lexicographic order;
    they are consumed row-major, so row/column (r, c) holds the count for
    the (r * size + c)-th triple.
    """
    frequency_matrix = np.zeros((size ** 2, size))
    import itertools
    patterns = []
    # range() works on both Python 2 and 3; xrange was Python-2-only.
    for i, j, k in itertools.product(range(size), range(size), range(size)):
        patterns.append(' '.join(str(v) for v in [i + 1, j + 1, k + 1]))
    s = 0
    for i in range(size ** 2):
        for j in range(size):
            frequency_matrix[i, j] = count_occurrences(data, patterns[s])
            s += 1
    return frequency_matrix
# Script entry point (Python 2: note the print statements below).
if __name__ == '__main__':
    # array_string = ' '.join(str(x) for x in DATA)
    # print array_string
    matrix = gen_2d_frequency_matrix(DATA, SIZE)
    print matrix, '\n'
    matrix = gen_3d_frequency_matrix(DATA, SIZE)
    print matrix
| mit |
jdsika/TUM_HOly | openrave/docs/source/tutorials/openravepy_examples/plan_multiple_robots.py | 4 | 1445 | """Set multiple robots in one configuration to allow for simultaneously planning
"""
from openravepy import *
env = Environment() # create the environment
env.SetViewer('qtcoin') # start the viewer
# Environment is locked while we mutate the scene and plan.
with env:
    # init scene
    robot1 = env.ReadRobotURI('robots/barrettwam.robot.xml')
    env.Add(robot1,True)
    robot2 = env.ReadRobotURI('robots/barrettwam.robot.xml')
    env.Add(robot2,True)
    # Offset the second robot 0.5m along x so the arms don't overlap.
    Trobot = robot2.GetTransform()
    Trobot[0,3] += 0.5
    robot2.SetTransform(Trobot)

    RaveSetDebugLevel(DebugLevel.Debug) # set output level to debug

    # create planner parmaeters
    # The configuration space is the concatenation of both arms, so one
    # plan moves both robots simultaneously.
    params = Planner.PlannerParameters()
    params.SetConfigurationSpecification(env, robot1.GetActiveManipulator().GetArmConfigurationSpecification() + robot2.GetActiveManipulator().GetArmConfigurationSpecification())
    params.SetGoalConfig([ 2.16339636e+00,  -3.67548731e-01,  -1.84983003e+00,   1.76388705e+00,  -1.27624984e-07,   7.65325147e-09,   0.00000000e+00,  -7.27862052e-01,  -6.52626197e-01,  -8.10210670e-09,   1.34978628e+00,  -1.21644879e-08,   2.77047240e-08,   0.00000000e+00])

    # start planner
    traj = RaveCreateTrajectory(env,'')
    planner = RaveCreatePlanner(env,'birrt')
    planner.InitPlan(None,params)
    status = planner.PlanPath(traj)

# set new traj to robot controllers
robot1.GetController().SetPath(traj)
robot2.GetController().SetPath(traj)
robot1.WaitForController(0) # wait
robot2.WaitForController(0) # wait
| mit |
yotchang4s/cafebabepy | src/main/python/encodings/shift_jis.py | 816 | 1039 | #
# shift_jis.py: Python Unicode Codec for SHIFT_JIS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# The underlying C codec implementation for shift_jis.
codec = _codecs_jp.getcodec('shift_jis')

class Codec(codecs.Codec):
    # Stateless encode/decode delegated directly to the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Stateful chunk-by-chunk encoder backed by the multibyte codec machinery.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Stateful chunk-by-chunk decoder backed by the multibyte codec machinery.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reader that decodes shift_jis on the fly.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writer that encodes shift_jis on the fly.
    codec = codec
def getregentry():
    # CodecInfo consumed by the codecs registry's search function.
    return codecs.CodecInfo(
        name='shift_jis',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| bsd-3-clause |
alextruberg/custom_django | django/contrib/staticfiles/views.py | 105 | 1544 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
from django.contrib.staticfiles import finders
def serve(request, path, insecure=False, **kwargs):
    """
    Development-only view that serves a static file located either below
    a directory or at a path resolved through the staticfiles finders.

    Hook it up with a URL pattern such as::

        (r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')

    The actual response is produced by the django.views.static view.
    Raises ImproperlyConfigured outside DEBUG unless ``insecure`` is set,
    and Http404 when the path is a directory or cannot be found.
    """
    allowed = settings.DEBUG or insecure
    if not allowed:
        raise ImproperlyConfigured("The staticfiles view can only be used in "
                                   "debug mode or if the --insecure "
                                   "option of 'runserver' is used")
    clean_path = posixpath.normpath(unquote(path)).lstrip('/')
    match = finders.find(clean_path)
    if not match:
        if path.endswith('/') or path == '':
            raise Http404("Directory indexes are not allowed here.")
        raise Http404("'%s' could not be found" % path)
    document_root, path = os.path.split(match)
    return static.serve(request, path, document_root=document_root, **kwargs)
rimacone/testing2 | tests/bearlib/aspects/ModuleTest.py | 11 | 3656 | from types import ModuleType
import coalib.bearlib.aspects
from coalib.bearlib.aspects.exceptions import (AspectNotFoundError,
MultipleAspectFoundError)
import pytest
import unittest
class aspectsModuleTest(unittest.TestCase):
    """Tests for the wrapped coalib.bearlib.aspects module object."""

    def test_module(self):
        # check that module is correctly wrapped
        assert isinstance(coalib.bearlib.aspects, ModuleType)
        assert type(coalib.bearlib.aspects) is not ModuleType
        assert (type(coalib.bearlib.aspects) is
                coalib.bearlib.aspects.aspectsModule)

    def test__getitem__(self):
        # Lookup is case-insensitive and accepts partially qualified names.
        dict_spelling = coalib.bearlib.aspects.Root.Spelling.DictionarySpelling
        # check a leaf aspect
        for aspectname in ['DictionarySpelling',
                           'spelling.DictionarySpelling',
                           'root.SPELLING.DictionarySpelling']:
            assert coalib.bearlib.aspects[aspectname] is dict_spelling
        # check a container aspect
        for aspectname in ['Spelling', 'SPELLING', 'ROOT.spelling']:
            assert (coalib.bearlib.aspects[aspectname] is
                    coalib.bearlib.aspects.Root.Spelling)
        # check root aspect
        for aspectname in ['Root', 'root', 'ROOT']:
            assert (coalib.bearlib.aspects[aspectname] is
                    coalib.bearlib.aspects.Root)

    def test__getitem__no_match(self):
        # Unknown (or wrongly qualified) names raise AspectNotFoundError.
        for aspectname in ['noaspect', 'NOASPECT',
                           'Root.DictionarySpelling']:
            with pytest.raises(AspectNotFoundError) as exc:
                coalib.bearlib.aspects[aspectname]
            exc.match(r"^No aspect named '%s'$" % aspectname)

    def test__getitem__multi_match(self):
        # Ambiguous names list every candidate in the error message.
        for aspectname in ['Length', 'length', 'LENGTH']:
            with pytest.raises(MultipleAspectFoundError) as exc:
                coalib.bearlib.aspects[aspectname]
            exc.match(r"^Multiple aspects named '%s'. " % aspectname +
                      r'Choose from '
                      r'\[<aspectclass'
                      r" 'Root.Metadata.CommitMessage.Body.Length'>,"
                      r' <aspectclass'
                      r" 'Root.Metadata.CommitMessage.Shortlog.Length'>"
                      r'\]$')

    def test_get(self):
        # .get() mirrors __getitem__ for successful lookups.
        # check a leaf aspect
        for aspectname in ['clone', 'redundancy.clone',
                           'root.redundancy.clone']:
            self.assertIs(coalib.bearlib.aspects.get(aspectname),
                          coalib.bearlib.aspects.Root.Redundancy.Clone)
        # check a container aspect
        for aspectname in ['Spelling', 'SPELLING', 'ROOT.spelling']:
            self.assertIs(coalib.bearlib.aspects.get(aspectname),
                          coalib.bearlib.aspects.Root.Spelling)
        # check root aspect
        for aspectname in ['Root', 'root', 'ROOT']:
            self.assertIs(coalib.bearlib.aspects.get(aspectname),
                          coalib.bearlib.aspects.Root)

    def test_get_no_match(self):
        # Unlike __getitem__, .get() returns None for unknown names.
        for aspectname in ['noaspect', 'NOASPECT', 'Root.aspectsYEAH']:
            self.assertIsNone(coalib.bearlib.aspects.get(aspectname))

    def test_get_multi_match(self):
        # ...but ambiguity still raises, exactly as with __getitem__.
        with self.assertRaisesRegex(
                MultipleAspectFoundError,
                r"^Multiple aspects named 'length'. "
                r'Choose from '
                r'\[<aspectclass'
                r" 'Root.Metadata.CommitMessage.Body.Length'>,"
                r' <aspectclass'
                r" 'Root.Metadata.CommitMessage.Shortlog.Length'>"
                r'\]$'):
            coalib.bearlib.aspects.get('length')
| agpl-3.0 |
mayfieldrobotics/marm | test/test_ff_wrapper.py | 1 | 1797 | from datetime import timedelta
import pytest
import marm
# ffprobe's view of the final audio packet in the `sonic.ts` fixture; the
# same result is expected whether the input is given by path or on stdin.
_SONIC_LAST_AUDIO_PACKET = {
    1: {'codec_type': u'audio',
        'dts': 10931520,
        'dts_time': 121.461333,
        'duration': 1920,
        'duration_time': 0.021333,
        'flags': u'K_',
        'pts': 10931520,
        'pts_time': 121.461333,
        'size': 14,
        'stream_index': 1},
}


@pytest.mark.parametrize(
    'file_name,args,as_fo,expected', [
        ('sonic.ts', ['-select_streams', 'a'], False, _SONIC_LAST_AUDIO_PACKET),
        ('sonic.ts', ['-select_streams', 'a'], True, _SONIC_LAST_AUDIO_PACKET),
    ]
)
def test_ffprobe_last_packet(fixtures, file_name, args, as_fo, expected):
    """Probe the last packet of a fixture, both via path and via stdin."""
    path = fixtures.join(file_name)
    if as_fo:
        # '-' tells ffprobe to read its input from stdin
        with path.open('rb') as fo:
            result = marm.FFProbe.for_last_packet(*(args + ['-']), stdin=fo)
    else:
        result = marm.FFProbe.for_last_packet(*(args + [path.strpath]))
    assert result == expected
@pytest.mark.parametrize(
    'file_name,expected_stream,expected_format', [
        ('sonic.ts',
         timedelta(seconds=120.033333),
         timedelta(seconds=120.054667)),
    ]
)
def test_ffprobe_duration(
        fixtures,
        file_name,
        expected_stream,
        expected_format):
    """Stream and container duration probes match the known fixture values."""
    path = fixtures.join(file_name)
    stream_duration = marm.FFProbe.for_stream_duration(path.strpath)
    format_duration = marm.FFProbe.for_format_duration(path.strpath)
    assert stream_duration == expected_stream
    assert format_duration == expected_format
| bsd-3-clause |
myarjunar/QGIS | python/plugins/processing/algs/gdal/aspect.py | 1 | 3795 | # -*- coding: utf-8 -*-
"""
***************************************************************************
aspect.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class aspect(GdalAlgorithm):
    """GDAL 'aspect' raster analysis (gdaldem aspect) Processing algorithm."""

    INPUT = 'INPUT'
    BAND = 'BAND'
    COMPUTE_EDGES = 'COMPUTE_EDGES'
    ZEVENBERGEN = 'ZEVENBERGEN'
    TRIG_ANGLE = 'TRIG_ANGLE'
    ZERO_FLAT = 'ZERO_FLAT'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Aspect')
        self.group, self.i18n_group = self.trAlgorithm('Raster analysis')

        self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
        self.addParameter(ParameterNumber(
            self.BAND, self.tr('Band number'), 1, 99, 1))
        self.addParameter(ParameterBoolean(
            self.COMPUTE_EDGES, self.tr('Compute edges'), False))
        self.addParameter(ParameterBoolean(
            self.ZEVENBERGEN,
            self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
            False))
        self.addParameter(ParameterBoolean(
            self.TRIG_ANGLE,
            self.tr('Return trigonometric angle (instead of azimuth)'), False))
        self.addParameter(ParameterBoolean(
            self.ZERO_FLAT,
            self.tr('Return 0 for flat (instead of -9999)'), False))

        self.addOutput(OutputRaster(self.OUTPUT, self.tr('Aspect')))

    def getConsoleCommands(self):
        """Build the gdaldem command line from the chosen parameter values."""
        output = str(self.getOutputValue(self.OUTPUT))
        arguments = [
            'aspect',
            str(self.getParameterValue(self.INPUT)),
            output,
            '-of', GdalUtils.getFormatShortNameFromFilename(output),
            '-b', str(self.getParameterValue(self.BAND)),
        ]
        if self.getParameterValue(self.COMPUTE_EDGES):
            arguments.append('-compute_edges')
        if self.getParameterValue(self.ZEVENBERGEN):
            arguments += ['-alg', 'ZevenbergenThorne']
        if self.getParameterValue(self.TRIG_ANGLE):
            arguments.append('-trigonometric')
        if self.getParameterValue(self.ZERO_FLAT):
            arguments.append('-zero_for_flat')
        return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
hlieberman/debian-ansible | plugins/inventory/digital_ocean.py | 26 | 19885 | #!/usr/bin/env python
'''
DigitalOcean external inventory script
======================================
Generates Ansible inventory of DigitalOcean Droplets.
In addition to the --list and --host options used by Ansible, there are options
for generating JSON of other DigitalOcean data. This is useful when creating
droplets. For example, --regions will return all the DigitalOcean Regions.
This information can also be easily found in the cache file, whose default
location is /tmp/ansible-digital_ocean.cache).
The --pretty (-p) option pretty-prints the output for better human readability.
----
Although the cache stores all the information received from DigitalOcean,
the cache is not used for current droplet information (in --list, --host,
--all, and --droplets). This is so that accurate droplet information is always
found. You can force this script to use the cache with --force-cache.
----
Configuration is read from `digital_ocean.ini`, then from environment variables,
then and command-line arguments.
Most notably, the DigitalOcean Client ID and API Key must be specified. They
can be specified in the INI file or with the following environment variables:
export DO_CLIENT_ID='DO123' DO_API_KEY='abc123'
Alternatively, they can be passed on the command-line with --client-id and
--api-key.
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
is to use the output of the --env option with export:
export $(digital_ocean.py --env)
----
The following groups are generated from --list:
- ID (droplet ID)
- NAME (droplet NAME)
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- region_ID
- region_NAME
- size_ID
- size_NAME
- status_STATUS
When run against a specific host, this script returns the following variables:
- do_created_at
- do_distroy
- do_id
- do_image
- do_image_id
- do_ip_address
- do_name
- do_region
- do_region_id
- do_size
- do_size_id
- do_status
-----
```
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all]
[--droplets] [--regions] [--images] [--sizes]
[--ssh-keys] [--domains] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE]
[--refresh-cache] [--client-id CLIENT_ID]
[--api-key API_KEY]
Produce an Ansible Inventory file based on DigitalOcean credentials
optional arguments:
-h, --help show this help message and exit
--list List all active Droplets as Ansible inventory
(default: True)
--host HOST Get all Ansible inventory variables about a specific
Droplet
--all List all DigitalOcean information as JSON
--droplets List Droplets as JSON
--regions List Regions as JSON
--images List Images as JSON
--sizes List Sizes as JSON
--ssh-keys List SSH keys as JSON
--domains List Domains as JSON
--pretty, -p Pretty-print results
--cache-path CACHE_PATH
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
--refresh-cache Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
--client-id CLIENT_ID, -c CLIENT_ID
DigitalOcean Client ID
--api-key API_KEY, -a API_KEY
DigitalOcean API Key
```
'''
# (c) 2013, Evan Wies <evan@neomantra.net>
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import os
import sys
import re
import argparse
from time import time
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from dopy.manager import DoError, DoManager
except ImportError, e:
print "failed=True msg='`dopy` library required for this script'"
sys.exit(1)
class DigitalOceanInventory(object):
    '''Dynamic Ansible inventory built from the DigitalOcean (v1) API.

    Constructing an instance runs the whole program: read configuration,
    load droplet data (from the API or a JSON cache file) and print the
    requested JSON to stdout.  Python 2 only (print statements, dopy).
    '''

    ###########################################################################
    # Main execution path
    ###########################################################################

    def __init__(self):
        ''' Main execution path '''

        # DigitalOceanInventory data
        self.data = {}       # All DigitalOcean data
        self.inventory = {}  # Ansible Inventory
        self.index = {}      # Various indices of Droplet metadata

        # Define defaults
        self.cache_path = '.'
        self.cache_max_age = 0

        # Read settings, environment variables, and CLI arguments
        # (later sources override earlier ones: ini < env < CLI)
        self.read_settings()
        self.read_environment()
        self.read_cli_args()

        # Verify credentials were set
        if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'):
            print '''Could not find values for DigitalOcean client_id and api_key.
They must be specified via either ini file, command line argument (--client-id and --api-key),
or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
            sys.exit(-1)

        # env command, show DigitalOcean credentials
        if self.args.env:
            print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key)
            sys.exit(0)

        # Manage cache
        self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
        self.cache_refreshed = False

        # NOTE(review): `and` binds tighter than `or`, so an expired/absent
        # cache triggers a full API reload even when --force-cache was given
        # (which then exits below if the API returns nothing) — confirm intended.
        if not self.args.force_cache and self.args.refresh_cache or not self.is_cache_valid():
            self.load_all_data_from_digital_ocean()
        else:
            self.load_from_cache()
            if len(self.data) == 0:
                if self.args.force_cache:
                    print '''Cache is empty and --force-cache was specified'''
                    sys.exit(-1)
                self.load_all_data_from_digital_ocean()
            else:
                # We always get fresh droplets for --list, --host, --all, and --droplets
                # unless --force-cache is specified
                if not self.args.force_cache and (
                   self.args.list or self.args.host or self.args.all or self.args.droplets):
                    self.load_droplets_from_digital_ocean()

        # Pick the json_data to print based on the CLI command
        if self.args.droplets: json_data = { 'droplets': self.data['droplets'] }
        elif self.args.regions: json_data = { 'regions': self.data['regions'] }
        elif self.args.images: json_data = { 'images': self.data['images'] }
        elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] }
        elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] }
        elif self.args.domains: json_data = { 'domains': self.data['domains'] }
        elif self.args.all: json_data = self.data
        elif self.args.host: json_data = self.load_droplet_variables_for_host()
        else: # '--list' this is last to make it default
            json_data = self.inventory

        if self.args.pretty:
            print json.dumps(json_data, sort_keys=True, indent=2)
        else:
            print json.dumps(json_data)
        # That's all she wrote...

    ###########################################################################
    # Script configuration
    ###########################################################################

    def read_settings(self):
        ''' Reads the settings from the digital_ocean.ini file '''
        config = ConfigParser.SafeConfigParser()
        # ini file lives next to this script, not in the working directory
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini')

        # Credentials
        if config.has_option('digital_ocean', 'client_id'):
            self.client_id = config.get('digital_ocean', 'client_id')
        if config.has_option('digital_ocean', 'api_key'):
            self.api_key = config.get('digital_ocean', 'api_key')

        # Cache related
        if config.has_option('digital_ocean', 'cache_path'):
            self.cache_path = config.get('digital_ocean', 'cache_path')
        if config.has_option('digital_ocean', 'cache_max_age'):
            self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')

    def read_environment(self):
        ''' Reads the settings from environment variables '''
        # Setup credentials
        if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID")
        if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY")

    def read_cli_args(self):
        ''' Command line argument processing '''
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')

        parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
        parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')

        parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
        parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON')
        parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
        parser.add_argument('--images', action='store_true', help='List Images as JSON')
        parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
        parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
        parser.add_argument('--domains', action='store_true',help='List Domains as JSON')

        parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results')

        parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
        parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
        parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
        parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')

        parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY')
        parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID')
        parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key')

        self.args = parser.parse_args()

        # CLI arguments take precedence over ini/environment values
        if self.args.client_id: self.client_id = self.args.client_id
        if self.args.api_key: self.api_key = self.args.api_key
        if self.args.cache_path: self.cache_path = self.args.cache_path
        if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age

        # Make --list default if none of the other commands are specified
        if (not self.args.droplets and not self.args.regions and not self.args.images and
            not self.args.sizes and not self.args.ssh_keys and not self.args.domains and
            not self.args.all and not self.args.host):
            self.args.list = True

    ###########################################################################
    # Data Management
    ###########################################################################

    def load_all_data_from_digital_ocean(self):
        ''' Use dopy to get all the information from DigitalOcean and save data in cache files '''
        manager = DoManager(self.client_id, self.api_key)

        self.data = {}
        self.data['droplets'] = self.sanitize_list(manager.all_active_droplets())
        self.data['regions'] = self.sanitize_list(manager.all_regions())
        self.data['images'] = self.sanitize_list(manager.all_images(filter=None))
        self.data['sizes'] = self.sanitize_list(manager.sizes())
        self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys())
        self.data['domains'] = self.sanitize_list(manager.all_domains())

        # Lookup tables mapping DigitalOcean numeric IDs to readable names
        self.index = {}
        self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name')
        self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name')
        self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name')
        self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution')
        self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False)

        self.build_inventory()

        self.write_to_cache()

    def load_droplets_from_digital_ocean(self):
        ''' Use dopy to get droplet information from DigitalOcean and save data in cache files '''
        manager = DoManager(self.client_id, self.api_key)
        self.data['droplets'] = self.sanitize_list(manager.all_active_droplets())
        self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False)
        self.build_inventory()
        self.write_to_cache()

    def build_index(self, source_seq, key_from, key_to, use_slug=True):
        ''' Map each item's key_from value to its slug (if present and enabled) or key_to value. '''
        dest_dict = {}
        for item in source_seq:
            name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to]
            key = item[key_from]
            dest_dict[key] = name
        return dest_dict

    def build_inventory(self):
        '''Build Ansible inventory of droplets'''
        self.inventory = {}

        # add all droplets by id and name
        for droplet in self.data['droplets']:
            dest = droplet['ip_address']
            self.inventory[droplet['id']] = [dest]
            self.push(self.inventory, droplet['name'], dest)
            # ID-based groups are always available ...
            self.push(self.inventory, 'region_'+droplet['region_id'], dest)
            self.push(self.inventory, 'image_' +droplet['image_id'], dest)
            self.push(self.inventory, 'size_'  +droplet['size_id'], dest)
            self.push(self.inventory, 'status_'+droplet['status'], dest)

            # ... name-based groups only when the indexes can resolve the ID
            region_name = self.index['region_to_name'].get(droplet['region_id'])
            if region_name:
                self.push(self.inventory, 'region_'+region_name, dest)

            size_name = self.index['size_to_name'].get(droplet['size_id'])
            if size_name:
                self.push(self.inventory, 'size_'+size_name, dest)

            image_name = self.index['image_to_name'].get(droplet['image_id'])
            if image_name:
                self.push(self.inventory, 'image_'+image_name, dest)

            distro_name = self.index['image_to_distro'].get(droplet['image_id'])
            if distro_name:
                self.push(self.inventory, 'distro_'+distro_name, dest)

    def load_droplet_variables_for_host(self):
        '''Generate a JSON response to a --host call'''
        host = self.to_safe(str(self.args.host))

        if not host in self.index['host_to_droplet']:
            # try updating cache
            if not self.args.force_cache:
                self.load_all_data_from_digital_ocean()
            if not host in self.index['host_to_droplet']:
                # host might not exist anymore
                return {}

        droplet = None
        if self.cache_refreshed:
            for drop in self.data['droplets']:
                if drop['ip_address'] == host:
                    droplet = self.sanitize_dict(drop)
                    break
        else:
            # Cache wasn't refreshed this run, so hit DigitalOcean API
            manager = DoManager(self.client_id, self.api_key)
            droplet_id = self.index['host_to_droplet'][host]
            droplet = self.sanitize_dict(manager.show_droplet(droplet_id))

        if not droplet:
            return {}

        # Put all the information in a 'do_' namespace
        info = {}
        for k, v in droplet.items():
            info['do_'+k] = v

        # Generate user-friendly variables (i.e. not the ID's)
        if droplet.has_key('region_id'):
            info['do_region'] = self.index['region_to_name'].get(droplet['region_id'])
        if droplet.has_key('size_id'):
            info['do_size'] = self.index['size_to_name'].get(droplet['size_id'])
        if droplet.has_key('image_id'):
            info['do_image'] = self.index['image_to_name'].get(droplet['image_id'])
            info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id'])

        return info

    ###########################################################################
    # Cache Management
    ###########################################################################

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''
        if os.path.isfile(self.cache_filename):
            mod_time = os.path.getmtime(self.cache_filename)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                return True
        return False

    def load_from_cache(self):
        ''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
        cache = open(self.cache_filename, 'r')
        json_data = cache.read()
        cache.close()
        data = json.loads(json_data)

        self.data = data['data']
        self.inventory = data['inventory']
        self.index = data['index']

    def write_to_cache(self):
        ''' Writes data in JSON format to a file '''
        data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory }
        json_data = json.dumps(data, sort_keys=True, indent=2)

        cache = open(self.cache_filename, 'w')
        cache.write(json_data)
        cache.close()

    ###########################################################################
    # Utilities
    ###########################################################################

    def push(self, my_dict, key, element):
        ''' Pushed an element onto an array that may not have been defined in the dict '''
        if key in my_dict:
            my_dict[key].append(element);
        else:
            my_dict[key] = [element]

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        return re.sub("[^A-Za-z0-9\-\.]", "_", word)

    def sanitize_dict(self, d):
        ''' Return a copy of d with None values dropped and keys/values made group-name safe. '''
        new_dict = {}
        for k, v in d.items():
            if v != None:
                new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
        return new_dict

    def sanitize_list(self, seq):
        ''' Apply sanitize_dict to every dict in seq. '''
        new_seq = []
        for d in seq:
            new_seq.append(self.sanitize_dict(d))
        return new_seq
###########################################################################
# Run the script
# Instantiating the class runs the whole program (its constructor reads
# configuration, loads data and prints the inventory JSON).
DigitalOceanInventory()
| gpl-3.0 |
freedesktop-unofficial-mirror/telepathy__telepathy-qt | tools/qt-types-gen.py | 7 | 17027 | #!/usr/bin/python
#
# Copyright (C) 2008 Collabora Limited <http://www.collabora.co.uk>
# Copyright (C) 2008 Nokia Corporation
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import xml.dom.minidom
from getopt import gnu_getopt
from libtpcodegen import NS_TP, get_descendant_text, get_by_path
from libqtcodegen import binding_from_usage, binding_from_decl, extract_arg_or_member_info, format_docstring, gather_externals, gather_custom_lists, get_qt_name, get_headerfile_cmd, RefRegistry
class BrokenSpecException(Exception):
    """Base class for errors caused by a broken telepathy spec XML."""
class MissingTypes(BrokenSpecException):
    """Raised when the spec uses D-Bus types it never defines or imports."""

    def __init__(self, types):
        # Pass the payload (not `self`) to Exception.__init__: storing the
        # exception instance in its own args tuple made repr() recurse forever.
        super(MissingTypes, self).__init__(types)
        self.types = types  # list of unresolved type names

    def __str__(self):
        typelist = ''.join([' %s' % t for t in self.types])
        return "The following types were used, but not provided by the spec " \
                "or by <tp:external-type/> declarations in all.xml:\n%s" % typelist
class UnresolvedDependency(BrokenSpecException):
    """Raised when a composite type depends on a type that is not defined."""

    def __init__(self, child, parent):
        # Pass the payload (not `self`) to Exception.__init__: the original
        # self-referential args tuple made repr() recurse infinitely.
        super(UnresolvedDependency, self).__init__(child, parent)
        self.child = child    # the type with the dangling reference
        self.parent = parent  # the missing type it refers to

    def __str__(self):
        return 'Type %s has unresolved dependency on %s' % (
                self.child, self.parent)
class EmptyStruct(BrokenSpecException):
    """Raised for a tp:struct declared without any members."""

    def __init__(self, struct_name):
        # Pass the payload (not `self`) to Exception.__init__: the original
        # self-referential args tuple made repr() recurse infinitely.
        super(EmptyStruct, self).__init__(struct_name)
        self.struct_name = struct_name

    def __str__(self):
        return 'tp:struct %s should have some members' % self.struct_name
class MalformedMapping(BrokenSpecException):
    """Raised for a tp:mapping that does not have exactly two members."""

    def __init__(self, mapping_name, members):
        # Pass the payload (not `self`) to Exception.__init__: the original
        # self-referential args tuple made repr() recurse infinitely.
        super(MalformedMapping, self).__init__(mapping_name, members)
        self.mapping_name = mapping_name
        self.members = members  # the actual member count found

    def __str__(self):
        return 'tp:mapping %s should have 2 members, not %u' % (
                self.mapping_name, self.members)
class WTF(BrokenSpecException):
    """Raised for an unrecognised tp: element in the spec."""

    def __init__(self, element_name):
        # Fixes two defects: the original named the wrong class in super()
        # (BrokenSpecException instead of WTF, skipping this class's own base
        # in the MRO), and it passed `self` as the exception argument, which
        # made repr() recurse infinitely on the self-referential args tuple.
        super(WTF, self).__init__(element_name)
        self.element_name = element_name

    def __str__(self):
        return 'What the hell is a tp:%s?' % self.element_name
class DepInfo:
    """Dependency metadata for one tp:struct / tp:mapping spec element.

    Records the element's binding plus the custom types its members depend
    on (`deps`); `revdeps` is filled in later by the caller.
    """

    def __init__(self, el, externals, custom_lists):
        self.el = el
        name = get_by_path(el, '@name')
        array_name = get_by_path(el, '@array-name')
        depth_attr = get_by_path(el, '@array-depth')
        array_depth = int(depth_attr) if depth_attr else None
        self.binding = binding_from_decl(name, array_name, array_depth)

        deps = []
        for member in get_by_path(el, 'member'):
            sig = member.getAttribute('type')
            tptype = member.getAttributeNS(NS_TP, 'type')

            # externally-declared types are provided elsewhere, not a dep here
            if (sig, tptype) in externals:
                continue

            # an array of a custom type depends on the element type itself
            if tptype.endswith('[]'):
                tptype = tptype[:-2]

            member_binding = binding_from_usage(sig, tptype, custom_lists)
            if member_binding.custom_type:
                deps.append(member_binding.val)
        self.deps = deps

        self.revdeps = []
class Generator(object):
    def __init__(self, opts):
        """Store command-line options, parse the spec XML and set up state.

        opts is a dict of getopt-style long options; the ones without a
        .get() fallback below are required and abort the run if absent.
        """
        try:
            self.namespace = opts['--namespace']
            self.declfile = opts['--declfile']
            self.implfile = opts['--implfile']
            self.realinclude = opts['--realinclude']
            self.prettyinclude = opts.get('--prettyinclude', self.realinclude)
            self.extraincludes = opts.get('--extraincludes', None)
            self.must_define = opts.get('--must-define', None)
            self.visibility = opts.get('--visibility', '')
            dom = xml.dom.minidom.parse(opts['--specxml'])
        except KeyError, k:
            assert False, 'Missing required parameter %s' % k.args[0]

        # Accumulated output lines for the header (.h) and impl (.cpp) files
        self.decls = []
        self.impls = []

        # Spec DOM root plus indexes of external and custom list types
        self.spec = get_by_path(dom, "spec")[0]
        self.externals = gather_externals(self.spec)
        self.custom_lists = gather_custom_lists(self.spec, self.namespace)
        self.required_custom = []   # custom types used but not yet provided
        self.required_arrays = []   # (list-type name, element type) pairs
        self.to_declare = []        # metatypes needing Q_DECLARE_METATYPE
        self.depinfos = {}          # type name -> DepInfo
        self.refs = RefRegistry(self.spec)
    def __call__(self):
        """Generate the typedef header and implementation and write both files."""
        # Emit comment header
        self.both('/* Generated from ')
        self.both(get_descendant_text(get_by_path(self.spec, 'title')))
        version = get_by_path(self.spec, "version")
        if version:
            self.both(', version ' + get_descendant_text(version))
        self.both(' */\n')

        # Gather info on available and required types
        self.gather_required()

        # Optional include-guard enforcement for the generated header
        if self.must_define:
            self.decl('\n')
            self.decl('#ifndef %s\n' % self.must_define)
            self.decl('#error %s\n' % self.must_define)
            self.decl('#endif')

        self.decl('\n')

        if self.extraincludes:
            for include in self.extraincludes.split(','):
                self.decl('#include %s\n' % include)

        self.decl("""
#include <QtGlobal>

#include <QByteArray>
#include <QString>
#include <QStringList>
#include <QVariantList>
#include <QVariantMap>

#include <QDBusArgument>
#include <QDBusMetaType>
#include <QDBusObjectPath>
#include <QDBusSignature>
#include <QDBusVariant>

#include <TelepathyQt/Global>

/**
 * \\addtogroup typesconstants Types and constants
 *
 * Enumerated, flag, structure, list and mapping types and utility constants.
 */

/**
 * \\defgroup struct Structure types
 * \\ingroup typesconstants
 *
 * Structure types generated from the specification.
 */

/**
 * \\defgroup list List types
 * \\ingroup typesconstants
 *
 * List types generated from the specification.
 */

/**
 * \\defgroup mapping Mapping types
 * \\ingroup typesconstants
 *
 * Mapping types generated from the specification.
 */

""")

        if self.must_define:
            self.impl("""
#define %s""" % self.must_define)

        self.impl("""
#include "%s"
""" % self.realinclude)

        self.both("""
namespace %s
{
""" % self.namespace)

        # Emit type definitions for types provided in the spec
        self.provide_all()

        # Emit type registration function
        self.decl("""
} // namespace %s
""" % self.namespace)

        self.impl("""\
TP_QT_NO_EXPORT void _registerTypes()
{
    static bool registered = false;
    if (registered)
        return;
    registered = true;

""")

        # Emit Qt metatype declarations; the trailing-'>' space avoids the
        # pre-C++11 '>>' token problem in nested template arguments
        self.to_declare.sort()
        for metatype in self.to_declare:
            self.decl('Q_DECLARE_METATYPE(%s)\n' % metatype)
            self.impl('    qDBusRegisterMetaType<%s>();\n' % ((metatype.endswith('>') and metatype + ' ') or metatype))

        self.impl("""\
}

} // namespace %s
""" % self.namespace)

        # Write output to files
        open(self.declfile, 'w').write(''.join(self.decls).encode("utf-8"))
        open(self.implfile, 'w').write(''.join(self.impls).encode("utf-8"))
def decl(self, str):
self.decls.append(str)
def impl(self, str):
self.impls.append(str)
def both(self, str):
self.decl(str)
self.impl(str)
def gather_required(self):
members = self.spec.getElementsByTagNameNS(NS_TP, 'member')
args = self.spec.getElementsByTagName('arg')
props = self.spec.getElementsByTagName('property')
tp_props = self.spec.getElementsByTagNameNS(NS_TP, 'property')
for requirer in members + args + props + tp_props:
sig = requirer.getAttribute('type')
tptype = requirer.getAttributeNS(NS_TP, 'type')
external = (sig, tptype) in self.externals
binding = binding_from_usage(sig, tptype, self.custom_lists, external)
if binding.custom_type and binding.val not in self.required_custom:
self.required_custom.append(binding.val)
if not binding.custom_type and binding.array_of and (binding.val, binding.array_of) not in self.required_arrays:
self.required_arrays.append((binding.val, binding.array_of))
    def provide_all(self):
        """Emit all required array, struct and mapping types in dependency order.

        Raises UnresolvedDependency for dangling type references and
        MissingTypes if anything required was never provided.
        """
        # First, the generic QList<T> wrapper types for required arrays
        self.required_arrays.sort()
        for (val, array_of) in self.required_arrays:
            real = 'QList<%s>' % array_of
            self.decl("""\
/**
 * \\struct %s
 * \\ingroup list
%s\
 *
 * Generic list type with %s elements. Convertible with
 * %s, but needed to have a discrete type in the Qt type system.
 */
""" % (val, get_headerfile_cmd(self.realinclude, self.prettyinclude), array_of, real))
            self.decl(self.faketype(val, real))
            self.to_declare.append(self.namespace + '::' + val)

        structs = self.spec.getElementsByTagNameNS(NS_TP, 'struct')
        mappings = self.spec.getElementsByTagNameNS(NS_TP, 'mapping')
        exts = self.spec.getElementsByTagNameNS(NS_TP, 'external-type')

        # Build the dependency graph of structs/mappings on custom types
        for deptype in structs + mappings:
            info = DepInfo(deptype, self.externals, self.custom_lists)
            self.depinfos[info.binding.val] = info

        leaves = []
        next_leaves = []
        for val, depinfo in self.depinfos.iteritems():
            leaf = True
            for dep in depinfo.deps:
                if not self.depinfos.has_key(dep):
                    raise UnresolvedDependency(val, dep)
                leaf = False
                self.depinfos[dep].revdeps.append(val)
            if leaf:
                next_leaves.append(val)

        # Emit in dependency order by repeatedly removing dependency-free
        # "leaves"; each batch is sorted for deterministic output
        while leaves or next_leaves:
            if not leaves:
                leaves = next_leaves
                leaves.sort()
                next_leaves = []
            val = leaves.pop(0)
            depinfo = self.depinfos[val]
            self.output_by_depinfo(depinfo)
            for revdep in depinfo.revdeps:
                revdepinfo = self.depinfos[revdep]
                revdepinfo.deps.remove(val)
                if not revdepinfo.deps:
                    next_leaves.append(revdep)
            del self.depinfos[val]

        # Mark everything declared by the spec (or externally) as provided,
        # including the implied array types at every declared depth
        for provider in structs + mappings + exts:
            name = get_by_path(provider, '@name')
            array_name = get_by_path(provider, '@array-name')
            array_depth = get_by_path(provider, '@array-depth')
            if array_depth:
                array_depth = int(array_depth)
            else:
                array_depth = None
            sig = provider.getAttribute('type')
            tptype = provider.getAttribute('name')
            external = (sig, tptype) in self.externals
            binding = binding_from_decl(name, array_name, array_depth, external)
            self.provide(binding.val)
            if binding.array_val:
                self.provide(binding.array_val)
                d = binding.array_depth
                while d > 1:
                    d -= 1
                    self.provide(binding.array_val + ('List' * d))

        # Anything still outstanding was used but never defined anywhere
        if self.required_custom:
            raise MissingTypes(self.required_custom)
def provide(self, type):
if type in self.required_custom:
self.required_custom.remove(type)
    def output_by_depinfo(self, depinfo):
        """Emit the C++ declaration/implementation for one struct or mapping.

        ``depinfo`` wraps a tp:struct or tp:mapping spec element whose
        dependencies have already been emitted by the topological sort.
        Raises EmptyStruct for a member-less struct, MalformedMapping for a
        mapping without exactly two members, and WTF for any other element.
        """
        # Per-member names, Doxygen comments and C++ type bindings.
        names, docstrings, bindings = extract_arg_or_member_info(get_by_path(depinfo.el, 'member'), self.custom_lists, self.externals, None, self.refs, ' * ', (' /**', ' */'))
        members = len(names)
        if depinfo.el.localName == 'struct':
            if members == 0:
                raise EmptyStruct(depinfo.binding.val)
            # Doxygen block followed by the struct definition itself.
            self.decl("""\
/**
 * \\struct %(name)s
 * \\ingroup struct
%(headercmd)s\
 *
 * Structure type generated from the specification.
%(docstring)s\
 */
struct %(visibility)s %(name)s
{
""" % {
                'name' : depinfo.binding.val,
                'headercmd': get_headerfile_cmd(self.realinclude, self.prettyinclude),
                'docstring' : format_docstring(depinfo.el, self.refs),
                'visibility': self.visibility,
                })
            for i in xrange(members):
                self.decl("""\
%s\
    %s %s;
""" % (docstrings[i], bindings[i].val, names[i]))
            self.decl("""\
};
""")
            # operator==: QDBusVariant members are compared via .variant()
            # because QDBusVariant itself provides no operator==.
            self.both('%s bool operator==(%s v1, %s v2)' %
                      (self.visibility,
                       depinfo.binding.inarg,
                       depinfo.binding.inarg))
            self.decl(';\n')
            self.impl("""
{""")
            if (bindings[0].val != 'QDBusVariant'):
                self.impl("""
    return ((v1.%s == v2.%s)""" % (names[0], names[0]))
            else:
                self.impl("""
    return ((v1.%s.variant() == v2.%s.variant())""" % (names[0], names[0]))
            for i in xrange(1, members):
                if (bindings[i].val != 'QDBusVariant'):
                    self.impl("""
            && (v1.%s == v2.%s)""" % (names[i], names[i]))
                else:
                    self.impl("""
            && (v1.%s.variant() == v2.%s.variant())""" % (names[i], names[i]))
            self.impl("""
            );
}
""")
            # operator!= is header-only, defined in terms of operator==.
            self.decl('inline bool operator!=(%s v1, %s v2)' %
                    (depinfo.binding.inarg, depinfo.binding.inarg))
            self.decl("""
{
    return !operator==(v1, v2);
}
""")
            # D-Bus marshalling (<<) and demarshalling (>>) operators.
            self.both('%s QDBusArgument& operator<<(QDBusArgument& arg, %s val)' %
                    (self.visibility, depinfo.binding.inarg))
            self.decl(';\n')
            self.impl("""
{
    arg.beginStructure();
    arg << %s;
    arg.endStructure();
    return arg;
}
""" % ' << '.join(['val.' + name for name in names]))
            self.both('%s const QDBusArgument& operator>>(const QDBusArgument& arg, %s val)' %
                    (self.visibility, depinfo.binding.outarg))
            self.decl(';\n\n')
            self.impl("""
{
    arg.beginStructure();
    arg >> %s;
    arg.endStructure();
    return arg;
}
""" % ' >> '.join(['val.' + name for name in names]))
        elif depinfo.el.localName == 'mapping':
            if members != 2:
                raise MalformedMapping(depinfo.binding.val, members)
            # The trailing space avoids emitting '>>' when the value type is
            # itself a template instantiation.
            realtype = 'QMap<%s, %s>' % (bindings[0].val, (bindings[1].val.endswith('>') and bindings[1].val + ' ') or bindings[1].val)
            self.decl("""\
/**
 * \\struct %s
 * \\ingroup mapping
%s\
 *
 * Mapping type generated from the specification. Convertible with
 * %s, but needed to have a discrete type in the Qt type system.
%s\
 */
""" % (depinfo.binding.val, get_headerfile_cmd(self.realinclude, self.prettyinclude), realtype, format_docstring(depinfo.el, self.refs)))
            self.decl(self.faketype(depinfo.binding.val, realtype))
        else:
            raise WTF(depinfo.el.localName)
        self.to_declare.append(self.namespace + '::' + depinfo.binding.val)
        if depinfo.binding.array_val:
            self.to_declare.append('%s::%s' % (self.namespace, depinfo.binding.array_val))
            # QList typedef for the array form of this type.
            self.decl("""\
/**
 * \\ingroup list
%s\
 *
 * Array of %s values.
 */
typedef %s %s;
""" % (get_headerfile_cmd(self.realinclude, self.prettyinclude), depinfo.binding.val, 'QList<%s>' % depinfo.binding.val, depinfo.binding.array_val))
            # Deeper array depths get nested typedefs (FooListList, ...).
            i = depinfo.binding.array_depth
            while i > 1:
                i -= 1
                self.to_declare.append('%s::%s%s' % (self.namespace, depinfo.binding.array_val, ('List' * i)))
                list_of = depinfo.binding.array_val + ('List' * (i-1))
                self.decl("""\
/**
 * \\ingroup list
%s\
 *
 * Array of %s values.
 */
typedef QList<%s> %sList;
""" % (get_headerfile_cmd(self.realinclude, self.prettyinclude), list_of, list_of, list_of))
def faketype(self, fake, real):
return """\
struct %(visibility)s %(fake)s : public %(real)s
{
inline %(fake)s() : %(real)s() {}
inline %(fake)s(const %(real)s& a) : %(real)s(a) {}
inline %(fake)s& operator=(const %(real)s& a)
{
*(static_cast<%(real)s*>(this)) = a;
return *this;
}
};
""" % {'fake' : fake, 'real' : real, 'visibility': self.visibility}
if __name__ == '__main__':
    # Parse the long-only command line options into (name, value) pairs;
    # gnu_getopt raises GetoptError for anything unrecognised.
    options, argv = gnu_getopt(sys.argv[1:], '',
            ['declfile=',
             'implfile=',
             'realinclude=',
             'prettyinclude=',
             'extraincludes=',
             'must-define=',
             'namespace=',
             'specxml=',
             'visibility=',
             ])
    try:
        # Build the generator from the option dict and run it immediately.
        Generator(dict(options))()
    except BrokenSpecException as e:
        # NOTE: Python 2 print-chevron syntax; this tool targets Python 2.
        # Exit code 42 signals a malformed spec file to the build system.
        print >> sys.stderr, 'Your spec is broken, dear developer! %s' % e
        sys.exit(42)
| lgpl-2.1 |
ahmednuaman/gdrive-cms-py-gae | httplib2/socks.py | 811 | 18459 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Install a module-wide default proxy configuration.  Every socksocket
    created afterwards starts out with these settings unless setproxy()
    is called on it explicitly.
    """
    global _defaultproxy
    settings = (proxytype, addr, port, rdns, username, password)
    _defaultproxy = settings
def wrapmodule(module):
    """wrapmodule(module)
    Attempts to replace a module's socket library with a SOCKS socket.
    Must set a default proxy using setdefaultproxy(...) first.
    This will only work on modules that import socket directly into the
    namespace; most of the Python Standard Library falls into this category.

    Raises GeneralProxyError if no default proxy has been configured.
    """
    # Use an identity test against None (idiomatic and unaffected by any
    # custom __eq__); refuse to wrap anything until a default proxy exists,
    # otherwise the replacement sockets would have nowhere to connect through.
    if _defaultproxy is not None:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError((4, "no proxy specified"))
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        """Create the socket, adopting the module default proxy if one is set.

        ``_sock`` is forwarded to the base class constructor (a Python 2
        ``socket.socket`` implementation detail).
        """
        _orgsocket.__init__(self, family, type, proto, _sock)
        # __proxy layout: (proxytype, addr, port, rdns, username, password).
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None  # (addr, port) bound at the proxy, set after negotiation
        self.__proxypeername = None  # (addr, port) of the final destination
        # Assume CONNECT-style tunnelling until connect() decides otherwise
        # (PROXY_TYPE_HTTP_NO_TUNNEL clears this for plain-HTTP requests).
        self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
    def sendall(self, content, *args):
        """ override socket.socket.sendall method to rewrite the header
        for non-tunneling proxies if needed
        """
        # When the proxy cannot tunnel (PROXY_TYPE_HTTP_NO_TUNNEL with a
        # plain-HTTP destination), every outgoing HTTP request must be
        # rewritten into absolute-URI form before being sent to the proxy.
        if not self.__httptunnel:
            content = self.__rewriteproxy(content)
        return super(socksocket, self).sendall(content, *args)
    def __rewriteproxy(self, header):
        """ rewrite HTTP request headers to support non-tunneling proxies
        (i.e. those which do not support the CONNECT method).
        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
        """
        host, endpt = None, None
        hdrs = header.split("\r\n")
        # Locate the Host: header and the GET/POST request line.
        for hdr in hdrs:
            if hdr.lower().startswith("host:"):
                host = hdr
            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
                endpt = hdr
        if host and endpt:
            # Remove both, then re-insert: request line (absolute URI form),
            # Host header, and optionally a Proxy-Authorization header.
            hdrs.remove(host)
            hdrs.remove(endpt)
            host = host.split(" ")[1]
            endpt = endpt.split(" ")
            if (self.__proxy[4] != None and self.__proxy[5] != None):
                hdrs.insert(0, self.__getauthheader())
            hdrs.insert(0, "Host: %s" % host)
            # endpt is [method, path, http-version].
            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
        return "\r\n".join(hdrs)
    def __getauthheader(self):
        """Build a Proxy-Authorization header from the stored credentials."""
        # NOTE(review): str concatenation plus base64.b64encode(str) only
        # works on Python 2 str values; this module targets Python 2.
        auth = self.__proxy[4] + ":" + self.__proxy[5]
        return "Proxy-Authorization: Basic " + base64.b64encode(auth)
    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype -    The type of the proxy to be used. Three types
                are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr -        The address of the server (IP or DNS).
        port -        The port of the server. Defaults to 1080 for SOCKS
                servers and 8080 for HTTP proxy servers.
        rdns -        Should DNS queries be preformed on the remote side
                (rather than the local side). The default is True.
                Note: This has no effect with SOCKS4 servers.
        username -    Username to authenticate with to the server.
                The default is no authentication.
        password -    Password to authenticate with to the server.
                Only relevant when username is also provided.
        """
        # Stored as a tuple; indexed throughout the class as:
        # 0=type, 1=addr, 2=port, 3=rdns, 4=username, 5=password.
        self.__proxy = (proxytype, addr, port, rdns, username, password)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        # Set by the __negotiate* methods after a successful handshake.
        return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        # The underlying socket is connected to the proxy server itself.
        return _orgsocket.getpeername(self)
    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        # Set by the __negotiate* methods after a successful handshake.
        return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if rmtrslv != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        headers =  ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        headers += ["Host: ", destaddr, "\r\n"]
        if (self.__proxy[4] != None and self.__proxy[5] != None):
                headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n"
        # One byte at a time so we never consume bytes past the header block.
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            # NOTE(review): statusline[2] raises IndexError if the proxy
            # omits the reason phrase — confirm against real proxies.
            raise HTTPError((statuscode, statusline[2]))
        # The proxy does not tell us where it bound locally.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1],portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0],destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
| mit |
WanderlandTravelers/ansible-wpa_supplicant-role | molecule/options/tests/test_options.py | 1 | 1376 | import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_wpa_passphrase_exists(host):
    # The role must have installed the wpa_passphrase utility that ships
    # with wpa_supplicant.
    assert host.file("/usr/bin/wpa_passphrase").exists
@pytest.mark.parametrize('line', [
    'country=GB',
    'ap_scan=1',
    'ctrl_interface=madeup',
    'update_config=0',
])
def test_one_liners(wpa_supplicant, line):
    # Every global option set in this molecule scenario must appear
    # verbatim in the rendered wpa_supplicant configuration file
    # (wpa_supplicant is a fixture exposing the rendered config).
    assert wpa_supplicant.contains(line)
@pytest.mark.parametrize('network', ["""
network={
    ssid="test"
    psk=e0b3e76d15f938fcd6ce682459a868312a0e0b779aee825a66aca6837701e685
    priority=1
    id_str="testOne"
}""", """
network={
    ssid="test2"
    psk=1d38836715b27c7e4812db576ff51e43c77ac17a76314ffb5551002b4217c1eb
    priority=2
    id_str="testTwo"
}""", """
network={
    ssid="testing"
    key_mgmt=NONE
}""", """
network={
    ssid="yourHiddenSSID"
    scan_ssid=1
    psk=f4457c4b9ba5635ff9d694125cf8587e4b0f48b700c8dfb2f6eb4b5b57df9a14
}""", """
network={
    ssid="1x-test"
    scan_ssid=1
    key_mgmt=IEEE8021X
    eap=TLS
    identity="user@example.com"
    ca_cert="/etc/cert/ca.pem"
    client_cert="/etc/cert/user.pem"
    private_key="/etc/cert/user.prv"
    private_key_passwd="password"
    eapol_flags=3
}"""])
def test_network(network_present, network):
    # Each parametrized wpa_supplicant network block must be present in the
    # rendered configuration (network_present is a molecule test fixture).
    assert network_present(network)
| mit |
philsch/ansible | lib/ansible/modules/cloud/cloudstack/cs_project.py | 17 | 8762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_project
short_description: Manages projects on Apache CloudStack based clouds.
description:
- Create, update, suspend, activate and remove projects.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the project.
required: true
display_text:
description:
- Display text of the project.
- If not specified, C(name) will be used as C(display_text).
required: false
default: null
state:
description:
- State of the project.
required: false
default: 'present'
choices: [ 'present', 'absent', 'active', 'suspended' ]
domain:
description:
- Domain the project is related to.
required: false
default: null
account:
description:
- Account the project is related to.
required: false
default: null
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
version_added: "2.2"
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a project
- local_action:
module: cs_project
name: web
tags:
- { key: admin, value: john }
- { key: foo, value: bar }
# Rename a project
- local_action:
module: cs_project
name: web
display_text: my web project
# Suspend an existing project
- local_action:
module: cs_project
name: web
state: suspended
# Activate an existing project
- local_action:
module: cs_project
name: web
state: active
# Remove a project
- local_action:
module: cs_project
name: web
state: absent
'''
RETURN = '''
---
id:
description: UUID of the project.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the project.
returned: success
type: string
sample: web project
display_text:
description: Display text of the project.
returned: success
type: string
sample: web project
state:
description: State of the project.
returned: success
type: string
sample: Active
domain:
description: Domain the project is related to.
returned: success
type: string
sample: example domain
account:
description: Account the project is related to.
returned: success
type: string
sample: example account
tags:
description: List of resource tags associated with the project.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CloudStackException,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackProject(AnsibleCloudStack):
    def get_project(self):
        """Look up the project named in the module params.

        The result is cached on ``self.project``; returns the project dict
        or None if no matching project exists.
        """
        if not self.project:
            project = self.module.params.get('name')
            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id')
            }
            projects = self.cs.listProjects(**args)
            if projects:
                for p in projects['project']:
                    # Match case-insensitively by name, or exactly by UUID.
                    if project.lower() in [p['name'].lower(), p['id']]:
                        self.project = p
                        break
        return self.project
    def present_project(self):
        """Ensure the project exists, creating or updating it as needed.

        Also reconciles resource tags; returns the resulting project dict.
        """
        project = self.get_project()
        if not project:
            project = self.create_project(project)
        else:
            project = self.update_project(project)
        if project:
            project = self.ensure_tags(resource=project, resource_type='project')
            # refresh resource
            self.project = project
        return project
    def update_project(self, project):
        """Update the project's display text if it differs.

        Honors check mode; optionally polls the async job to completion.
        Returns the (possibly refreshed) project dict.
        """
        args = {
            'id': project['id'],
            'displaytext': self.get_or_fallback('display_text', 'name')
        }
        if self.has_changed(args, project):
            self.result['changed'] = True
            if not self.module.check_mode:
                project = self.cs.updateProject(**args)
                if 'errortext' in project:
                    self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
                poll_async = self.module.params.get('poll_async')
                if project and poll_async:
                    project = self.poll_job(project, 'project')
        return project
    def create_project(self, project):
        """Create the project described by the module params.

        Honors check mode; optionally polls the async job to completion.
        Returns the created project dict (or the unchanged input in
        check mode).
        """
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'account': self.get_account('name'),
            'domainid': self.get_domain('id')
        }
        if not self.module.check_mode:
            project = self.cs.createProject(**args)
            if 'errortext' in project:
                self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
            poll_async = self.module.params.get('poll_async')
            if project and poll_async:
                project = self.poll_job(project, 'project')
        return project
    def state_project(self, state='active'):
        """Ensure the project exists and is in the given state.

        ``state`` is 'active' or 'suspended' (compared case-insensitively
        against the API's reported state).  Honors check mode and optional
        async polling; returns the project dict.
        """
        project = self.present_project()
        if project['state'].lower() != state:
            self.result['changed'] = True
            args = {
                'id': project['id']
            }
            if not self.module.check_mode:
                if state == 'suspended':
                    project = self.cs.suspendProject(**args)
                else:
                    project = self.cs.activateProject(**args)
                if 'errortext' in project:
                    self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
                poll_async = self.module.params.get('poll_async')
                if project and poll_async:
                    project = self.poll_job(project, 'project')
        return project
    def absent_project(self):
        """Delete the project if it exists.

        Honors check mode; optionally polls the async delete job.  Returns
        the pre-deletion project dict (or None if it did not exist).
        """
        project = self.get_project()
        if project:
            self.result['changed'] = True
            args = {
                'id': project['id']
            }
            if not self.module.check_mode:
                res = self.cs.deleteProject(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    res = self.poll_job(res, 'project')
        return project
def main():
    """Module entry point: build the arg spec, dispatch on state, exit."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        state=dict(choices=['present', 'absent', 'active', 'suspended'], default='present'),
        domain=dict(),
        account=dict(),
        poll_async=dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag']),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    try:
        acs_project = AnsibleCloudStackProject(module)
        state = module.params.get('state')
        # 'present' (the default) creates/updates; the other states delegate
        # to the matching handler on the project wrapper.
        if state in ['absent']:
            project = acs_project.absent_project()
        elif state in ['active', 'suspended']:
            project = acs_project.state_project(state=state)
        else:
            project = acs_project.present_project()
        result = acs_project.get_result(project)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))
    module.exit_json(**result)
main()
| gpl-3.0 |
ujenmr/ansible | lib/ansible/modules/notification/matrix.py | 47 | 3854 | #!/usr/bin/python
# coding: utf-8
# (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
author: "Jan Christian Grünhage (@jcgruenhage)"
module: matrix
short_description: Send notifications to matrix
description:
- This module sends html formatted notifications to matrix rooms.
version_added: "2.8"
options:
msg_plain:
description:
- Plain text form of the message to send to matrix, usually markdown
required: true
msg_html:
description:
- HTML form of the message to send to matrix
required: true
room_id:
description:
- ID of the room to send the notification to
required: true
hs_url:
description:
- URL of the homeserver, where the CS-API is reachable
required: true
token:
description:
- Authentication token for the API call. If provided, user_id and password are not required
user_id:
description:
- The user id of the user
password:
description:
- The password to log in with
requirements:
- matrix-client (Python library)
'''
EXAMPLES = '''
- name: Send matrix notification with token
matrix:
msg_plain: "**hello world**"
msg_html: "<b>hello world</b>"
room_id: "!12345678:server.tld"
hs_url: "https://matrix.org"
token: "{{ matrix_auth_token }}"
- name: Send matrix notification with user_id and password
matrix:
msg_plain: "**hello world**"
msg_html: "<b>hello world</b>"
room_id: "!12345678:server.tld"
hs_url: "https://matrix.org"
user_id: "ansible_notification_bot"
password: "{{ matrix_auth_password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
MATRIX_IMP_ERR = None
try:
from matrix_client.client import MatrixClient
except ImportError:
MATRIX_IMP_ERR = traceback.format_exc()
matrix_found = False
else:
matrix_found = True
def run_module():
    """Validate the module arguments and send the matrix notification.

    Always terminates via AnsibleModule.exit_json()/fail_json() so Ansible
    receives a JSON result.
    """
    module_args = dict(
        msg_plain=dict(type='str', required=True),
        msg_html=dict(type='str', required=True),
        room_id=dict(type='str', required=True),
        hs_url=dict(type='str', required=True),
        token=dict(type='str', required=False, no_log=True),
        user_id=dict(type='str', required=False),
        password=dict(type='str', required=False, no_log=True),
    )
    result = dict(
        changed=False,
        message=''
    )
    module = AnsibleModule(
        argument_spec=module_args,
        mutually_exclusive=[['password', 'token']],
        required_one_of=[['password', 'token']],
        required_together=[['user_id', 'password']],
        supports_check_mode=True
    )
    if not matrix_found:
        module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
    if module.check_mode:
        # BUGFIX: a bare "return result" let the process end without
        # emitting the JSON payload Ansible expects, so check mode was
        # reported as a module failure.  Modules must exit via exit_json().
        module.exit_json(**result)
    # create a client object
    client = MatrixClient(module.params['hs_url'])
    if module.params['token'] is not None:
        client.api.token = module.params['token']
    else:
        client.login(module.params['user_id'], module.params['password'], sync=False)
    # make sure we are in a given room and return a room object for it
    room = client.join_room(module.params['room_id'])
    # send an html formatted messages
    room.send_html(module.params['msg_html'], module.params['msg_plain'])
    module.exit_json(**result)
def main():
    # Thin entry point: keeps run_module() importable for unit tests.
    run_module()
if __name__ == '__main__':
    main()
| gpl-3.0 |
mt2d2/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/handlers.py | 45 | 11370 | import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
    """Map a filename's extension to a MIME type.

    Falls back to application/octet-stream for unknown or missing
    extensions.
    """
    extension = os.path.splitext(path)[1].lstrip(".")
    return content_types.get(extension, "application/octet-stream")
def filesystem_path(base_path, request, url_base="/"):
    """Map the request URL onto a filesystem path under *base_path*.

    base_path defaults to the request's doc_root when None.  Raises
    HTTPException(404) for any path containing ".." or resolving outside
    the served root.
    """
    root = base_path if base_path is not None else request.doc_root

    rel_path = request.url_parts.path
    if rel_path.startswith(url_base):
        rel_path = rel_path[len(url_base):]

    if ".." in rel_path:
        raise HTTPException(404)

    full_path = os.path.join(root, rel_path)

    # Otherwise setting path to / allows access outside the root directory
    if not full_path.startswith(root):
        raise HTTPException(404)

    return full_path
class DirectoryHandler(object):
    """Serve an HTML listing of a directory's contents.

    base_path is the filesystem root being served (None -> the request's
    doc_root); url_base is the URL prefix that maps onto it.
    """

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        # Only directory URLs (with a trailing slash) are listable.
        if not request.url_parts.path.endswith("/"):
            raise HTTPException(404)

        path = filesystem_path(self.base_path, request, self.url_base)

        assert os.path.isdir(path)

        response.headers = [("Content-Type", "text/html")]
        # Bug fix: the template previously closed the <ul> with a stray
        # </li>, producing invalid HTML.
        response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
       "items": "\n".join(self.list_items(request, path))}

    def list_items(self, request, path):
        """Yield one <li> fragment per entry in *path* (plus a ".." link
        for non-root directories)."""
        # TODO: this won't actually list all routes, only the
        # ones that correspond to a real filesystem path. It's
        # not possible to list every route that will match
        # something, but it should be possible to at least list the
        # statically defined ones
        base_path = request.url_parts.path
        if not base_path.endswith("/"):
            base_path += "/"
        if base_path != "/":
            link = urlparse.urljoin(base_path, "..")
            yield ("""<li class="dir"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": ".."})
        for item in sorted(os.listdir(path)):
            link = cgi.escape(urllib.quote(item))
            if os.path.isdir(os.path.join(path, item)):
                link += "/"
                class_ = "dir"
            else:
                class_ = "file"
            yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": cgi.escape(item), "class": class_})


directory_handler = DirectoryHandler()
class FileHandler(object):
    """Serve a file from disk, honouring per-file ".headers" metadata
    files, HTTP Range requests (including multipart/byteranges), and
    "pipe" response post-processing."""

    def __init__(self, base_path=None, url_base="/"):
        # base_path: filesystem root (None -> request.doc_root at call time)
        # url_base: URL prefix that maps onto base_path
        self.base_path = base_path
        self.url_base = url_base
        self.directory_handler = DirectoryHandler(self.base_path)

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        if os.path.isdir(path):
            # Directories get an HTML listing instead of file content.
            return self.directory_handler(request, response)
        try:
            #This is probably racy with some other process trying to change the file
            file_size = os.stat(path).st_size
            response.headers.update(self.get_headers(request, path))
            if "Range" in request.headers:
                try:
                    byte_ranges = RangeParser()(request.headers['Range'], file_size)
                except HTTPException as e:
                    if e.code == 416:
                        # Range not satisfiable: advertise the real size.
                        response.headers.set("Content-Range", "bytes */%i" % file_size)
                    raise
            else:
                byte_ranges = None
            data = self.get_data(response, path, byte_ranges)
            response.content = data
            query = urlparse.parse_qs(request.url_parts.query)
            pipeline = None
            if "pipe" in query:
                # An explicit ?pipe=... query string wins.
                pipeline = Pipeline(query["pipe"][-1])
            elif os.path.splitext(path)[0].endswith(".sub"):
                # name.sub.ext files get template substitution automatically.
                pipeline = Pipeline("sub")

            if pipeline is not None:
                response = pipeline(request, response)

            return response

        except (OSError, IOError):
            raise HTTPException(404)

    def get_headers(self, request, path):
        # Headers come from the defaults, then a directory-wide "__dir__"
        # metadata file, then the file's own headers file (later wins when
        # the response applies them in order).
        rv = self.default_headers(path)
        rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
        rv.extend(self.load_headers(request, path))
        return rv

    def load_headers(self, request, path):
        # A ".sub.headers" file is run through template substitution;
        # a plain ".headers" file is used verbatim.
        headers_path = path + ".sub.headers"
        if os.path.exists(headers_path):
            use_sub = True
        else:
            headers_path = path + ".headers"
            use_sub = False

        try:
            with open(headers_path) as headers_file:
                data = headers_file.read()
        except IOError:
            # No headers file for this path.
            return []
        else:
            if use_sub:
                data = template(request, data)
            # One "Name: value" header per non-empty line.
            return [tuple(item.strip() for item in line.split(":", 1))
                    for line in data.splitlines() if line]

    def get_data(self, response, path, byte_ranges):
        """Return the file content, or the requested byte range(s) of it,
        setting status 206 and the range headers as appropriate."""
        with open(path, 'rb') as f:
            if byte_ranges is None:
                return f.read()
            else:
                response.status = 206
                if len(byte_ranges) > 1:
                    # Multiple ranges -> multipart/byteranges body.
                    parts_content_type, content = self.set_response_multipart(response,
                                                                              byte_ranges,
                                                                              f)
                    for byte_range in byte_ranges:
                        content.append_part(self.get_range_data(f, byte_range),
                                            parts_content_type,
                                            [("Content-Range", byte_range.header_value())])
                    return content
                else:
                    response.headers.set("Content-Range", byte_ranges[0].header_value())
                    return self.get_range_data(f, byte_ranges[0])

    def set_response_multipart(self, response, ranges, f):
        # NOTE(review): response.headers.get appears to return a list of
        # stored values; the most recently set Content-Type is used for
        # the parts - confirm against the Response headers API.
        parts_content_type = response.headers.get("Content-Type")
        if parts_content_type:
            parts_content_type = parts_content_type[-1]
        else:
            parts_content_type = None
        content = MultipartContent()
        response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
        return parts_content_type, content

    def get_range_data(self, f, byte_range):
        # Range bounds are used as a half-open [lower, upper) interval here.
        f.seek(byte_range.lower)
        return f.read(byte_range.upper - byte_range.lower)

    def default_headers(self, path):
        # Content type guessed from the file extension.
        return [("Content-Type", guess_content_type(path))]


file_handler = FileHandler()
class PythonScriptHandler(object):
    """Execute a Python file from disk and delegate the request to the
    main(request, response) function it defines."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        script_path = filesystem_path(self.base_path, request, self.url_base)
        try:
            scope = {"__file__": script_path}
            execfile(script_path, scope, scope)
            if "main" not in scope:
                raise HTTPException(500, "No main function in script %s" % script_path)
            FunctionHandler(scope["main"])(request, response)
        except IOError:
            # Missing script (or IOError raised by the handler itself).
            raise HTTPException(404)


python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
    """Adapt a plain function(request, response) into a handler.

    The function may return None (response already populated), a bare
    body, a (headers, body) pair, or a (status, headers, body) triple.
    Any exception it raises becomes an HTTP 500 carrying the traceback.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        try:
            result = self.func(request, response)
        except Exception:
            raise HTTPException(500, message=traceback.format_exc())
        if result is None:
            return
        if not isinstance(result, tuple):
            # Bare return value is the response body.
            response.content = result
            return
        if len(result) == 3:
            status, headers, content = result
            response.status = status
        elif len(result) == 2:
            headers, content = result
        else:
            raise HTTPException(500)
        response.headers.update(headers)
        response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
    """Decorator: wrap a bare function as a FunctionHandler."""
    return FunctionHandler(func)
class JsonHandler(object):
    """Handler wrapper that JSON-encodes the wrapped function's body and
    sets the Content-Type and Content-Length headers accordingly."""

    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        return FunctionHandler(self.handle_request)(request, response)

    def handle_request(self, request, response):
        result = self.func(request, response)
        response.headers.set("Content-Type", "application/json")
        if isinstance(result, tuple):
            # Preserve any (status, headers) prefix; encode only the body.
            encoded = json.dumps(result[-1])
            value = tuple(list(result[:-1]) + [encoded])
            length = len(encoded)
        else:
            value = json.dumps(result)
            length = len(value)
        response.headers.set("Content-Length", length)
        return value


def json_handler(func):
    """Decorator: wrap a bare function as a JsonHandler."""
    return JsonHandler(func)
class AsIsHandler(object):
    """Serve a file's bytes verbatim through the response writer (the
    file is expected to contain its own status line/headers), then close
    the connection."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        target = filesystem_path(self.base_path, request, self.url_base)
        try:
            handle = open(target)
            try:
                response.writer.write_content(handle.read())
                response.close_connection = True
            finally:
                handle.close()
        except IOError:
            raise HTTPException(404)


as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
    def __init__(self, handler, user, password):
        """
        A Basic Auth handler

        :Args:
        - handler: a secondary handler for the request after authentication is successful (example file_handler)
        - user: string of the valid user name or None if any / all credentials are allowed
        - password: string of the password required
        """
        self.user = user
        self.password = password
        self.handler = handler

    def __call__(self, request, response):
        if "authorization" not in request.headers:
            # No credentials supplied: challenge the client.
            response.status = 401
            response.headers.set("WWW-Authenticate", "Basic")
            return response
        credentials = Authentication(request.headers)
        authorized = (self.user is None or
                      (credentials.username == self.user and
                       credentials.password == self.password))
        if not authorized:
            response.set_error(403, "Invalid username or password")
            return response
        return self.handler(request, response)


basic_auth_handler = BasicAuthHandler(file_handler, None, None)
class ErrorHandler(object):
    """Handler that unconditionally responds with a fixed HTTP error
    status."""

    def __init__(self, status):
        self.status = status

    def __call__(self, request, response):
        response.set_error(self.status)
| mpl-2.0 |
eul-721/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/pip/_vendor/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
# Character-class source data in the XML 1.0 spec's own
# "#xNNNN | [#xNNNN-#xNNNN]" notation.  These tables are parsed by
# charStringToList below; the regexps actually used at runtime are the
# precomputed ones further down ("output from the above"), so these
# strings mainly document where those regexps came from.
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""

ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""

combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""

digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""

# NOTE(review): the "#[#x3031-#x3035]" token below carries a stray
# leading "#" compared with every other range token; kept as-is here -
# verify against the XML 1.0 Extender production before "fixing" it.
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""

letter = " | ".join([baseChar, ideographic])

# Name characters: the full Name production without the stricter
# first-character restriction (that restriction is nameFirst below).
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
                   extender])
nameFirst = " | ".join([letter, "_"])

# Regexps matching a single "#xNNNN" reference and a
# "[#xNNNN-#xNNNN]" range token respectively.
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
    """Parse a spec-format character-class string (" | "-separated
    "#xNNNN" / "[#xNNNN-#xNNNN]" / literal-character tokens) into a
    normalised list of [low, high] codepoint ranges."""
    ranges = []
    for token in (part.strip() for part in chars.split(" | ")):
        matched = False
        for pattern in (reChar, reCharRange):
            m = pattern.match(token)
            if m is None:
                continue
            bounds = [hexToInt(group) for group in m.groups()]
            if len(bounds) == 1:
                # A single codepoint becomes a degenerate [n, n] range.
                bounds = bounds * 2
            ranges.append(bounds)
            matched = True
            break
        if not matched:
            # Not spec notation: must be a single literal character.
            assert len(token) == 1
            ranges.append([ord(token)] * 2)
    return normaliseCharList(ranges)
def normaliseCharList(charList):
    """Sort [low, high] ranges and merge any that touch or overlap.

    Note: when ranges overlap, the later range's upper bound replaces the
    merged one unconditionally (matching the original behaviour)."""
    merged = []
    for item in sorted(charList):
        assert item[1] >= item[0]
        if merged and item[0] <= merged[-1][1] + 1:
            # Adjacent or overlapping: extend the previous range in place.
            merged[-1][1] = item[1]
        else:
            merged.append(item)
    return merged
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)


def missingRanges(charList):
    """Return the [low, high] BMP codepoint ranges *not* covered by
    charList (a non-empty, normalised, sorted list of inclusive ranges)."""
    rv = []
    # Bug fix: compare the first range's lower bound (charList[0][0]) to
    # 0, not the range list itself.  The original `charList[0] != 0`
    # compared a list with an int, which is always true and produced a
    # bogus [0, -1] range whenever coverage started at codepoint 0.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        # Gap between this range and the next.
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    """Render a list of [low, high] codepoint ranges as a regexp
    character-class string."""
    parts = []
    for low, high in charList:
        if low == high:
            parts.append(escapeRegexp(chr(low)))
        else:
            parts.append(escapeRegexp(chr(low)) + "-" +
                         escapeRegexp(chr(high)))
    return "[%s]" % "".join(parts)
def hexToInt(hex_str):
    """Parse a hexadecimal string (e.g. "0041") into an int."""
    return int(hex_str, 16)
def escapeRegexp(string):
    """Backslash-escape every regexp metacharacter in *string*."""
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    return "".join("\\" + char if char in specialCharacters else char
                   for char in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
# Matches any character that is NOT legal in an XML public identifier
# (the XML 1.0 PubidChar production); used by InfosetFilter.coercePubid.
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerce an HTML infoset (names, character data, comments, public
    identifiers) into forms that are legal in an XML document.

    Characters that cannot be represented are replaced with an escape of
    the form "U" + five uppercase hex digits (see escapeChar), and every
    lossy coercion emits a DataLossWarning.
    """
    # Matches the escape sequences produced by escapeChar, so that
    # fromXmlName can reverse them.
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self, replaceChars=None,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # NOTE(review): replaceChars is accepted but never referenced in
        # this class body - presumably retained for API compatibility;
        # confirm before removing.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs
        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd
        self.replaceFormFeedCharacters = replaceFormFeedCharacters
        self.preventSingleQuotePubid = preventSingleQuotePubid
        # Cache of single character -> escape string, filled by escapeChar.
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return an XML-safe attribute name, or None if the attribute
        must be dropped entirely."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name, namespace=None):
        """Return an XML-safe element name."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Rewrite comment text so that it cannot contain "--"."""
        if self.preventDoubleDashComments:
            # Loop because the replacement can itself be re-split; warn
            # once per remaining occurrence.
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
        return data

    def coerceCharacters(self, data):
        """Replace characters that are illegal in XML character data
        (currently only U+000C form feed)."""
        if self.replaceFormFeedCharacters:
            # One warning per occurrence, then a single bulk replace.
            for i in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        """Replace characters that are illegal in an XML public
        identifier, optionally including single quotes."""
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        """Escape any characters of *name* that are not legal in an XML
        Name; the first character is checked against the stricter
        name-start character set."""
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst

        nameRestOutput = nameRest
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        # escapeChar populates replaceCache, so each character is only
        # formatted once across the filter's lifetime.
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        """Invert toXmlName: expand "UXXXXX" escapes back to the
        original characters."""
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        # "U" + five uppercase hex digits, cached for reuse.
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        # Drop the leading "U" and parse the hex codepoint.
        return chr(int(charcode[1:], 16))
| gpl-2.0 |
leoc/home-assistant | homeassistant/components/sensor/fastdotcom.py | 6 | 3899 | """
Support for Fast.com internet speed testing sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fastdotcom/
"""
import logging
import voluptuous as vol
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.components import recorder
from homeassistant.components.sensor import (DOMAIN, PLATFORM_SCHEMA)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_change
REQUIREMENTS = ['fastdotcom==0.0.1']

_LOGGER = logging.getLogger(__name__)

# Configuration keys controlling when the (bandwidth-heavy) speedtest runs.
CONF_SECOND = 'second'
CONF_MINUTE = 'minute'
CONF_HOUR = 'hour'
CONF_DAY = 'day'

# Each scheduling option accepts a single value or a list of values; by
# default the test runs at second 0 of minute 0.  Valid ranges:
# seconds/minutes 0-59, hours 0-23, days of month 1-31.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_SECOND, default=[0]):
        vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(0, 59))]),
    vol.Optional(CONF_MINUTE, default=[0]):
        vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(0, 59))]),
    vol.Optional(CONF_HOUR):
        vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(0, 23))]),
    vol.Optional(CONF_DAY):
        vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(1, 31))]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Fast.com sensor."""
    speedtest_data = SpeedtestData(hass, config)
    speedtest_sensor = SpeedtestSensor(speedtest_data)
    add_devices([speedtest_sensor])

    def update(call=None):
        """Update service for manual updates."""
        speedtest_data.update(dt_util.now())
        speedtest_sensor.update()

    # Expose a service so users can trigger a speedtest on demand.
    hass.services.register(DOMAIN, 'update_fastdotcom', update)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
    """Implementation of a Fast.com sensor."""

    def __init__(self, speedtest_data):
        """Initialize the sensor."""
        self._name = 'Fast.com Download'
        # Shared SpeedtestData object that runs the actual measurements.
        self.speedtest_client = speedtest_data
        self._state = None
        self._unit_of_measurement = 'Mbit/s'

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def update(self):
        """Get the latest data and update the states."""
        data = self.speedtest_client.data
        if data is None:
            # No measurement yet this run: fall back to the last good
            # state stored by the recorder so the sensor survives restarts.
            entity_id = 'sensor.fastcom_speedtest'
            states = recorder.get_model('States')
            try:
                last_state = recorder.execute(
                    recorder.query('States').filter(
                        (states.entity_id == entity_id) &
                        (states.last_changed == states.last_updated) &
                        (states.state != 'unknown')
                    ).order_by(states.state_id.desc()).limit(1))
            # Recorder may be unavailable or not yet initialised.
            except TypeError:
                return
            except RuntimeError:
                return
            if not last_state:
                return
            self._state = last_state[0].state
        else:
            self._state = data['download']
class SpeedtestData(object):
    """Get the latest data from fast.com."""

    def __init__(self, hass, config):
        """Initialize the data object."""
        # Latest result dict; None until the first run completes.
        self.data = None
        # Schedule periodic measurement at the configured times.
        track_time_change(hass, self.update,
                          second=config.get(CONF_SECOND),
                          minute=config.get(CONF_MINUTE),
                          hour=config.get(CONF_HOUR),
                          day=config.get(CONF_DAY))

    def update(self, now):
        """Get the latest data from fast.com."""
        # Imported lazily so the dependency is only loaded when a test runs.
        from fastdotcom import fast_com
        _LOGGER.info('Executing fast.com speedtest')
        self.data = {'download': fast_com()}
| mit |
jantman/GitPython | git/test/test_fun.py | 3 | 9277 | from git.test.lib import *
from git.objects.fun import (
traverse_tree_recursive,
traverse_trees_recursive,
tree_to_stream
)
from git.index.fun import (
aggressive_tree_merge
)
from gitdb.util import bin_to_hex
from gitdb.base import IStream
from gitdb.typ import str_tree_type
from stat import (
S_IFDIR,
S_IFREG,
S_IFLNK
)
from git.index import IndexFile
from cStringIO import StringIO
class TestFun(TestBase):
def _assert_index_entries(self, entries, trees):
index = IndexFile.from_tree(self.rorepo, *[self.rorepo.tree(bin_to_hex(t)) for t in trees])
assert entries
assert len(index.entries) == len(entries)
for entry in entries:
assert (entry.path, entry.stage) in index.entries
# END assert entry matches fully
def test_aggressive_tree_merge(self):
# head tree with additions, removals and modification compared to its predecessor
odb = self.rorepo.odb
HC = self.rorepo.commit("6c1faef799095f3990e9970bc2cb10aa0221cf9c")
H = HC.tree
B = HC.parents[0].tree
# entries from single tree
trees = [H.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# from multiple trees
trees = [B.binsha, H.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# three way, no conflict
tree = self.rorepo.tree
B = tree("35a09c0534e89b2d43ec4101a5fb54576b577905")
H = tree("4fe5cfa0e063a8d51a1eb6f014e2aaa994e5e7d4")
M = tree("1f2b19de3301e76ab3a6187a49c9c93ff78bafbd")
trees = [B.binsha, H.binsha, M.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# three-way, conflict in at least one file, both modified
B = tree("a7a4388eeaa4b6b94192dce67257a34c4a6cbd26")
H = tree("f9cec00938d9059882bb8eabdaf2f775943e00e5")
M = tree("44a601a068f4f543f73fd9c49e264c931b1e1652")
trees = [B.binsha, H.binsha, M.binsha]
self._assert_index_entries(aggressive_tree_merge(odb, trees), trees)
# too many trees
self.failUnlessRaises(ValueError, aggressive_tree_merge, odb, trees*2)
def mktree(self, odb, entries):
"""create a tree from the given tree entries and safe it to the database"""
sio = StringIO()
tree_to_stream(entries, sio.write)
sio.seek(0)
istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
return istream.binsha
@with_rw_repo('0.1.6')
def test_three_way_merge(self, rwrepo):
def mkfile(name, sha, executable=0):
return (sha, S_IFREG | 0644 | executable*0111, name)
def mkcommit(name, sha):
return (sha, S_IFDIR | S_IFLNK, name)
def assert_entries(entries, num_entries, has_conflict=False):
assert len(entries) == num_entries
assert has_conflict == (len([e for e in entries if e.stage != 0]) > 0)
mktree = self.mktree
shaa = "\1"*20
shab = "\2"*20
shac = "\3"*20
odb = rwrepo.odb
# base tree
bfn = 'basefile'
fbase = mkfile(bfn, shaa)
tb = mktree(odb, [fbase])
# non-conflicting new files, same data
fa = mkfile('1', shab)
th = mktree(odb, [fbase, fa])
fb = mkfile('2', shac)
tm = mktree(odb, [fbase, fb])
# two new files, same base file
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 3)
# both delete same file, add own one
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkfile('2', shac)
tm = mktree(odb, [fb])
# two new files
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 2)
# same file added in both, differently
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkfile('1', shac)
tm = mktree(odb, [fb])
# expect conflict
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 2, True)
# same file added, different mode
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkcommit('1', shab)
tm = mktree(odb, [fb])
# expect conflict
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 2, True)
# same file added in both
fa = mkfile('1', shab)
th = mktree(odb, [fa])
fb = mkfile('1', shab)
tm = mktree(odb, [fb])
# expect conflict
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 1)
# modify same base file, differently
fa = mkfile(bfn, shab)
th = mktree(odb, [fa])
fb = mkfile(bfn, shac)
tm = mktree(odb, [fb])
# conflict, 3 versions on 3 stages
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 3, True)
# change mode on same base file, by making one a commit, the other executable
# no content change ( this is totally unlikely to happen in the real world )
fa = mkcommit(bfn, shaa)
th = mktree(odb, [fa])
fb = mkfile(bfn, shaa, executable=1)
tm = mktree(odb, [fb])
# conflict, 3 versions on 3 stages, because of different mode
trees = [tb, th, tm]
assert_entries(aggressive_tree_merge(odb, trees), 3, True)
for is_them in range(2):
# only we/they change contents
fa = mkfile(bfn, shab)
th = mktree(odb, [fa])
trees = [tb, th, tb]
if is_them:
trees = [tb, tb, th]
entries = aggressive_tree_merge(odb, trees)
assert len(entries) == 1 and entries[0].binsha == shab
# only we/they change the mode
fa = mkcommit(bfn, shaa)
th = mktree(odb, [fa])
trees = [tb, th, tb]
if is_them:
trees = [tb, tb, th]
entries = aggressive_tree_merge(odb, trees)
assert len(entries) == 1 and entries[0].binsha == shaa and entries[0].mode == fa[1]
# one side deletes, the other changes = conflict
fa = mkfile(bfn, shab)
th = mktree(odb, [fa])
tm = mktree(odb, [])
trees = [tb, th, tm]
if is_them:
trees = [tb, tm, th]
# as one is deleted, there are only 2 entries
assert_entries(aggressive_tree_merge(odb, trees), 2, True)
# END handle ours, theirs
    def _assert_tree_entries(self, entries, num_trees):
        """Validate traverse_trees_recursive output.

        Each item must carry exactly one slot per input tree, and all
        non-None slots of an item must agree on the entry path.
        """
        for entry in entries:
            assert len(entry) == num_trees
            # e[2] is the path component of a tree entry tuple
            paths = set(e[2] for e in entry if e)
            # only one path per set of entries
            assert len(paths) == 1
        # END verify entry
    def test_tree_traversal(self):
        """Cross-check traverse_trees_recursive on 1, 2 and 3 trees, and
        against Tree.traverse for the single-tree case."""
        # low level tree traversal
        odb = self.rorepo.odb
        H = self.rorepo.tree('29eb123beb1c55e5db4aa652d843adccbd09ae18')  # head tree
        M = self.rorepo.tree('e14e3f143e7260de9581aee27e5a9b2645db72de')  # merge tree
        B = self.rorepo.tree('f606937a7a21237c866efafcad33675e6539c103')  # base tree
        B_old = self.rorepo.tree('1f66cfbbce58b4b552b041707a12d437cc5f400a')  # old base tree
        # two very different trees
        entries = traverse_trees_recursive(odb, [B_old.binsha, H.binsha], '')
        self._assert_tree_entries(entries, 2)
        # input order must not change the number of resulting entries
        oentries = traverse_trees_recursive(odb, [H.binsha, B_old.binsha], '')
        assert len(oentries) == len(entries)
        self._assert_tree_entries(oentries, 2)
        # single tree
        is_no_tree = lambda i, d: i.type != 'tree'
        entries = traverse_trees_recursive(odb, [B.binsha], '')
        assert len(entries) == len(list(B.traverse(predicate=is_no_tree)))
        self._assert_tree_entries(entries, 1)
        # two trees
        entries = traverse_trees_recursive(odb, [B.binsha, H.binsha], '')
        self._assert_tree_entries(entries, 2)
        # three trees
        entries = traverse_trees_recursive(odb, [B.binsha, H.binsha, M.binsha], '')
        self._assert_tree_entries(entries, 3)
    def test_tree_traversal_single(self):
        """Smoke-test traverse_tree_recursive over the trees of the first
        commits in history."""
        max_count = 50  # cap the commit walk so the test stays fast
        count = 0
        odb = self.rorepo.odb
        for commit in self.rorepo.commit("29eb123beb1c55e5db4aa652d843adccbd09ae18").traverse():
            if count >= max_count:
                break
            count += 1
            entries = traverse_tree_recursive(odb, commit.tree.binsha, '')
            # every commit's tree must yield at least one entry
            assert entries
        # END for each commit
| bsd-3-clause |
kemalakyol48/python-for-android | python3-alpha/python3-src/Tools/demo/queens.py | 113 | 2270 | #!/usr/bin/env python3
"""
N queens problem.
The (well-known) problem is due to Niklaus Wirth.
This solution is inspired by Dijkstra (Structured Programming). It is
a classic recursive backtracking approach.
"""
N = 8                                    # Default; command line overrides


class Queens:
    """Solver for the N queens problem via recursive backtracking.

    Three flag arrays (rows, up-diagonals indexed by x-y, down-diagonals
    indexed by x+y) make the safety test O(1) per square.
    """

    silent = 0                  # If true, count solutions only

    def __init__(self, n=N):
        self.n = n
        self.reset()

    def reset(self):
        """Clear the board and all bookkeeping state."""
        size = self.n
        self.y = [None] * size              # queen's row in each column
        self.row = [0] * size               # is row[y] occupied?
        self.up = [0] * (2*size-1)          # upward diagonal [x-y] occupied?
        self.down = [0] * (2*size-1)        # downward diagonal [x+y] occupied?
        self.nfound = 0                     # solutions found so far

    def solve(self, x=0):
        """Recursively place queens in columns x .. n-1."""
        for row in range(self.n):
            if not self.safe(x, row):
                continue
            self.place(x, row)
            if x+1 == self.n:
                self.display()              # full board: record a solution
            else:
                self.solve(x+1)
            self.remove(x, row)             # backtrack

    def safe(self, x, y):
        """Is square (x, y) attacked by no already-placed queen?"""
        return not (self.row[y] or self.up[x-y] or self.down[x+y])

    def place(self, x, y):
        """Put a queen on (x, y) and mark its row and diagonals."""
        self.y[x] = y
        self.row[y] = 1
        self.up[x-y] = 1
        self.down[x+y] = 1

    def remove(self, x, y):
        """Take the queen off (x, y), clearing its row and diagonals."""
        self.y[x] = None
        self.row[y] = 0
        self.up[x-y] = 0
        self.down[x+y] = 0

    def display(self):
        """Count one solution and, unless silent, draw the board."""
        self.nfound = self.nfound + 1
        if self.silent:
            return
        border = '+-' + '--'*self.n + '+'
        print(border)
        for row in range(self.n-1, -1, -1):
            cells = ["Q" if self.y[col] == row else "." for col in range(self.n)]
            print('| ' + ' '.join(cells) + ' |')
        print(border)
def main():
    """Command-line driver.

    Usage: queens.py [-n] [size]
    -n counts solutions without printing boards; size overrides the
    default board size N.
    """
    import sys
    silent = 0
    n = N
    # '-n' must be the first argument if present
    if sys.argv[1:2] == ['-n']:
        silent = 1
        del sys.argv[1]
    if sys.argv[1:]:
        n = int(sys.argv[1])
    q = Queens(n)
    q.silent = silent
    q.solve()
    print("Found", q.nfound, "solutions.")
| apache-2.0 |
dcbdmb/example-code | 17-futures/countries/flags2_common.py | 10 | 5281 | """Utilities for second set of flag examples.
"""
import os
import time
import sys
import string
import argparse
from collections import namedtuple
from enum import Enum
Result = namedtuple('Result', 'status data')
HTTPStatus = Enum('Status', 'ok not_found error')
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
'MX PH VN ET EG DE IR TR CD FR').split()
DEFAULT_CONCUR_REQ = 1
MAX_CONCUR_REQ = 1
SERVERS = {
'REMOTE': 'http://flupy.org/data/flags',
'LOCAL': 'http://localhost:8001/flags',
'DELAY': 'http://localhost:8002/flags',
'ERROR': 'http://localhost:8003/flags',
}
DEFAULT_SERVER = 'LOCAL'
DEST_DIR = 'downloads/'
COUNTRY_CODES_FILE = 'country_codes.txt'
def save_flag(img, filename):
    """Write the downloaded image bytes to DEST_DIR/filename."""
    target = os.path.join(DEST_DIR, filename)
    with open(target, 'wb') as fp:
        fp.write(img)
def initial_report(cc_list, actual_req, server_label):
    """Announce the server, the country codes requested and the number of
    concurrent connections that will be used."""
    # list up to 10 codes explicitly, otherwise just the range
    if len(cc_list) <= 10:
        cc_msg = ', '.join(cc_list)
    else:
        cc_msg = 'from {} to {}'.format(cc_list[0], cc_list[-1])
    print('{} site: {}'.format(server_label, SERVERS[server_label]))
    msg = 'Searching for {} flag{}: {}'
    plural = 's' if len(cc_list) != 1 else ''
    print(msg.format(len(cc_list), plural, cc_msg))
    plural = 's' if actual_req != 1 else ''
    msg = '{} concurrent connection{} will be used.'
    print(msg.format(actual_req, plural))
def final_report(cc_list, counter, start_time):
    """Print download statistics (ok / not found / error counts) and the
    elapsed wall-clock time since start_time."""
    elapsed = time.time() - start_time
    print('-' * 20)
    msg = '{} flag{} downloaded.'
    plural = 's' if counter[HTTPStatus.ok] != 1 else ''
    print(msg.format(counter[HTTPStatus.ok], plural))
    if counter[HTTPStatus.not_found]:
        print(counter[HTTPStatus.not_found], 'not found.')
    if counter[HTTPStatus.error]:
        plural = 's' if counter[HTTPStatus.error] != 1 else ''
        print('{} error{}.'.format(counter[HTTPStatus.error], plural))
    print('Elapsed time: {:.2f}s'.format(elapsed))
def expand_cc_args(every_cc, all_cc, cc_args, limit):
    """Expand command-line country-code arguments into a sorted list.

    every_cc: generate every AA..ZZ combination;
    all_cc: read codes from COUNTRY_CODES_FILE;
    otherwise each argument is a two-letter code or a single letter
    standing for all codes with that first letter.
    The result is sorted and truncated to at most `limit` codes.
    Raises ValueError for malformed arguments.
    """
    letters = string.ascii_uppercase
    codes = set()
    if every_cc:
        for first in letters:
            for second in letters:
                codes.add(first + second)
    elif all_cc:
        with open(COUNTRY_CODES_FILE) as fp:
            codes.update(fp.read().split())
    else:
        for arg in cc_args:
            cc = arg.upper()
            if len(cc) == 1 and cc in letters:
                # single letter: expand to all codes starting with it
                codes.update(cc + suffix for suffix in letters)
            elif len(cc) == 2 and all(ch in letters for ch in cc):
                codes.add(cc)
            else:
                msg = 'each CC argument must be A to Z or AA to ZZ.'
                raise ValueError('*** Usage error: ' + msg)
    return sorted(codes)[:limit]
def process_args(default_concur_req):
    """Parse command-line options for the flags2 examples.

    Returns (args, cc_list): the argparse namespace and the sorted list
    of country codes to fetch.  Prints usage and exits with status 1 on
    any invalid option combination.
    """
    server_options = ', '.join(sorted(SERVERS))
    parser = argparse.ArgumentParser(
                description='Download flags for country codes. '
                'Default: top 20 countries by population.')
    parser.add_argument('cc', metavar='CC', nargs='*',
                help='country code or 1st letter (eg. B for BA...BZ)')
    parser.add_argument('-a', '--all', action='store_true',
                help='get all available flags (AD to ZW)')
    parser.add_argument('-e', '--every', action='store_true',
                help='get flags for every possible code (AA...ZZ)')
    parser.add_argument('-l', '--limit', metavar='N', type=int,
                help='limit to N first codes', default=sys.maxsize)
    parser.add_argument('-m', '--max_req', metavar='CONCURRENT', type=int,
                default=default_concur_req,
                help='maximum concurrent requests (default={})'
                     .format(default_concur_req))
    parser.add_argument('-s', '--server', metavar='LABEL',
                default=DEFAULT_SERVER,
                help='Server to hit; one of {} (default={})'
                     .format(server_options, DEFAULT_SERVER))
    parser.add_argument('-v', '--verbose', action='store_true',
                help='output detailed progress info')
    args = parser.parse_args()
    # validate numeric options before doing any work
    if args.max_req < 1:
        print('*** Usage error: --max_req CONCURRENT must be >= 1')
        parser.print_usage()
        sys.exit(1)
    if args.limit < 1:
        print('*** Usage error: --limit N must be >= 1')
        parser.print_usage()
        sys.exit(1)
    # server labels are case-insensitive on the command line
    args.server = args.server.upper()
    if args.server not in SERVERS:
        print('*** Usage error: --server LABEL must be one of',
              server_options)
        parser.print_usage()
        sys.exit(1)
    try:
        cc_list = expand_cc_args(args.every, args.all, args.cc, args.limit)
    except ValueError as exc:
        print(exc.args[0])
        parser.print_usage()
        sys.exit(1)
    # no codes given: default to the 20 most populous countries
    if not cc_list:
        cc_list = sorted(POP20_CC)
    return args, cc_list
def main(download_many, default_concur_req, max_concur_req):
    """Shared driver for the flags2 example scripts.

    download_many: the example-specific download function, called as
    download_many(cc_list, base_url, verbose, concur_req) and expected
    to return a Counter keyed by HTTPStatus;
    default_concur_req / max_concur_req: concurrency default and cap.
    """
    args, cc_list = process_args(default_concur_req)
    # never use more connections than requested, allowed, or needed
    actual_req = min(args.max_req, max_concur_req, len(cc_list))
    initial_report(cc_list, actual_req, args.server)
    base_url = SERVERS[args.server]
    t0 = time.time()
    counter = download_many(cc_list, base_url, args.verbose, actual_req)
    assert sum(counter.values()) == len(cc_list), \
        'some downloads are unaccounted for'
    final_report(cc_list, counter, t0)
| mit |
davidoren/CuckooSploit | modules/signatures/DetectDEPInHeap.py | 6 | 2274 | import subprocess
import logging
import json
from lib.cuckoo.common.abstracts import Signature
from subprocess import Popen, PIPE
log = logging.getLogger(__name__)
class DetectDEPInHeap(Signature):
    """Flags API calls that turn private heap memory RWX.

    Making a MEM_PRIVATE region PAGE_EXECUTE_READWRITE is the classic
    endgame of a ROP chain bypassing DEP, so each such call is recorded
    as a hit.
    """
    name = "Detect DEP bypass in the heap, probably by ROP"
    description = "Detects DEP bypass in the heap"
    severity = 2
    categories = ["exploit", "rop"]
    authors = ["Ilana Marcus"]
    minimum = "1.2"
    evented = True
    enabled = True

    MEM_PRIVATE = 0x20000          # MEMORY_BASIC_INFORMATION type: private allocation
    READ_WRITE_EXECUTE = 0x40      # PAGE_EXECUTE_READWRITE

    def __init__(self, caller):
        Signature.__init__(self, caller)
        # Bug fix: this was a bare local variable, so the category filter
        # was silently discarded instead of restricting the signature to
        # process-category API calls.
        self.filter_categories = set(["process"])
        self.has_dep_bypass = False
        self.api_call_functions = set()
        self.hits = []

    def on_call(self, call, pid):
        if call["api"] in ["NtProtectVirtualMemory", "VirtualProtectEx", "VirtualAllocEx"]:
            # hit only when a *private* region becomes read-write-execute
            if int(self.get_argument(call, "VirtQueryType")) == self.MEM_PRIVATE and \
                    int(self.get_argument(call, "protection").replace("0x", ""), 16) == self.READ_WRITE_EXECUTE:
                self.has_dep_bypass = True
                self.api_call_functions.add(call["api"])
                address = self.get_argument(call, "BaseAddress")
                # protection values arrive as "0x..." strings; normalize to hex text
                old_protection = int(self.get_argument(call, "old_protection").replace("0x", ""), 16)
                formatted_old_protection = format(old_protection, '02x')
                new_protection = int(self.get_argument(call, "protection").replace("0x", ""), 16)
                formatted_new_protection = format(new_protection, '02x')
                match_data = {"address": address, "old_protection": formatted_old_protection, "new_protection": formatted_new_protection}
                self.hits.append(match_data)

    def on_complete(self):
        """Summarize all recorded hits into the description; True if any."""
        if self.has_dep_bypass:
            self.description += ": {0}".format(self.api_call_functions)
            for hit in self.hits:
                self.description += " [address: {0}, old protection: {1}, new protection {2}] ".format(hit["address"], hit["old_protection"], hit["new_protection"])
            return True
        return False
| gpl-3.0 |
milad-soufastai/ansible-modules-extras | cloud/vmware/vmware_target_canonical_facts.py | 75 | 2841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_target_canonical_facts
short_description: Return canonical (NAA) from an ESXi host
description:
- Return canonical (NAA) from an ESXi host based on SCSI target ID
version_added: "2.0"
author: Joseph Callen
notes:
requirements:
- Tested on vSphere 5.5
- PyVmomi installed
options:
target_id:
description:
- The target id based on order of scsi device
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_target_canonical_facts command from Ansible Playbooks
- name: Get Canonical name
local_action: >
vmware_target_canonical_facts
hostname="{{ ansible_ssh_host }}" username=root password=vmware
target_id=7
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def find_hostsystem(content):
    """Return the first HostSystem managed object, or None if none exist."""
    hosts = get_all_objs(content, [vim.HostSystem])
    return next(iter(hosts), None)
def main():
    """Resolve a SCSI target id on the first ESXi host to its canonical
    (NAA) device name and exit the module with the result."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(target_id=dict(required=True, type='int')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    content = connect_to_api(module)
    host = find_hostsystem(content)
    # NOTE(review): find_hostsystem may return None; the attribute access
    # below would then raise AttributeError -- presumably a connected
    # vSphere always exposes at least one host; confirm.
    target_lun_uuid = {}
    scsilun_canonical = {}
    # Associate the scsiLun key with the canonicalName (NAA)
    for scsilun in host.config.storageDevice.scsiLun:
        scsilun_canonical[scsilun.key] = scsilun.canonicalName
    # Associate target number with LUN uuid
    # NOTE(review): only adapter[0] is inspected; targets on other HBAs
    # are ignored -- confirm this matches the module's intent.
    for target in host.config.storageDevice.scsiTopology.adapter[0].target:
        for lun in target.lun:
            target_lun_uuid[target.target] = lun.scsiLun
    module.exit_json(changed=False, canonical=scsilun_canonical[target_lun_uuid[module.params['target_id']]])
from ansible.module_utils.basic import *
from ansible.module_utils.vmware import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jmcorgan/bitcoin | test/functional/feature_uacomment.py | 10 | 1697 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(BitcoinTestFramework):
    """Functional test for the -uacomment bitcoind option."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("test multiple -uacomment")
        # subversion looks like "/Satoshi:x.y.z(testnode0)/"; slice out the comment
        test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
        assert_equal(test_uacomment, "(testnode0)")

        self.restart_node(0, ["-uacomment=foo"])
        foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
        assert_equal(foo_uacomment, "(testnode0; foo)")

        self.log.info("test -uacomment max length")
        self.stop_node(0)
        # Bug fix: regex patterns must be raw strings -- "\(" in a normal
        # literal is an invalid escape sequence (DeprecationWarning since
        # Python 3.6, a SyntaxError in future versions). The runtime value
        # is unchanged.
        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
        self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)

        self.log.info("test -uacomment unsafe characters")
        for unsafe_char in ['/', ':', '(', ')']:
            expected = r"Error: User Agent comment \(" + re.escape(unsafe_char) + r"\) contains unsafe characters."
            self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
| mit |
dikujepsen/OpenTran | v2.0/framework/lan/lan_ast.py | 1 | 14257 | import sys
class Node(object):
    """Abstract base class for all AST nodes.

    Subclasses define children() (yielding (name, node) pairs) and an
    attr_names tuple listing their non-node attributes.
    """
    def children(self):
        """ A sequence of all children that are Nodes
        """
        pass
    def show(self, buf=sys.stdout, offset=0, _my_node_name=None):
        """ Pretty print the Node and all its attributes and
            children (recursively) to a buffer.
            :param buf:
                Open IO buffer into which the Node is printed.
            :param offset:
                Initial offset (amount of leading spaces)
            :param _my_node_name:
                name of node
        """
        lead = ' ' * offset
        # header line: class name plus the child-slot name (or <top> for the root)
        if _my_node_name is not None:
            buf.write(lead + self.__class__.__name__ + ' <' + _my_node_name + '>: ')
        else:
            buf.write(lead + self.__class__.__name__ + ' <top>: ')
        # render the subclass-declared plain attributes as name=value pairs
        nvlist = [(n, getattr(self, n)) for n in self.attr_names]
        attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
        buf.write(attrstr)
        buf.write('\n')
        # recurse, indenting each level by two spaces
        for (child_name, child) in self.children():
            child.show(
                buf,
                offset=offset + 2,
                _my_node_name=child_name)
class NodeVisitor(object):
    """Base visitor dispatching on node class name (visit_<ClassName>).

    current_parent is deliberately a CLASS attribute: all visitor
    instances share the node currently being descended into, and
    generic_visit restores it after visiting a subtree.
    """
    current_parent = None
    def __init__(self):
        self.current_child = None
    def visit(self, node):
        """ Visit a node.
        """
        method = 'visit_' + node.__class__.__name__
        # fall back to generic_visit when no visit_<ClassName> method exists
        visitor = getattr(self, method, self.generic_visit)
        retval = visitor(node)
        return retval
    def generic_visit(self, node):
        """ Called if no explicit visitor function exists for a
        node. Implements preorder visiting of the node.
        """
        # save/restore the shared parent so nested visits see the right one
        oldparent = NodeVisitor.current_parent
        NodeVisitor.current_parent = node
        for c_name, c in node.children():
            self.current_child = c_name
            self.visit(c)
        NodeVisitor.current_parent = oldparent
class FileAST(Node):
def __init__(self, ext, coord=None):
self.ext = ext
self.coord = coord
def __repr__(self):
return "FileAST(%r)" % (self.ext)
def children(self):
nodelist = []
for i, child in enumerate(self.ext or []):
nodelist.append(("ext[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Comment(Node):
def __init__(self, value, coord=None):
self.value = value
self.coord = coord
def __repr__(self):
return "Comment(%r)" % (self.value)
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('value',)
class ArrayInit(Node):
    """AST node for an array initializer list, e.g. {1, 2, 3}."""
    def __init__(self, values, coord=None):
        self.values = values
        self.coord = coord
    def __repr__(self):
        return "ArrayInit(%r)" % (self.values)
    def children(self):
        # Bug fix: children() must yield (name, node) pairs like every other
        # node class -- Node.show() unpacks each child as (child_name, child),
        # so appending the bare node made show() crash on ArrayInit nodes.
        nodelist = []
        for i, child in enumerate(self.values):
            nodelist.append(("values[%d]" % i, child))
        return tuple(nodelist)
    attr_names = ()
class Constant(Node):
def __init__(self, value, coord=None):
self.value = value
self.coord = coord
def __repr__(self):
return "Constant(%r)" % (self.value)
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('value',)
class Increment(Node):
def __init__(self, name, op, coord=None):
self.name = name
self.op = op
self.coord = coord
def __repr__(self):
return "Increment(%r%r)" % (self.name, self.op)
def children(self):
nodelist = []
nodelist.append(("name", self.name))
return tuple(nodelist)
attr_names = ("op",)
class UnaryBefore(Node):
def __init__(self, op, expr, coord=None):
self.op = op
self.expr = expr
self.coord = coord
def __repr__(self):
return "UnaryBefore(%r %r)" % (self.op, self.expr)
def children(self):
nodelist = []
nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ("op",)
class Id(Node):
def __init__(self, name, coord=None):
self.name = name
self.coord = coord
def __repr__(self):
return "Id(%r)" % (self.name)
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('name',)
class Include(Node):
def __init__(self, name, coord=None):
self.name = name
self.coord = coord
def __repr__(self):
return "Include(%r)" % (self.name)
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('name',)
class TypeId(Node):
def __init__(self, type, name, coord=None):
self.type = type
self.name = name
self.coord = coord
def __repr__(self):
return "TypeId(%r %r)" % (self.type, self.name)
def children(self):
nodelist = [("name", self.name)]
return tuple(nodelist)
attr_names = ('type',)
class ArrayTypeId(Node):
def __init__(self, type, name, subscript, coord=None):
self.type = type
self.name = name
self.subscript = subscript
self.coord = coord
def __repr__(self):
return "ArrayTypeId(%r %r % r)" % (self.type, self.name, self.subscript)
def children(self):
nodelist = [("name", self.name)]
for count, i in enumerate(self.subscript):
nodelist.append(("subscript %r" % count, i))
return tuple(nodelist)
attr_names = ('type',)
class Assignment(Node):
def __init__(self, lval, rval, op='=', coord=None):
self.lval = lval
self.op = op
self.rval = rval
self.coord = coord
def __repr__(self):
return "Assignment(%r %r %r)" % (self.lval, self.op, self.rval)
def children(self):
nodelist = [("lval", self.lval), ("rval", self.rval)]
return tuple(nodelist)
attr_names = ("op",)
# Special class for grouping statements (no newlines in between)
class GroupCompound(Node):
def __init__(self, statements, coord=None):
self.statements = statements
self.coord = coord
def __repr__(self):
return "GroupCompound({%r})" % self.statements
def children(self):
nodelist = []
count = 0
for i in self.statements:
nodelist.append(("stmt[%r]" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class Compound(Node):
def __init__(self, statements, coord=None):
self.statements = statements
self.coord = coord
def __repr__(self):
return "Compound({%r})" % self.statements
def children(self):
nodelist = []
count = 0
for i in self.statements:
nodelist.append(("stmt[%r]" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class ArgList(Node):
def __init__(self, arglist, coord=None):
self.arglist = arglist
self.coord = coord
def __repr__(self):
return "ArgList(%r)" % (self.arglist)
def children(self):
nodelist = []
count = 0
for i in self.arglist:
nodelist.append(("arglist[%r]" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class ArrayRef(Node):
def __init__(self, name, subscript, coord=None, extra=dict()):
self.name = name
self.subscript = subscript
self.coord = coord
self.extra = extra
def __repr__(self):
return "ArrayRef(%r%r)" % (self.name, self.subscript)
def children(self):
nodelist = [("name", self.name)]
count = 0
for i in self.subscript:
nodelist.append(("subscript %r" % count, i))
count += 1
return tuple(nodelist)
attr_names = ()
class BinOp(Node):
def __init__(self, lval, op, rval, coord=None):
self.lval = lval
self.rval = rval
self.op = op
self.coord = coord
def __repr__(self):
return "BinOp(%r %r %r)" % (self.lval, self.op, self.rval)
def children(self):
nodelist = []
nodelist.append(("lval", self.lval))
nodelist.append(("rval", self.rval))
return tuple(nodelist)
attr_names = ("op",)
class FuncDecl(Node):
def __init__(self, typeid, arglist, compound, coord=None):
self.typeid = typeid
self.arglist = arglist
self.compound = compound
self.coord = coord
def __repr__(self):
return "FuncDecl(%r %r %r)" % (self.typeid,
self.arglist,
self.compound)
def children(self):
nodelist = []
nodelist.append(("typeid", self.typeid))
nodelist.append(("arglist", self.arglist))
nodelist.append(("compound", self.compound))
return tuple(nodelist)
attr_names = ()
class ForLoop(Node):
    """AST node for a C-style loop: for (init; cond; inc) compound."""
    def __init__(self, init, cond, inc, compound, coord=None):
        self.init = init
        self.cond = cond
        self.inc = inc
        self.compound = compound
        # Bug fix: coord was accepted but never stored, unlike every
        # other node class in this module.
        self.coord = coord
    def __repr__(self):
        # NOTE(review): assumes init is an Assignment whose lval is an Id
        # (has a .name) -- confirm all constructors guarantee this.
        return "\nForLoop(%r, %r, %r, %r) " % (self.init.lval.name, \
                                               self.cond, \
                                               self.inc, \
                                               self.compound
                                               )
    def children(self):
        nodelist = []
        nodelist.append(("init", self.init))
        nodelist.append(("cond", self.cond))
        nodelist.append(("inc", self.inc))
        nodelist.append(("compound", self.compound))
        return tuple(nodelist)
    attr_names = ()
class IfThen(Node):
    """AST node for an if statement without an else branch."""
    def __init__(self, cond, compound, coord=None):
        self.cond = cond
        self.compound = compound
        # Bug fix: coord was accepted but silently dropped; store it like
        # every other node class does.
        self.coord = coord
    def __repr__(self):
        return "If(%r) then {%r}" % (self.cond, \
                                     self.compound)
    def children(self):
        nodelist = []
        nodelist.append(("cond", self.cond))
        nodelist.append(("compound", self.compound))
        return tuple(nodelist)
    attr_names = ()
class IfThenElse(Node):
def __init__(self, cond, compound1, compound2, coord=None):
self.cond = cond
self.compound1 = compound1
self.compound2 = compound2
def __repr__(self):
return "If(%r) then {%r} else {%r}" % (self.cond, \
self.compound1, \
self.compound2)
def children(self):
nodelist = []
nodelist.append(("cond", self.cond))
nodelist.append(("compoundthen", self.compound1))
nodelist.append(("compoundelse", self.compound2))
return tuple(nodelist)
attr_names = ()
class Return(Node):
def __init__(self, expr):
self.expr = expr
def __repr__(self):
return "Return(%r)" % self.expr
def children(self):
nodelist = [("expr", self.expr)]
return tuple(nodelist)
attr_names = ()
# EXTRAS FOR OPTIMIZATION INFORMATION PURPOSES
class Transpose(Node):
def __init__(self, type, name, base_name, hst_name):
self.type = type
self.name = name
self.base_name = base_name
self.hst_name = hst_name
def __repr__(self):
return "Transpose(%r %r %r %r)" % (self.type, self.name, self.base_name, self.hst_name)
def children(self):
nodelist = [("name", self.name), ("base_name", self.base_name), ("hst_name", self.hst_name)]
return tuple(nodelist)
attr_names = ('type',)
class KernelArgDefine(Node):
def __init__(self, name):
self.name = name
def __repr__(self):
return "KernelArgDefine(%r)" % self.name
def children(self):
nodelist = [("name", self.name)]
return tuple(nodelist)
class Stencil(Node):
def __init__(self, name, local_name, size):
self.name = name
self.local_name = local_name
self.size = size
def __repr__(self):
return "Stencil(%r %r %r)" % (self.name, self.local_name, self.size)
def children(self):
nodelist = [("name", self.name), ("local_name", self.local_name)]
return tuple(nodelist)
attr_names = ('size',)
class Block(Node):
def __init__(self, name, size):
self.name = name
self.size = size
def __repr__(self):
return "Block(%r %r)" % (self.name, self.size)
def children(self):
nodelist = [("name", self.name)]
return tuple(nodelist)
attr_names = ('size',)
class ParDim(Node):
def __init__(self, par_dim):
self.par_dim = par_dim
def __repr__(self):
return "ParDim(%r)" % self.par_dim
def children(self):
return tuple([])
attr_names = ('par_dim',)
class ProgramName(Node):
def __init__(self, name):
self.name = name
def __repr__(self):
return "ProgramName(%r)" % self.name
def children(self):
return tuple([])
attr_names = ('name',)
class RawCpp(Node):
def __init__(self, code):
self.code = code
def __repr__(self):
return "RawCpp(%r)" % self.code
def children(self):
return tuple([])
attr_names = ('code',)
class Type(Node):
def __init__(self, type):
self.type = type
def __repr__(self):
return "Type(%r)" % self.type
def children(self):
return tuple([])
attr_names = ('type',)
class Ref(Node):
def __init__(self, expr):
if isinstance(expr, str):
expr = Id(expr)
self.expr = expr
def __repr__(self):
return "Ref(%r)" % self.expr
def children(self):
nodelist = [("var", self.expr)]
return tuple(nodelist)
attr_names = ()
class Cout(Node):
def __init__(self, print_args):
self.print_args = print_args
def __repr__(self):
return "Cout(%r)" % self.print_args
def children(self):
nodelist = [("print_args", self.print_args)]
return tuple(nodelist)
attr_names = ()
| mit |
rrampage/rethinkdb | test/common/vcoptparse.py | 32 | 9768 | # Copyright 2010-2012 RethinkDB, all rights reserved.
"""
vcoptparse is short for Value-Centric Option Parser. It's a tiny argument parsing library. It has
less features than optparse or argparse, but it kicks more ass.
optparse and argparse allow the client to specify the flags that should be parsed, and as an
afterthought specify what keys should appear in the options dictionary when the parse is over.
vcoptparse works the other way around: you specify the keys and how to determine the keys from the
command line. That's why it's called "value-centric".
Here is a simple example:
>>> op = OptParser()
>>> op["verbose"] = BoolFlag("--verbose")
>>> op["count"] = IntFlag("--count", 5) # Default count is 5
>>> op["infiles"] = ManyPositionalArgs()
>>> op.parse(["foo.py", "--count", "5", "file1.txt", "file2.txt"])
{'count': 5, 'verbose': False, 'infiles': ['file1.txt', 'file2.txt']}
"""
class NoValueClass(object):
    """Type of the NoValue sentinel below."""
    pass
NoValue = NoValueClass()   # sentinel meaning "no value was specified"
class Arg(object):
    """Base class for all argument parsers registered on an OptParser."""
    pass
class OptError(StandardError):
    """Raised when the command line cannot be parsed."""
    pass
class OptParser(object):
    """Value-centric option parser: maps result-dict keys to Arg parsers.

    Parsers are kept both by key (for lookup) and in insertion order
    (positional arguments are consumed in registration order).
    """
    def __init__(self):
        self.parsers_by_key = {}
        self.parsers_in_order = []
    def __setitem__(self, key, parser):
        assert isinstance(parser, Arg)
        # re-registering a key replaces its previous parser
        if key in self.parsers_by_key: del self[key]
        # the same parser instance may not serve two keys
        assert parser not in self.parsers_by_key.values()
        self.parsers_by_key[key] = parser
        self.parsers_in_order.append((key, parser))
    def __getitem__(self, key):
        return self.parsers_by_key[key]
    def __delitem__(self, key):
        self.parsers_in_order.remove((key, self.parsers_by_key[key]))
        del self.parsers_by_key[key]
    def parse(self, args):
        """Parse an argv-style list (element 0 is the program name).

        Returns a dict mapping every registered key to its parsed value.
        Raises OptError on unknown flags, missing required values, or
        leftover positional arguments.
        """
        args = args[1:] # Cut off name of program
        values = dict((key, NoValue) for key in self.parsers_by_key.keys())
        def name_for_key(key):
            # parsers may carry a display name; fall back to the key itself
            return getattr(self, "name", key) if False else getattr(self.parsers_by_key[key], "name", key)
        def set_value(key, new_value):
            # the parser's combiner decides how repeats merge (default: forbid)
            combiner = getattr(self.parsers_by_key[key], "combiner", enforce_one_combiner)
            try:
                values[key] = combiner(values[key], new_value)
            except OptError as e:
                raise OptError(str(e) % {"name": name_for_key(key)})
        # Build flag table
        flags = {}
        for key, parser in self.parsers_in_order:
            if hasattr(parser, "flags"):
                for flag in parser.flags:
                    assert flag.startswith("-")
                    if flag in flags:
                        raise ValueError("The flag %r has two different meanings." % flag)
                    flags[flag] = (key, parser)
        # Handle flag arguments and store positional arguments
        positionals = []
        while args:
            arg = args.pop(0)
            if arg.startswith("-"):
                if arg in flags:
                    key, parser = flags[arg]
                    # parser.flag() may consume further elements of args
                    set_value(key, parser.flag(arg, args))
                else:
                    raise OptError("Don't know how to handle flag %r" % arg)
            else:
                positionals.append(arg)
        # Handle positional arguments (in parser registration order)
        for key, parser in self.parsers_in_order:
            if hasattr(parser, "positional"):
                set_value(key, parser.positional(positionals))
        if positionals:
            raise OptError("Unexpected extra positional argument(s): %s" % ", ".join(repr(x) for x in positionals))
        # Apply defaults
        for key, parser in self.parsers_by_key.iteritems():
            if values[key] is NoValue:
                if hasattr(parser, "default") and parser.default is not NoValue:
                    values[key] = parser.default
                else:
                    raise OptError("You need to specify a value for %r" % name_for_key(key))
        return values
# Combiners (indicate how to combine repeat specifications of the same flag)
def most_recent_combiner(old, new):
    """Later occurrences of an option silently override earlier ones."""
    return new

def enforce_one_combiner(old, new):
    """Refuse repeat occurrences of the same option."""
    if old is not NoValue:
        raise OptError("%(name)r should only be specified once.")
    return new

def append_combiner(old, new):
    """Collect every occurrence of the option into a list."""
    previous = [] if old is NoValue else old
    return previous + [new]
# Converters (indicate how to convert from string arguments to values)
def bool_converter(x):
    """Parse a yes/no style string into a bool, raising OptError otherwise."""
    lowered = x.lower()
    if lowered in ("yes", "true", "y", "t"):
        return True
    if lowered in ("no", "false", "n", "f"):
        return False
    raise OptError("Expected a yes/no value. Got %r." % x)

def int_converter(x):
    """Parse a string as an integer, raising OptError on failure."""
    try:
        return int(x)
    except ValueError:
        raise OptError("Expected an integer. Got %r." % x)

def float_converter(x):
    """Parse a string as a float, raising OptError on failure."""
    try:
        return float(x)
    except ValueError:
        raise OptError("Expected a float. Got %r." % x)

def choice_converter(choices):
    """Return a converter accepting only strings from `choices`."""
    def check(x):
        if x not in choices:
            raise OptError("Expected one of %s. Got %r." % (", ".join(choices), x))
        return x
    return check
# Standard argument parsers for common situations
class BoolFlag(Arg):
def __init__(self, arg, invert=False):
assert isinstance(invert, bool)
self.flags = [arg]
self.default = invert
def flag(self, flag, args):
return not self.default
class ChoiceFlags(Arg):
def __init__(self, choices, default = NoValue):
assert all(isinstance(x, str) for x in choices)
self.flags = choices
self.default = default
def flag(self, flag, args):
return flag.lstrip("-")
class ValueFlag(Arg):
def __init__(self, name, converter = str, default = NoValue, combiner = enforce_one_combiner):
assert isinstance(name, str)
assert callable(converter)
assert callable(combiner)
self.flags = [name]
self.converter = converter
self.combiner = combiner
self.default = default
def flag(self, flag, args):
try: value = args.pop(0)
except IndexError:
raise OptError("Flag %r expects an argument." % flag)
try: value2 = self.converter(value)
except OptError as e:
raise OptError("Problem in argument to flag %r: %s" % (flag, e))
return value2
class StringFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, str, default = default)
class IntFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, int_converter, default = default)
class FloatFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, float_converter, default = default)
class ChoiceFlag(ValueFlag):
def __init__(self, name, choices, default = NoValue):
ValueFlag.__init__(self, name, choice_converter(choices), default = default)
class MultiValueFlag(Arg):
def __init__(self, name, converters = [str], default = NoValue, combiner = enforce_one_combiner):
assert isinstance(name, str)
assert all(callable(x) for x in converters)
assert callable(combiner)
self.flags = [name]
self.converters = converters
self.combiner = combiner
self.default = default
def flag(self, flag, args):
new_values = ()
args_gotten = 0
for converter in self.converters:
try: value = args.pop(0)
except IndexError:
raise OptError("Flag %r expects %d argument(s), but only got %d." % (flag, len(self.converters), args_gotten))
try: value2 = converter(value)
except OptError as e:
raise OptError("Problem in argument %d to flag %r: %s" % (args_gotten + 1, flag, e))
new_values += (value2, )
args_gotten += 1
return new_values
class AllArgsAfterFlag(Arg):
    """A flag that swallows every remaining argument, converting each one."""

    def __init__(self, name, converter = str, default = NoValue):
        assert isinstance(name, str)
        assert callable(converter)
        self.flags = [name]
        self.converter = converter
        self.default = default

    def flag(self, flag, args):
        """Convert and return all remaining *args*; the input list is emptied."""
        converted = []
        for raw in args:
            try:
                converted.append(self.converter(raw))
            except OptError as e:
                raise OptError("For %r: %s" % (flag, e))
        del args[:]  # We consume all arguments remaining
        return converted
class PositionalArg(Arg):
    """A single positional argument, optionally named for error messages."""

    def __init__(self, name = None, converter = str, default = NoValue):
        assert callable(converter)
        self.name = name
        self.converter = converter
        self.default = default

    def positional(self, args):
        """Pop and convert the next positional argument.

        When *args* is exhausted: raises OptError if this argument is
        required, otherwise returns the NoValue sentinel (the caller
        presumably substitutes self.default -- confirm against the parser).
        """
        try:
            raw = args.pop(0)
        except IndexError:
            if self.default is not NoValue:
                return NoValue
            if self.name is None:
                raise OptError("Too few positional arguments.")
            raise OptError("Too few positional arguments; need a value for %r." % self.name)
        try:
            converted = self.converter(raw)
        except OptError as e:
            if self.name is None:
                raise
            raise OptError("For %r: %s" % (self.name, e))
        return converted
class ManyPositionalArgs(Arg):
    """Consumes every remaining positional argument, converting each one."""

    def __init__(self, name = None, converter = str):
        assert callable(converter)
        self.name = name
        self.converter = converter

    def positional(self, args):
        """Convert and return all remaining *args*; the input list is emptied."""
        converted = []
        for raw in args:
            try:
                converted.append(self.converter(raw))
            except OptError as e:
                if self.name is None:
                    raise
                raise OptError("For %r: %s" % (self.name, e))
        del args[:]  # We consume all arguments remaining
        return converted
| agpl-3.0 |
Distrotech/libreoffice | wizards/com/sun/star/wizards/web/WWHID.py | 9 | 5081 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
# Help IDs for the Web Wizard dialogs.  Two base ranges are used:
#   34200 + n : the main wizard steps and the backgrounds dialog
#   41000 + n : the icon-sets and FTP dialogs
# Some offsets (e.g. 8, 68 in the 34200 range; 4 in the 41000 range) are
# left unassigned.
# Note: the original file assigned HID0_WEBWIZARD twice on consecutive
# lines; the duplicate has been removed.
HID0_WEBWIZARD = 34200 + 0
HID0_HELP = 34200 + 1
HID0_NEXT = 34200 + 2
HID0_PREV = 34200 + 3
HID0_CREATE = 34200 + 4
HID0_CANCEL = 34200 + 5
HID0_STATUS_DIALOG = 34200 + 6
# step 1
HID1_LST_SESSIONS = 34200 + 7
HID1_BTN_DEL_SES = 34200 + 9
# step 2
HID2_LST_DOCS = 34200 + 10
HID2_BTN_ADD_DOC = 34200 + 11
HID2_BTN_REM_DOC = 34200 + 12
HID2_BTN_DOC_UP = 34200 + 13
HID2_BTN_DOC_DOWN = 34200 + 14
HID2_TXT_DOC_TITLE = 34200 + 15
HID2_TXT_DOC_DESC = 34200 + 16
HID2_TXT_DOC_AUTHOR = 34200 + 17
HID2_LST_DOC_EXPORT = 34200 + 18
HID2_STATUS_ADD_DOCS = 34200 + 19
# step 3
HID3_IL_LAYOUTS_IMG1 = 34200 + 20
HID3_IL_LAYOUTS_IMG2 = 34200 + 21
HID3_IL_LAYOUTS_IMG3 = 34200 + 22
HID3_IL_LAYOUTS_IMG4 = 34200 + 23
HID3_IL_LAYOUTS_IMG5 = 34200 + 24
HID3_IL_LAYOUTS_IMG6 = 34200 + 25
HID3_IL_LAYOUTS_IMG7 = 34200 + 26
HID3_IL_LAYOUTS_IMG8 = 34200 + 27
HID3_IL_LAYOUTS_IMG9 = 34200 + 28
HID3_IL_LAYOUTS_IMG10 = 34200 + 29
HID3_IL_LAYOUTS_IMG11 = 34200 + 30
HID3_IL_LAYOUTS_IMG12 = 34200 + 31
HID3_IL_LAYOUTS_IMG13 = 34200 + 32
HID3_IL_LAYOUTS_IMG14 = 34200 + 33
HID3_IL_LAYOUTS_IMG15 = 34200 + 34
# step 4
HID4_CHK_DISPLAY_FILENAME = 34200 + 35
HID4_CHK_DISPLAY_DESCRIPTION = 34200 + 36
HID4_CHK_DISPLAY_AUTHOR = 34200 + 37
HID4_CHK_DISPLAY_CR_DATE = 34200 + 38
HID4_CHK_DISPLAY_UP_DATE = 34200 + 39
HID4_CHK_DISPLAY_FORMAT = 34200 + 40
HID4_CHK_DISPLAY_F_ICON = 34200 + 41
HID4_CHK_DISPLAY_PAGES = 34200 + 42
HID4_CHK_DISPLAY_SIZE = 34200 + 43
HID4_GRP_OPTIMAIZE_640 = 34200 + 44
HID4_GRP_OPTIMAIZE_800 = 34200 + 45
HID4_GRP_OPTIMAIZE_1024 = 34200 + 46
# step 5
HID5_LST_STYLES = 34200 + 47
HID5_BTN_BACKGND = 34200 + 48
HID5_BTN_ICONS = 34200 + 49
# step 6
HID6_TXT_SITE_TITLE = 34200 + 50
HID6_TXT_SITE_ICON = 34200 + 51
HID6_BTN_SITE_ICON = 34200 + 52
HID6_TXT_SITE_DESC = 34200 + 53
HID6_TXT_SITE_KEYWRDS = 34200 + 54
HID6_DATE_SITE_CREATED = 34200 + 55
HID6_DATE_SITE_UPDATED = 34200 + 56
HID6_NUM_SITE_REVISTS = 34200 + 57
HID6_TXT_SITE_EMAIL = 34200 + 58
HID6_TXT_SITE_COPYRIGHT = 34200 + 59
# step 7
HID7_BTN_PREVIEW = 34200 + 60
HID7_CHK_PUBLISH_LOCAL = 34200 + 61
HID7_TXT_LOCAL = 34200 + 62
HID7_BTN_LOCAL = 34200 + 63
HID7_CHK_PUBLISH_ZIP = 34200 + 64
HID7_TXT_ZIP = 34200 + 65
HID7_BTN_ZIP = 34200 + 66
HID7_CHK_PUBLISH_FTP = 34200 + 67
HID7_BTN_FTP = 34200 + 69
HID7_CHK_SAVE = 34200 + 70
HID7_TXT_SAVE = 34200 + 71
# web wizard backgrounds dialog
HID_BG = 34200 + 90
HID_BG_BTN_OTHER = 34200 + 91
HID_BG_BTN_NONE = 34200 + 92
HID_BG_BTN_OK = 34200 + 93
HID_BG_BTN_CANCEL = 34200 + 94
HID_BG_BTN_BACK = 34200 + 95
HID_BG_BTN_FW = 34200 + 96
HID_BG_BTN_IMG1 = 34200 + 97
HID_BG_BTN_IMG2 = 34200 + 98
HID_BG_BTN_IMG3 = 34200 + 99
HID_BG_BTN_IMG4 = 34200 + 100
HID_BG_BTN_IMG5 = 34200 + 101
HID_BG_BTN_IMG6 = 34200 + 102
HID_BG_BTN_IMG7 = 34200 + 103
HID_BG_BTN_IMG8 = 34200 + 104
HID_BG_BTN_IMG9 = 34200 + 105
HID_BG_BTN_IMG10 = 34200 + 106
HID_BG_BTN_IMG11 = 34200 + 107
HID_BG_BTN_IMG12 = 34200 + 108
# web wizard icons sets dialog
HID_IS = 41000 + 0
HID_IS_ICONSETS = 41000 + 1
HID_IS_BTN_NONE = 41000 + 2
HID_IS_BTN_OK = 41000 + 3
HID_IS_BTN_IMG1 = 41000 + 5
HID_IS_BTN_IMG2 = 41000 + 6
HID_IS_BTN_IMG3 = 41000 + 7
HID_IS_BTN_IMG4 = 41000 + 8
HID_IS_BTN_IMG5 = 41000 + 9
HID_IS_BTN_IMG6 = 41000 + 10
HID_IS_BTN_IMG7 = 41000 + 11
HID_IS_BTN_IMG8 = 41000 + 12
HID_IS_BTN_IMG9 = 41000 + 13
HID_IS_BTN_IMG10 = 41000 + 14
HID_IS_BTN_IMG11 = 41000 + 15
HID_IS_BTN_IMG12 = 41000 + 16
HID_IS_BTN_IMG13 = 41000 + 17
HID_IS_BTN_IMG14 = 41000 + 18
HID_IS_BTN_IMG15 = 41000 + 19
HID_IS_BTN_IMG16 = 41000 + 20
HID_IS_BTN_IMG17 = 41000 + 21
HID_IS_BTN_IMG18 = 41000 + 22
HID_IS_BTN_IMG19 = 41000 + 23
HID_IS_BTN_IMG20 = 41000 + 24
HID_IS_BTN_IMG21 = 41000 + 25
HID_IS_BTN_IMG22 = 41000 + 26
HID_IS_BTN_IMG23 = 41000 + 27
HID_IS_BTN_IMG24 = 41000 + 28
HID_IS_BTN_IMG25 = 41000 + 29
HID_IS_BTN_IMG26 = 41000 + 30
HID_IS_BTN_IMG27 = 41000 + 31
HID_IS_BTN_IMG28 = 41000 + 32
HID_IS_BTN_IMG29 = 41000 + 33
HID_IS_BTN_IMG30 = 41000 + 34
HID_IS_BTN_IMG31 = 41000 + 35
HID_IS_BTN_IMG32 = 41000 + 36
# web wizard ftp dialog
HID_FTP = 41000 + 40
HID_FTP_SERVER = 41000 + 41
HID_FTP_USERNAME = 41000 + 42
HID_FTP_PASS = 41000 + 43
HID_FTP_TEST = 41000 + 44
HID_FTP_TXT_PATH = 41000 + 45
HID_FTP_BTN_PATH = 41000 + 46
HID_FTP_OK = 41000 + 47
HID_FTP_CANCEL = 41000 + 48
| gpl-3.0 |
geotagx/pybossa | test/test_api/test_task_api.py | 1 | 22789 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import json
from default import db, with_context
from nose.tools import assert_equal
from test_api import TestAPI
from mock import patch, call
from factories import ProjectFactory, TaskFactory, TaskRunFactory, UserFactory
from pybossa.repositories import ProjectRepository
from pybossa.repositories import TaskRepository
from pybossa.repositories import ResultRepository
# Module-level repository helpers shared by every test in this module.
project_repo = ProjectRepository(db)
task_repo = TaskRepository(db)
result_repo = ResultRepository(db)
class TestTaskAPI(TestAPI):
    """Integration tests for the /api/task REST endpoint (GET/POST/PUT/DELETE).

    Consistency fix: three tests were missing the @with_context decorator
    that every other test in this class uses; they now carry it too.
    """

    def create_result(self, n_results=1, n_answers=1, owner=None,
                      filter_by=False):
        """Create n_results tasks with n_answers task runs each and return
        the result(s) stored for project 1."""
        if not owner:
            owner = UserFactory.create()
        project = ProjectFactory.create(owner=owner)
        tasks = []
        for i in range(n_results):
            tasks.append(TaskFactory.create(n_answers=n_answers,
                                            project=project))
        for i in range(n_answers):
            for task in tasks:
                TaskRunFactory.create(task=task, project=project)
        if filter_by:
            return result_repo.filter_by(project_id=1)
        else:
            return result_repo.get_by(project_id=1)

    @with_context
    def test_task_query_without_params(self):
        """ Test API Task query"""
        project = ProjectFactory.create()
        t1 = TaskFactory.create(created='2015-01-01T14:37:30.642119', info={'question': 'answer'})
        tasks = TaskFactory.create_batch(8, project=project, info={'question': 'answer'})
        t2 = TaskFactory.create(created='2019-01-01T14:37:30.642119', info={'question': 'answer'})
        tasks.insert(0, t1)
        tasks.append(t2)
        res = self.app.get('/api/task')
        tasks = json.loads(res.data)
        assert len(tasks) == 10, tasks
        task = tasks[0]
        assert task['info']['question'] == 'answer', task

        # The output should have a mime-type: application/json
        assert res.mimetype == 'application/json', res

        # Desc filter
        url = "/api/task?desc=true"
        res = self.app.get(url)
        data = json.loads(res.data)
        err_msg = "It should get the last item first."
        assert data[0]['created'] == tasks[len(tasks)-1]['created'], err_msg

    @with_context
    def test_task_query_without_params_with_context(self):
        """ Test API Task query with context"""
        user = UserFactory.create()
        project_oc = ProjectFactory.create(owner=user)
        project_two = ProjectFactory.create()
        TaskFactory.create_batch(10, project=project_oc, info={'question': 'answer'})
        TaskFactory.create_batch(10, project=project_two, info={'question': 'answer'})
        res = self.app.get('/api/task?api_key=' + user.api_key)
        tasks = json.loads(res.data)
        assert len(tasks) == 10, tasks
        for task in tasks:
            assert task['project_id'] == project_oc.id, task
            assert task['info']['question'] == 'answer', task

        # The output should have a mime-type: application/json
        assert res.mimetype == 'application/json', res

        # With all=1 the user sees every project's tasks, not only their own.
        res = self.app.get('/api/task?api_key=' + user.api_key + "&all=1")
        tasks = json.loads(res.data)
        assert len(tasks) == 20, tasks

    @with_context
    def test_task_query_with_params(self):
        """Test API query for task with params works"""
        project = ProjectFactory.create()
        tasks = TaskFactory.create_batch(10, project=project)
        # Test for real field
        res = self.app.get("/api/task?project_id=1")
        data = json.loads(res.data)
        # Should return one result
        assert len(data) == 10, data
        # Correct result
        assert data[0]['project_id'] == 1, data

        # Valid field but wrong value
        res = self.app.get("/api/task?project_id=99999999")
        data = json.loads(res.data)
        assert len(data) == 0, data

        # Multiple fields
        res = self.app.get('/api/task?project_id=1&state=ongoing')
        data = json.loads(res.data)
        # One result
        assert len(data) == 10, data
        # Correct result
        assert data[0]['project_id'] == 1, data
        assert data[0]['state'] == u'ongoing', data

        # Limits
        res = self.app.get("/api/task?project_id=1&limit=5")
        data = json.loads(res.data)
        for item in data:
            assert item['project_id'] == 1, item
        assert len(data) == 5, data

        # Keyset pagination
        url = "/api/task?project_id=1&limit=5&last_id=%s" % tasks[4].id
        res = self.app.get(url)
        data = json.loads(res.data)
        for item in data:
            assert item['project_id'] == 1, item
        assert len(data) == 5, data
        assert data[0]['id'] == tasks[5].id, data

    @with_context
    def test_task_query_with_params_with_context(self):
        """Test API query for task with params works with context"""
        user = UserFactory.create()
        user_two = UserFactory.create()
        project_oc = ProjectFactory.create(owner=user)
        project_two = ProjectFactory.create()
        tasks = TaskFactory.create_batch(10, project=project_oc)
        TaskFactory.create_batch(10, project=project_two)
        # Test for real field
        res = self.app.get("/api/task?project_id="+ str(project_oc.id) + "&api_key=" + user.api_key)
        data = json.loads(res.data)
        # Should return then results
        assert len(data) == 10, data
        # Correct result
        for t in data:
            assert t['project_id'] == project_oc.id, t

        res = self.app.get("/api/task?api_key=" + user.api_key + "&all=1")
        data = json.loads(res.data)
        # Should return one result
        assert len(data) == 20, data

        # Valid field but wrong value
        res = self.app.get("/api/task?project_id=99999999&api_key=" + user.api_key)
        data = json.loads(res.data)
        assert len(data) == 0, data

        # Multiple fields
        res = self.app.get('/api/task?project_id=1&state=ongoing&api_key=' + user.api_key)
        data = json.loads(res.data)
        # One result
        assert len(data) == 10, data
        # Correct result
        for t in data:
            assert t['project_id'] == project_oc.id, data
            assert t['state'] == u'ongoing', data

        # Limits
        res = self.app.get("/api/task?project_id=1&limit=5&api_key=" + user.api_key)
        data = json.loads(res.data)
        assert len(data) == 5, data
        for item in data:
            assert item['project_id'] == project_oc.id, item

        # Keyset pagination
        url = "/api/task?project_id=1&limit=5&last_id=%s&api_key=%s" % (tasks[4].id, user.api_key)
        res = self.app.get(url)
        data = json.loads(res.data)
        assert len(data) == 5, data
        assert data[0]['id'] == tasks[5].id, data
        for item in data:
            assert item['project_id'] == project_oc.id, item

        # Test for real field with user_two
        res = self.app.get("/api/task?project_id="+ str(project_oc.id) + "&api_key=" + user_two.api_key)
        data = json.loads(res.data)
        # Should return then results
        assert len(data) == 0, data
        # Test for real field with user_two
        res = self.app.get("/api/task?all=1&project_id="+ str(project_oc.id) + "&api_key=" + user_two.api_key)
        data = json.loads(res.data)
        # Should return then results
        assert len(data) == 10, data
        # Correct result
        for t in data:
            assert t['project_id'] == project_oc.id, t

        res = self.app.get("/api/task?api_key=" + user_two.api_key + "&all=1")
        data = json.loads(res.data)
        # Should return one result
        assert len(data) == 20, data

        # Valid field but wrong value
        res = self.app.get("/api/task?project_id=99999999&api_key=" + user_two.api_key)
        data = json.loads(res.data)
        assert len(data) == 0, data

        # Multiple fields
        res = self.app.get('/api/task?project_id=1&state=ongoing&api_key=' + user_two.api_key)
        data = json.loads(res.data)
        # One result
        assert len(data) == 0, data
        res = self.app.get('/api/task?all=1&project_id=1&state=ongoing&api_key=' + user_two.api_key)
        data = json.loads(res.data)
        # One result
        assert len(data) == 10, data
        # Correct result
        for t in data:
            assert t['project_id'] == project_oc.id, data
            assert t['state'] == u'ongoing', data

        # Limits
        res = self.app.get("/api/task?project_id=1&limit=5&api_key=" + user_two.api_key)
        data = json.loads(res.data)
        assert len(data) == 0, data
        res = self.app.get("/api/task?all=1&project_id=1&limit=5&api_key=" + user_two.api_key)
        data = json.loads(res.data)
        assert len(data) == 5, data
        for item in data:
            assert item['project_id'] == project_oc.id, item

        # Keyset pagination
        url = "/api/task?project_id=1&limit=5&last_id=%s&api_key=%s" % (tasks[4].id, user_two.api_key)
        res = self.app.get(url)
        data = json.loads(res.data)
        assert len(data) == 0, data
        url = "/api/task?all=1&project_id=1&limit=5&last_id=%s&api_key=%s" % (tasks[4].id, user_two.api_key)
        res = self.app.get(url)
        data = json.loads(res.data)
        assert len(data) == 5, data
        assert data[0]['id'] == tasks[5].id, data
        for item in data:
            assert item['project_id'] == project_oc.id, item

    @with_context
    def test_task_post(self):
        """Test API Task creation"""
        admin = UserFactory.create()
        user = UserFactory.create()
        non_owner = UserFactory.create()
        project = ProjectFactory.create(owner=user)
        data = dict(project_id=project.id, info='my task data')
        root_data = dict(project_id=project.id, info='my root task data')

        # anonymous user
        # no api-key
        res = self.app.post('/api/task', data=json.dumps(data))
        error_msg = 'Should not be allowed to create'
        assert_equal(res.status, '401 UNAUTHORIZED', error_msg)

        ### real user but not allowed as not owner!
        res = self.app.post('/api/task?api_key=' + non_owner.api_key,
                            data=json.dumps(data))
        error_msg = 'Should not be able to post tasks for projects of others'
        assert_equal(res.status, '403 FORBIDDEN', error_msg)

        # now a real user
        res = self.app.post('/api/task?api_key=' + user.api_key,
                            data=json.dumps(data))
        assert res.data, res
        datajson = json.loads(res.data)
        out = task_repo.get_task(datajson['id'])
        assert out, out
        assert_equal(out.info, 'my task data'), out
        assert_equal(out.project_id, project.id)

        # now the root user
        res = self.app.post('/api/task?api_key=' + admin.api_key,
                            data=json.dumps(root_data))
        assert res.data, res
        datajson = json.loads(res.data)
        out = task_repo.get_task(datajson['id'])
        assert out, out
        assert_equal(out.info, 'my root task data'), out
        assert_equal(out.project_id, project.id)

        # POST with not JSON data
        url = '/api/task?api_key=%s' % user.api_key
        res = self.app.post(url, data=data)
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'POST', err
        assert err['exception_cls'] == 'ValueError', err

        # POST with not allowed args
        res = self.app.post(url + '&foo=bar', data=json.dumps(data))
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'POST', err
        assert err['exception_cls'] == 'AttributeError', err

        # POST with fake data
        data['wrongfield'] = 13
        res = self.app.post(url, data=json.dumps(data))
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'POST', err
        assert err['exception_cls'] == 'TypeError', err

    @with_context
    def test_task_post_with_reserved_fields_returns_error(self):
        user = UserFactory.create()
        project = ProjectFactory.create(owner=user)
        data = {'created': 'today',
                'state': 'completed',
                'id': 222, 'project_id': project.id}

        res = self.app.post('/api/task?api_key=' + user.api_key,
                            data=json.dumps(data))

        assert res.status_code == 400, res.status_code
        error = json.loads(res.data)
        assert error['exception_msg'] == "Reserved keys in payload", error

    @with_context
    def test_task_put_with_reserved_fields_returns_error(self):
        user = UserFactory.create()
        project = ProjectFactory.create(owner=user)
        task = TaskFactory.create(project=project)
        url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)
        data = {'created': 'today',
                'state': 'completed',
                'id': 222}

        res = self.app.put(url, data=json.dumps(data))

        assert res.status_code == 400, res.status_code
        error = json.loads(res.data)
        assert error['exception_msg'] == "Reserved keys in payload", error

    @with_context
    def test_task_update(self):
        """Test API task update"""
        admin = UserFactory.create()
        user = UserFactory.create()
        non_owner = UserFactory.create()
        project = ProjectFactory.create(owner=user)
        task = TaskFactory.create(project=project)
        root_task = TaskFactory.create(project=project)
        data = {'n_answers': 1}
        datajson = json.dumps(data)
        root_data = {'n_answers': 4}
        root_datajson = json.dumps(root_data)

        ## anonymous
        res = self.app.put('/api/task/%s' % task.id, data=data)
        assert_equal(res.status, '401 UNAUTHORIZED', res.status)
        ### real user but not allowed as not owner!
        url = '/api/task/%s?api_key=%s' % (task.id, non_owner.api_key)
        res = self.app.put(url, data=datajson)
        assert_equal(res.status, '403 FORBIDDEN', res.status)

        ### real user
        url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)
        res = self.app.put(url, data=datajson)
        out = json.loads(res.data)
        assert_equal(res.status, '200 OK', res.data)
        assert_equal(task.n_answers, data['n_answers'])
        assert_equal(task.state, 'ongoing')
        assert task.id == out['id'], out

        ### root
        res = self.app.put('/api/task/%s?api_key=%s' % (root_task.id, admin.api_key),
                           data=root_datajson)
        assert_equal(res.status, '200 OK', res.data)
        assert_equal(root_task.n_answers, root_data['n_answers'])
        assert_equal(task.state, 'ongoing')

        # PUT with not JSON data
        res = self.app.put(url, data=data)
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'PUT', err
        assert err['exception_cls'] == 'ValueError', err

        # PUT with not allowed args
        res = self.app.put(url + "&foo=bar", data=json.dumps(data))
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'PUT', err
        assert err['exception_cls'] == 'AttributeError', err

        # PUT with fake data
        data['wrongfield'] = 13
        res = self.app.put(url, data=json.dumps(data))
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'PUT', err
        assert err['exception_cls'] == 'TypeError', err

    @with_context
    def test_task_update_state(self):
        """Test API task n_answers updates state properly."""
        user = UserFactory.create()
        project = ProjectFactory.create(owner=user)
        task = TaskFactory.create(project=project, n_answers=1,
                                  state='ongoing')
        data = {'n_answers': 2}
        datajson = json.dumps(data)

        url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)
        res = self.app.put(url, data=datajson)
        out = json.loads(res.data)
        assert_equal(res.status, '200 OK', res.data)
        assert_equal(task.n_answers, data['n_answers'])
        assert_equal(task.state, 'ongoing')
        assert task.id == out['id'], out

        task.state = 'completed'
        task_repo.update(task)

        data = {'n_answers': 1}
        datajson = json.dumps(data)

        res = self.app.put(url, data=datajson)
        out = json.loads(res.data)
        assert_equal(res.status, '200 OK', res.data)
        assert_equal(task.n_answers, data['n_answers'])
        assert_equal(task.state, 'completed')
        assert task.id == out['id'], out

        data = {'n_answers': 5}
        datajson = json.dumps(data)

        res = self.app.put(url, data=datajson)
        out = json.loads(res.data)
        assert_equal(res.status, '200 OK', res.data)
        assert_equal(task.n_answers, data['n_answers'])
        assert_equal(task.state, 'ongoing')
        assert task.id == out['id'], out

    @with_context
    def test_task_delete(self):
        """Test API task delete"""
        admin = UserFactory.create()
        user = UserFactory.create()
        non_owner = UserFactory.create()
        project = ProjectFactory.create(owner=user)
        task = TaskFactory.create(project=project)
        root_task = TaskFactory.create(project=project)

        ## anonymous
        res = self.app.delete('/api/task/%s' % task.id)
        error_msg = 'Anonymous should not be allowed to update'
        assert_equal(res.status, '401 UNAUTHORIZED', error_msg)

        ### real user but not allowed as not owner!
        url = '/api/task/%s?api_key=%s' % (task.id, non_owner.api_key)
        res = self.app.delete(url)
        error_msg = 'Should not be able to update tasks of others'
        assert_equal(res.status, '403 FORBIDDEN', error_msg)

        #### real user
        # DELETE with not allowed args
        res = self.app.delete(url + "&foo=bar")
        err = json.loads(res.data)
        assert res.status_code == 415, err
        assert err['status'] == 'failed', err
        assert err['target'] == 'task', err
        assert err['action'] == 'DELETE', err
        assert err['exception_cls'] == 'AttributeError', err

        # DELETE returns 204
        url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)
        res = self.app.delete(url)
        assert_equal(res.status, '204 NO CONTENT', res.data)
        assert res.data == '', res.data

        #### root user
        url = '/api/task/%s?api_key=%s' % (root_task.id, admin.api_key)
        res = self.app.delete(url)
        assert_equal(res.status, '204 NO CONTENT', res.data)

        tasks = task_repo.filter_tasks_by(project_id=project.id)
        assert task not in tasks, tasks
        assert root_task not in tasks, tasks

    @with_context
    @patch('pybossa.repositories.task_repository.uploader')
    def test_task_delete_deletes_zip_files(self, uploader):
        """Test API task delete deletes also zip files with tasks and taskruns"""
        admin = UserFactory.create()
        project = ProjectFactory.create(owner=admin)
        task = TaskFactory.create(project=project)
        url = '/api/task/%s?api_key=%s' % (task.id, admin.api_key)
        res = self.app.delete(url)
        expected = [call('1_project1_task_json.zip', 'user_1'),
                    call('1_project1_task_csv.zip', 'user_1'),
                    call('1_project1_task_run_json.zip', 'user_1'),
                    call('1_project1_task_run_csv.zip', 'user_1')]
        assert uploader.delete_file.call_args_list == expected

    @with_context
    def test_delete_task_cascade(self):
        """Test API delete task deletes associated taskruns"""
        task = TaskFactory.create()
        task_runs = TaskRunFactory.create_batch(3, task=task)
        url = '/api/task/%s?api_key=%s' % (task.id, task.project.owner.api_key)
        res = self.app.delete(url)

        assert_equal(res.status, '204 NO CONTENT', res.data)
        task_runs = task_repo.filter_task_runs_by(task_id=task.id)
        assert len(task_runs) == 0, "There should not be any task run for task"

    @with_context
    def test_delete_task_when_result_associated(self):
        """Test API delete task fails when a result is associated."""
        result = self.create_result()
        project = project_repo.get(result.project_id)

        url = '/api/task/%s?api_key=%s' % (result.task_id,
                                           project.owner.api_key)
        res = self.app.delete(url)
        assert_equal(res.status, '403 FORBIDDEN', res.status)

    @with_context
    def test_delete_task_when_result_associated_variation(self):
        """Test API delete task fails when a result is associated after
        increasing the n_answers changing its state from completed to
        ongoing."""
        result = self.create_result()
        project = project_repo.get(result.project_id)

        task = task_repo.get_task(result.task_id)
        task.n_answers = 100

        task_repo.update(task)

        url = '/api/task/%s?api_key=%s' % (result.task_id,
                                           project.owner.api_key)
        res = self.app.delete(url)
        assert_equal(res.status, '403 FORBIDDEN', res.status)
| agpl-3.0 |
paultcochrane/pyvisi | examples/basicContourExample.py | 1 | 2807 |
"""
Example of a basic contour plot
Will hopefully help me write a decent interface.
"""
import sys,os
# import the python visualisation interface
from pyvisi import *
# original vtk code
import vtk
import numarray
# NOTE(review): legacy Python-2 script (xrange, raw_input) built on the
# long-obsolete numarray package; it will not run on Python 3 as-is.

# generate the x and y grid data
x = numarray.arrayrange(-1, 1, stride=0.5, type='Float')
y = numarray.arrayrange(-1, 1, stride=0.5, type='Float')

# generate a matrix of repeated x values (c.f. repmat() in matlab)
xm = numarray.zeros([len(x), len(y)], type='Float')
for i in range(len(y)):
    xm[:,i] = x

# generate a matrix of repeated y values (c.f. repmat() in matlab)
ym = numarray.zeros([len(x), len(y)], type='Float')
for i in range(len(x)):
    ym[i,:] = y

sigma = 0.2  # the spread of the distribution

# generate the distribution (2D Gaussian evaluated on the grid)
distn = numarray.exp(-(xm*xm + ym*ym)/sigma)

# convert the x data into vtkFloatArray objects
xvtk = vtk.vtkFloatArray()
for i in range(len(x)):
    xvtk.InsertNextValue(x[i])

# convert the y data into vtkFloatArray objects
yvtk = vtk.vtkFloatArray()
for i in range(len(y)):
    yvtk.InsertNextValue(y[i])

# convert the distribution data into vtkFloatArray objects
distnvtk = vtk.vtkFloatArray()
for i in range(len(x)):
    for j in range(len(y)):
        distnvtk.InsertNextValue(distn[i,j])

# make the points to put into the grid
points = vtk.vtkPoints()
values = vtk.vtkFloatArray()
count = 0
for i in xrange(len(x)):
    for j in xrange(len(y)):
        points.InsertPoint(count, x[i], y[j], 0)
        values.InsertValue(count, distn[i,j])
        count += 1

# now make the strips (whatever they are...)
#strips = vtk.vtkCellArray()
#strips.InsertNextCell(len(x)*len(y)) # number of points
#for i in xrange(len(x)*len(y)):
    #strips.InsertCellPoint(i)
#strips.InsertCellPoint(0)
#strips.InsertCellPoint(1)
#strips.InsertCellPoint(7)
#strips.InsertCellPoint(6)
#strips.InsertCellPoint(2)
#strips.InsertCellPoint(3)
#strips.InsertCellPoint(5)
#strips.InsertCellPoint(4)

# NOTE(review): `strips` is created but never given any cells, so the
# polydata below carries points without topology -- confirm the intended
# surface actually renders.
strips = vtk.vtkCellArray()

# NOTE(review): p2c is never used after this; SetInput with a raw vtkPoints
# also looks suspect -- verify against the VTK API or remove as dead code.
p2c = vtk.vtkPointDataToCellData()
p2c.SetInput(points)

# set up the polygonal data object
polyData = vtk.vtkPolyData()
polyData.SetPoints(points)
polyData.SetStrips(strips)
polyData.GetPointData().SetScalars(values)

# displace the surface in z proportionally to the scalar values
warp = vtk.vtkWarpScalar()
warp.SetInput(polyData)
warp.SetScaleFactor(0.5)

# map the warped geometry to colours over the scalar range
contMapper = vtk.vtkPolyDataMapper()
contMapper.SetInput(warp.GetPolyDataOutput())
contMapper.SetScalarRange(polyData.GetScalarRange())

contActor = vtk.vtkActor()
contActor.SetMapper(contMapper)

# standard VTK render window + interactor setup
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

ren.AddActor(contActor)
renWin.SetSize(400,400)
ren.SetBackground(1,1,1)
iren.Initialize()
renWin.Render()
iren.Start()

#raw_input("Press enter to continue")

# vim: expandtab shiftwidth=4:
| gpl-2.0 |
stainsteelcrown/nonsense-story-generator | venv/lib/python2.7/site-packages/pip/vcs/__init__.py | 536 | 8748 | """Handles all VCS (version control) support"""
import os
import shutil
from pip.backwardcompat import urlparse, urllib
from pip.log import logger
from pip.util import (display_path, backup_dir, find_command,
rmtree, ask_path_exists)
__all__ = ['vcs', 'get_src_requirement']
class VcsSupport(object):
    """Registry of the available version-control backend classes."""

    # Shared across all instances: maps backend name -> backend class.
    _registry = {}
    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']

    def __init__(self):
        # Register more schemes with urlparse for various version control
        # systems, so URLs like git+ssh://host/path split correctly.
        urlparse.uses_netloc.extend(self.schemes)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment
        if getattr(urlparse, 'uses_fragment', None):
            urlparse.uses_fragment.extend(self.schemes)
        super(VcsSupport, self).__init__()

    def __iter__(self):
        return iter(self._registry)

    @property
    def backends(self):
        """All registered backend classes."""
        return list(self._registry.values())

    @property
    def dirnames(self):
        """Administrative directory names (e.g. '.git') of all backends."""
        return [backend.dirname for backend in self.backends]

    @property
    def all_schemes(self):
        """Every URL scheme supported by any registered backend."""
        collected = []
        for backend in self.backends:
            collected.extend(backend.schemes)
        return collected

    def register(self, cls):
        """Add *cls* to the registry unless its name is already taken."""
        if not hasattr(cls, 'name'):
            logger.warn('Cannot register VCS %s' % cls.__name__)
            return
        # First registration wins; later duplicates are silently ignored.
        self._registry.setdefault(cls.name, cls)

    def unregister(self, cls=None, name=None):
        """Remove a backend, identified either by *name* or by *cls*."""
        if name in self._registry:
            del self._registry[name]
        elif cls in self._registry.values():
            del self._registry[cls.name]
        else:
            logger.warn('Cannot unregister because no class or name given')

    def get_backend_name(self, location):
        """
        Return the name of the version control backend if found at given
        location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
        """
        for backend in self._registry.values():
            if os.path.exists(os.path.join(location, backend.dirname)):
                return backend.name
        return None

    def get_backend(self, name):
        """Return the backend class registered under *name* (case-insensitive),
        or None when unknown."""
        return self._registry.get(name.lower())

    def get_backend_from_location(self, location):
        """Return the backend class for the checkout at *location*, or None."""
        found = self.get_backend_name(location)
        return self.get_backend(found) if found else None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
def __init__(self, url=None, *args, **kwargs):
self.url = url
self._cmd = None
super(VersionControl, self).__init__(*args, **kwargs)
    def _filter(self, line):
        # Default output filter: emit every subprocess line at INFO level.
        return (logger.INFO, line)
def _is_local_repository(self, repo):
"""
posix absolute paths start with os.path.sep,
win32 ones ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or drive
@property
def cmd(self):
if self._cmd is not None:
return self._cmd
command = find_command(self.name)
logger.info('Found command %r at %r' % (self.name, command))
self._cmd = command
return command
def get_url_rev(self):
"""
Returns the correct repository URL and revision by parsing the given
repository URL
"""
error_message = (
"Sorry, '%s' is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
assert '+' in self.url, error_message % self.url
url = self.url.split('+', 1)[1]
scheme, netloc, path, query, frag = urlparse.urlsplit(url)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def get_info(self, location):
"""
Returns (url, revision), where both are strings
"""
assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
return self.get_url(location), self.get_revision(location)
    def normalize_url(self, url):
        """
        Normalize a URL for comparison by unquoting it and removing any trailing slash.
        """
        # urllib here is pip.backwardcompat's shim (urllib.parse on py3k).
        return urllib.unquote(url).rstrip('/')
    def compare_urls(self, url1, url2):
        """
        Compare two repo URLs for identity, ignoring incidental differences.
        """
        # Both sides are canonicalised (unquoted, trailing slash stripped)
        # before the equality test.
        return (self.normalize_url(url1) == self.normalize_url(url2))
    def parse_vcs_bundle_file(self, content):
        """
        Takes the contents of the bundled text file that explains how to revert
        the stripped off version control data of the given package and returns
        the URL and revision of it.
        """
        # Abstract hook: concrete backends must override.
        raise NotImplementedError
def obtain(self, dest):
"""
Called when installing or updating an editable package, takes the
source path of the checkout.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
"""
raise NotImplemented
def update(self, dest, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
"""
raise NotImplementedError
def check_destination(self, dest, url, rev_options, rev_display):
"""
Prepare a location to receive a checkout/clone.
Return True if the location is ready for (and requires) a
checkout/clone, False otherwise.
"""
checkout = True
prompt = False
if os.path.exists(dest):
checkout = False
if os.path.exists(os.path.join(dest, self.dirname)):
existing_url = self.get_url(dest)
if self.compare_urls(existing_url, url):
logger.info('%s in %s exists, and has correct URL (%s)' %
(self.repo_name.title(), display_path(dest),
url))
logger.notify('Updating %s %s%s' %
(display_path(dest), self.repo_name,
rev_display))
self.update(dest, rev_options)
else:
logger.warn('%s %s in %s exists with URL %s' %
(self.name, self.repo_name,
display_path(dest), existing_url))
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warn('Directory %s already exists, '
'and is not a %s %s.' %
(dest, self.name, self.repo_name))
prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
if prompt:
logger.warn('The plan is to install the %s repository %s' %
(self.name, url))
response = ask_path_exists('What to do? %s' % prompt[0],
prompt[1])
if response == 's':
logger.notify('Switching %s %s to %s%s' %
(self.repo_name, display_path(dest), url,
rev_display))
self.switch(dest, url, rev_options)
elif response == 'i':
# do nothing
pass
elif response == 'w':
logger.warn('Deleting %s' % display_path(dest))
rmtree(dest)
checkout = True
elif response == 'b':
dest_dir = backup_dir(dest)
logger.warn('Backing up %s to %s'
% (display_path(dest), dest_dir))
shutil.move(dest, dest_dir)
checkout = True
return checkout
def unpack(self, location):
if os.path.exists(location):
rmtree(location)
self.obtain(location)
def get_src_requirement(self, dist, location, find_tags=False):
raise NotImplementedError
def get_src_requirement(dist, location, find_tags):
    """Return the src requirement string for the editable checkout at *location*.

    Falls back to the distribution's own requirement when no VCS backend
    recognizes the directory.
    """
    backend = vcs.get_backend_from_location(location)
    if not backend:
        # No backend recognized this directory: warn and use the plain requirement.
        logger.warn('cannot determine version of editable source in %s (is not SVN checkout, Git clone, Mercurial clone or Bazaar branch)' % location)
        return dist.as_requirement()
    return backend().get_src_requirement(dist, location, find_tags)
| mit |
blondegeek/pymatgen | pymatgen/io/abinit/works.py | 2 | 74708 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit
"""
import os
import shutil
import time
import abc
import collections
import numpy as np
import copy
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .nodes import Dependency, Node, NodeError, NodeResults, FileNode, check_spectator
from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, ElasticTask, DdkTask,
BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask, TaskManager,
DteTask, EphTask, CollinearThenNonCollinearScfTask)
from .utils import Directory
from .netcdf import ETSF_Reader, NetcdfReader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
"PhononWfkqWork",
"GKKPWork",
"BecWork",
"DteWork",
]
class WorkResults(NodeResults):
    JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()

    @classmethod
    def from_node(cls, work):
        """Initialize an instance from a :class:`Work` instance."""
        results = super().from_node(work)
        # Register every file found in the work outdir in GridFs.
        # Warning: the files are assumed to be binary.
        gridfs_files = {}
        for filepath in work.outdir.list_filepaths():
            gridfs_files[os.path.basename(filepath)] = filepath
        results.register_gridfs_files(**gridfs_files)
        return results
class WorkError(NodeError):
    """Base class for the exceptions raised by :class:`Work` objects."""
class BaseWork(Node, metaclass=abc.ABCMeta):
    # Abstract container-like Node: subclasses hold a list of tasks and
    # are expected to be iterable over them.
    Error = WorkError
    Results = WorkResults
    # interface modeled after subprocess.Popen
    @property
    @abc.abstractmethod
    def processes(self):
        """Return a list of objects that support the `subprocess.Popen` protocol."""
    def poll(self):
        """
        Check if all child processes have terminated. Set and return returncode attribute.
        """
        return [task.poll() for task in self]
    def wait(self):
        """
        Wait for child processes to terminate. Set and return returncode attribute.
        """
        return [task.wait() for task in self]
    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processes, or None, if no data should be
        sent to the children.
        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]
    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]
    @property
    def ncores_reserved(self):
        """
        Returns the number of cores reserved in this moment.
        A core is reserved if it's still not running but
        we have submitted the task to the queue manager.
        """
        return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)
    @property
    def ncores_allocated(self):
        """
        Returns the number of CPUs allocated in this moment.
        A core is allocated if it's running a task or if we have
        submitted a task to the queue manager but the job is still pending.
        """
        return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
    @property
    def ncores_used(self):
        """
        Returns the number of cores used in this moment.
        A core is used if there's a job that is running on it.
        """
        return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or
        None if no task can be submitted at present.
        Raises:
            `StopIteration` if all tasks are done.
        """
        # All the tasks are done so raise an exception
        # that will be handled by the client code.
        if all(task.is_completed for task in self):
            raise StopIteration("All tasks completed.")
        for task in self:
            if task.can_run:
                return task
        # No task found, this usually happens when we have dependencies.
        # Beware of possible deadlocks here!
        logger.warning("Possible deadlock in fetch_task_to_run!")
        return None
    def fetch_alltasks_to_run(self):
        """
        Returns a list with all the tasks that can be submitted.
        Empty list if no task has been found.
        """
        return [task for task in self if task.can_run]
    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""
    def _setup(self, *args, **kwargs):
        # Internal hook: delegates to the subclass-provided setup().
        self.setup(*args, **kwargs)
    def connect_signals(self):
        """
        Connect the signals within the work.
        The :class:`Work` is responsible for catching the important signals raised from
        its task and raise new signals when some particular condition occurs.
        """
        for task in self:
            dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
    def disconnect_signals(self):
        """
        Disable the signals within the work. This function reverses the process of `connect_signals`
        """
        for task in self:
            try:
                dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task)
            except dispatcher.errors.DispatcherKeyError as exc:
                # The handler may have never been connected; not an error.
                logger.debug(str(exc))
    @property
    def all_ok(self):
        # True if every task in the work has reached status S_OK.
        return all(task.status == task.S_OK for task in self)
    #@check_spectator
    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It executes on_all_ok when all tasks in self have reached `S_OK`.
        """
        logger.debug("in on_ok with sender %s" % sender)
        if self.all_ok:
            if self.finalized:
                return AttrDict(returncode=0, message="Work has been already finalized")
            else:
                # Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
                self.finalized = True
                try:
                    results = AttrDict(**self.on_all_ok())
                except Exception as exc:
                    self.history.critical("on_all_ok raises %s" % str(exc))
                    self.finalized = False
                    raise
                # Signal to possible observers that the `Work` reached S_OK
                self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
                if self._finalized:
                    self.send_signal(self.S_OK)
                return results
        return AttrDict(returncode=1, message="Not all tasks are OK!")
    #@check_spectator
    def on_all_ok(self):
        """
        This method is called once the `Work` is completed i.e. when all tasks
        have reached status S_OK. Subclasses should provide their own implementation
        Returns:
            Dictionary that must contain at least the following entries:
            returncode:
                0 on success.
            message:
                a string that should provide a human-readable description of what has been performed.
        """
        return dict(returncode=0, message="Calling on_all_ok of the base class!")
    def get_results(self, **kwargs):
        """
        Method called once the calculations are completed.
        The base version returns a dictionary task_name: TaskResults for each task in self.
        """
        results = self.Results.from_node(self)
        return results
    def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
        """
        Generate task graph in the DOT language (only parents and children of this work).
        Args:
            engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
            graph_attr: Mapping of (attribute, value) pairs for the graph.
            node_attr: Mapping of (attribute, value) pairs set for all nodes.
            edge_attr: Mapping of (attribute, value) pairs set for all edges.
        Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
        """
        from graphviz import Digraph
        fg = Digraph("work", #filename="work_%s.gv" % os.path.basename(self.workdir),
            engine="fdp" if engine == "automatic" else engine)
        # Set graph attributes.
        # https://www.graphviz.org/doc/info/
        #fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
        fg.attr(label=repr(self))
        #fg.attr(fontcolor="white", bgcolor='purple:pink')
        fg.attr(rankdir="LR", pagedir="BL")
        #fg.attr(constraint="false", pack="true", packMode="clust")
        fg.node_attr.update(color='lightblue2', style='filled')
        #fg.node_attr.update(ranksep='equally')
        # Add input attributes.
        if graph_attr is not None:
            fg.graph_attr.update(**graph_attr)
        if node_attr is not None:
            fg.node_attr.update(**node_attr)
        if edge_attr is not None:
            fg.edge_attr.update(**edge_attr)
        def node_kwargs(node):
            # Common Digraph.node options (color + label) for a task/work node.
            return dict(
                #shape="circle",
                color=node.color_hex,
                label=(str(node) if not hasattr(node, "pos_str") else
                       node.pos_str + "\n" + node.__class__.__name__),
            )
        edge_kwargs = dict(arrowType="vee", style="solid")
        cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
        # Build cluster with tasks in *this* work
        cluster_name = "cluster%s" % self.name
        with fg.subgraph(name=cluster_name) as wg:
            wg.attr(**cluster_kwargs)
            wg.attr(label="%s (%s)" % (self.__class__.__name__, self.name))
            for task in self:
                wg.node(task.name, **node_kwargs(task))
                # Connect task to children
                for child in task.get_children():
                    # Test if child is in this cluster (self).
                    myg = wg if child in self else fg
                    myg.node(child.name, **node_kwargs(child))
                    # Find file extensions required by this task
                    i = [dep.node for dep in child.deps].index(task)
                    edge_label = "+".join(child.deps[i].exts)
                    myg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
                             **edge_kwargs)
                # Connect task to parents
                for parent in task.get_parents():
                    # Test if parent is in this cluster (self).
                    myg = wg if parent in self else fg
                    myg.node(parent.name, **node_kwargs(parent))
                    # Find file extensions required by this task
                    i = [dep.node for dep in task.deps].index(parent)
                    edge_label = "+".join(task.deps[i].exts)
                    myg.edge(parent.name, task.name, label=edge_label, color=parent.color_hex,
                             **edge_kwargs)
        # Treat the case in which we have a work producing output for tasks in *this* work.
        #for work in self.flow:
        #    children = work.get_children()
        #    if not children or all(child not in self for child in children):
        #        continue
        #    cluster_name = "cluster%s" % work.name
        #    seen = set()
        #    for child in children:
        #        if child not in self: continue
        #        # This is not needed, too much confusing
        #        #fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
        #        # Find file extensions required by work
        #        i = [dep.node for dep in child.deps].index(work)
        #        for ext in child.deps[i].exts:
        #            out = "%s (%s)" % (ext, work.name)
        #            fg.node(out)
        #            fg.edge(out, child.name, **edge_kwargs)
        #            key = (cluster_name, out)
        #            if key not in seen:
        #                fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
        #                seen.add(key)
        return fg
class NodeContainer(metaclass=abc.ABCMeta):
    """
    Mixin classes for `Work` and `Flow` objects providing helper functions
    to register tasks in the container. The helper functions call the
    `register` method of the container.
    """
    # TODO: Abstract protocol for containers
    @abc.abstractmethod
    def register_task(self, *args, **kwargs):
        """
        Register a task in the container.
        """
        # TODO: shall flow.register_task return a Task or a Work?
    # Helper functions: each one simply forces the `task_class` keyword
    # and delegates to register_task.
    def register_scf_task(self, *args, **kwargs):
        """Register a Scf task."""
        kwargs["task_class"] = ScfTask
        return self.register_task(*args, **kwargs)
    def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs):
        """Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2"""
        kwargs["task_class"] = CollinearThenNonCollinearScfTask
        return self.register_task(*args, **kwargs)
    def register_nscf_task(self, *args, **kwargs):
        """Register a nscf task."""
        kwargs["task_class"] = NscfTask
        return self.register_task(*args, **kwargs)
    def register_relax_task(self, *args, **kwargs):
        """Register a task for structural optimization."""
        kwargs["task_class"] = RelaxTask
        return self.register_task(*args, **kwargs)
    def register_phonon_task(self, *args, **kwargs):
        """Register a phonon task."""
        kwargs["task_class"] = PhononTask
        return self.register_task(*args, **kwargs)
    def register_elastic_task(self, *args, **kwargs):
        """Register an elastic task."""
        kwargs["task_class"] = ElasticTask
        return self.register_task(*args, **kwargs)
    def register_ddk_task(self, *args, **kwargs):
        """Register a ddk task."""
        kwargs["task_class"] = DdkTask
        return self.register_task(*args, **kwargs)
    def register_scr_task(self, *args, **kwargs):
        """Register a screening task."""
        kwargs["task_class"] = ScrTask
        return self.register_task(*args, **kwargs)
    def register_sigma_task(self, *args, **kwargs):
        """Register a sigma task."""
        kwargs["task_class"] = SigmaTask
        return self.register_task(*args, **kwargs)
    def register_dde_task(self, *args, **kwargs):
        """Register a Dde task."""
        kwargs["task_class"] = DdeTask
        return self.register_task(*args, **kwargs)
    def register_dte_task(self, *args, **kwargs):
        """Register a Dte task."""
        kwargs["task_class"] = DteTask
        return self.register_task(*args, **kwargs)
    def register_bec_task(self, *args, **kwargs):
        """Register a BEC task."""
        kwargs["task_class"] = BecTask
        return self.register_task(*args, **kwargs)
    def register_bse_task(self, *args, **kwargs):
        """Register a Bethe-Salpeter task."""
        kwargs["task_class"] = BseTask
        return self.register_task(*args, **kwargs)
    def register_eph_task(self, *args, **kwargs):
        """Register an electron-phonon task."""
        kwargs["task_class"] = EphTask
        return self.register_task(*args, **kwargs)
    def walknset_vars(self, task_class=None, *args, **kwargs):
        """
        Set the values of the ABINIT variables in the input files of the nodes
        Args:
            task_class: If not None, only the input files of the tasks belonging
                to class `task_class` are modified.
        Example:
            flow.walknset_vars(ecut=10, kptopt=4)
        """
        def change_task(task):
            # Predicate: True if this task's input should be modified.
            if task_class is not None and task.__class__ is not task_class: return False
            return True
        if self.is_work:
            for task in self:
                if not change_task(task): continue
                task.set_vars(*args, **kwargs)
        elif self.is_flow:
            for task in self.iflat_tasks():
                if not change_task(task): continue
                task.set_vars(*args, **kwargs)
        else:
            raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)
class Work(BaseWork, NodeContainer):
    """
    A Work is a list of (possibly connected) tasks.
    """

    def __init__(self, workdir=None, manager=None):
        """
        Args:
            workdir: Path to the working directory.
            manager: :class:`TaskManager` object.
        """
        super().__init__()
        self._tasks = []
        if workdir is not None:
            self.set_workdir(workdir)
        if manager is not None:
            self.set_manager(manager)

    def set_manager(self, manager):
        """Set the :class:`TaskManager` to use to launch the :class:`Task`."""
        self.manager = manager.deepcopy()
        for task in self:
            task.set_manager(manager)

    @property
    def flow(self):
        """The flow containing this :class:`Work`."""
        return self._flow

    def set_flow(self, flow):
        """Set the flow associated to this :class:`Work`."""
        if not hasattr(self, "_flow"):
            self._flow = flow
        else:
            if self._flow != flow:
                raise ValueError("self._flow != flow")

    @lazy_property
    def pos(self):
        """The position of self in the :class:`Flow`"""
        for i, work in enumerate(self.flow):
            if self == work:
                return i
        raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))

    @property
    def pos_str(self):
        """String representation of self.pos"""
        return "w" + str(self.pos)

    def set_workdir(self, workdir, chroot=False):
        """Set the working directory. Cannot be set more than once unless chroot is True"""
        if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
            raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
        self.workdir = os.path.abspath(workdir)
        # Directories with (input|output|temporary) data.
        # The work will use these directories to connect
        # itself to other works and/or to produce new data
        # that will be used by its children.
        self.indir = Directory(os.path.join(self.workdir, "indata"))
        self.outdir = Directory(os.path.join(self.workdir, "outdata"))
        self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
        self.wdir = Directory(self.workdir)

    def chroot(self, new_workdir):
        """Relocate the work (and the workdirs of all its tasks) below `new_workdir`."""
        self.set_workdir(new_workdir, chroot=True)
        for i, task in enumerate(self):
            new_tdir = os.path.join(self.workdir, "t" + str(i))
            task.set_workdir(new_tdir, chroot=True)

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return self._tasks.__iter__()

    def __getitem__(self, index):
        # `index` may be an integer or a slice object (renamed from `slice`
        # to avoid shadowing the builtin).
        return self._tasks[index]

    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks

    def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.
        """
        # Fixed: this method was mistakenly also named `opath_from_ext`, so it
        # was dead code silently shadowed by the real `opath_from_ext` below.
        return self.indir.path_in("in_" + ext)

    def opath_from_ext(self, ext):
        """
        Returns the path of the output file with extension ext.
        Use it when the file does not exist yet.
        """
        return self.outdir.path_in("out_" + ext)

    @property
    def processes(self):
        """The `subprocess.Popen`-like objects of the tasks."""
        return [task.process for task in self]

    @property
    def all_done(self):
        """True if all the :class:`Task` objects in the :class:`Work` are done."""
        return all(task.status >= task.S_DONE for task in self)

    @property
    def isnc(self):
        """True if norm-conserving calculation."""
        return all(task.isnc for task in self)

    @property
    def ispaw(self):
        """True if PAW calculation."""
        return all(task.ispaw for task in self)

    @property
    def status_counter(self):
        """
        Returns a `Counter` object that counts the number of tasks with
        given status (use the string representation of the status as key).
        """
        counter = collections.Counter()
        for task in self:
            counter[str(task.status)] += 1
        return counter

    def allocate(self, manager=None):
        """
        This function is called once we have completed the initialization
        of the :class:`Work`. It sets the manager of each task (if not already done)
        and defines the working directories of the tasks.
        Args:
            manager: :class:`TaskManager` object or None
        """
        for i, task in enumerate(self):
            if not hasattr(task, "manager"):
                # Set the manager
                # Use the one provided in input else the one of the work/flow.
                if manager is not None:
                    task.set_manager(manager)
                else:
                    # Look first in work and then in the flow.
                    if hasattr(self, "manager"):
                        task.set_manager(self.manager)
                    else:
                        task.set_manager(self.flow.manager)
            task_workdir = os.path.join(self.workdir, "t" + str(i))
            if not hasattr(task, "workdir"):
                task.set_workdir(task_workdir)
            else:
                if task.workdir != task_workdir:
                    raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))

    def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
        """
        Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.
        Args:
            obj: :class:`AbinitInput` instance or `Task` object.
            deps: Dictionary specifying the dependency of this node or list of dependencies
                None means that this obj has no dependency.
            required_files: List of strings with the path of the files used by the task.
                Note that the files must exist when the task is registered.
                Use the standard approach based on Works, Tasks and deps
                if the files will be produced in the future.
            manager:
                The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
                the `TaskManager` specified during the creation of the :class:`Work`.
            task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
        Returns:
            :class:`Task` object
        """
        task_workdir = None
        if hasattr(self, "workdir"):
            task_workdir = os.path.join(self.workdir, "t" + str(len(self)))
        if isinstance(obj, Task):
            task = obj
        else:
            # Set the class
            if task_class is None:
                task_class = AbinitTask
            task = task_class.from_input(obj, task_workdir, manager)
        self._tasks.append(task)
        # Handle possible dependencies given either as dict or list.
        if deps is not None:
            if hasattr(deps, "items"):
                deps = [Dependency(node, exts) for node, exts in deps.items()]
            task.add_deps(deps)
        # Handle possible required files.
        if required_files is not None:
            task.add_required_files(required_files)
        return task

    # Needed by NodeContainer
    register_task = register

    def path_in_workdir(self, filename):
        """Create the absolute path of filename in the working directory."""
        return os.path.join(self.workdir, filename)

    def setup(self, *args, **kwargs):
        """
        Method called before running the calculations.
        The default implementation is empty.
        """

    def build(self, *args, **kwargs):
        """Creates the top level directory."""
        # Create the directories of the work.
        self.indir.makedirs()
        self.outdir.makedirs()
        self.tmpdir.makedirs()
        # Build dirs and files of each task.
        for task in self:
            task.build(*args, **kwargs)
        # Connect signals within the work.
        self.connect_signals()

    @property
    def status(self):
        """
        Returns the status of the work i.e. the minimum of the status of the tasks.
        """
        return self.get_all_status(only_min=True)

    def get_all_status(self, only_min=False):
        """
        Returns a list with the status of the tasks in self.
        Args:
            only_min: If True, the minimum of the status is returned.
        """
        if len(self) == 0:
            # The work will be created in the future.
            if only_min:
                return self.S_INIT
            else:
                return [self.S_INIT]
        self.check_status()
        status_list = [task.status for task in self]
        if only_min:
            return min(status_list)
        else:
            return status_list

    def check_status(self):
        """Check the status of the tasks."""
        # Recompute the status of the tasks
        # Ignore OK and LOCKED tasks.
        for task in self:
            if task.status in (task.S_OK, task.S_LOCKED): continue
            task.check_status()
        # Take into account possible dependencies. Use a list instead of generators
        for task in self:
            if task.status == task.S_LOCKED: continue
            if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status):
                task.set_status(task.S_READY, "Status set to Ready")

    def rmtree(self, exclude_wildcard=""):
        """
        Remove all files and directories in the working directory
        Args:
            exclude_wildcard: Optional string with regular expressions separated by `|`.
                Files matching one of the regular expressions will be preserved.
                example: exclude_wildard="*.nc|*.txt" preserves all the files
                whose extension is in ["nc", "txt"].
        """
        if not exclude_wildcard:
            shutil.rmtree(self.workdir)
        else:
            w = WildCard(exclude_wildcard)
            for dirpath, dirnames, filenames in os.walk(self.workdir):
                for fname in filenames:
                    path = os.path.join(dirpath, fname)
                    if not w.match(fname):
                        os.remove(path)

    def rm_indatadir(self):
        """Remove all the indata directories."""
        for task in self:
            task.rm_indatadir()

    def rm_outdatadir(self):
        """Remove all the outdata directories."""
        for task in self:
            # Fixed typo: was `task.rm_outatadir()`, which raised AttributeError.
            task.rm_outdatadir()

    def rm_tmpdatadir(self):
        """Remove all the tmpdata directories."""
        for task in self:
            task.rm_tmpdatadir()

    def move(self, dest, isabspath=False):
        """
        Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
        The destination path must not already exist. If the destination already exists
        but is not a directory, it may be overwritten depending on os.rename() semantics.
        Be default, dest is located in the parent directory of self.workdir, use isabspath=True
        to specify an absolute path.
        """
        if not isabspath:
            dest = os.path.join(os.path.dirname(self.workdir), dest)
        shutil.move(self.workdir, dest)

    def submit_tasks(self, wait=False):
        """
        Submits the task in self and wait.
        TODO: change name.
        """
        for task in self:
            task.start()
        if wait:
            for task in self: task.wait()

    def start(self, *args, **kwargs):
        """
        Start the work. Calls build and _setup first, then submit the tasks.
        Non-blocking call unless wait is set to True
        """
        wait = kwargs.pop("wait", False)
        # Initial setup
        self._setup(*args, **kwargs)
        # Build dirs and files.
        self.build(*args, **kwargs)
        # Submit tasks (does not block)
        self.submit_tasks(wait=wait)

    def read_etotals(self, unit="Ha"):
        """
        Reads the total energy from the GSR file produced by the task.
        Return a numpy array with the total energies in Hartree
        The array element is set to np.inf if an exception is raised while reading the GSR file.
        """
        if not self.all_done:
            raise self.Error("Some task is still in running/submitted state")
        etotals = []
        for task in self:
            # Open the GSR file and read etotal (Hartree)
            gsr_path = task.outdir.has_abiext("GSR")
            etot = np.inf
            if gsr_path:
                with ETSF_Reader(gsr_path) as r:
                    etot = r.read_value("etotal")
            etotals.append(etot)
        return EnergyArray(etotals, "Ha").to(unit)

    def parse_timers(self):
        """
        Parse the TIMER section reported in the ABINIT output files.
        Returns:
            :class:`AbinitTimerParser` object
        """
        filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))
        parser = AbinitTimerParser()
        parser.parse(filenames)
        return parser
class BandStructureWork(Work):
    """Work for band structure calculations."""

    def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
        """
        Args:
            scf_input: Input for the SCF run
            nscf_input: Input for the NSCF run defining the band structure calculation.
            dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
            workdir: Working directory.
            manager: :class:`TaskManager` object.
        """
        super().__init__(workdir=workdir, manager=manager)
        # Ground-state SCF run.
        self.scf_task = self.register_scf_task(scf_input)
        # NSCF band-structure run, depending on the SCF density.
        self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
        # Optional DOS run(s), each depending on the SCF density as well.
        self.dos_tasks = []
        if dos_inputs is not None:
            if not isinstance(dos_inputs, (list, tuple)):
                dos_inputs = [dos_inputs]
            self.dos_tasks = [self.register_nscf_task(dos_inp, deps={self.scf_task: "DEN"})
                              for dos_inp in dos_inputs]

    def plot_ebands(self, **kwargs):
        """
        Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
        Returns:
            `matplotlib` figure
        """
        with self.nscf_task.open_gsr() as gsr_file:
            return gsr_file.ebands.plot(**kwargs)

    def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
        """
        Plot the band structure and the DOS.
        Args:
            dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.
            kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.
        Returns:
            `matplotlib` figure.
        """
        with self.nscf_task.open_gsr() as gsr_file:
            band_structure = gsr_file.ebands
        with self.dos_tasks[dos_pos].open_gsr() as gsr_file:
            edos = gsr_file.ebands.get_edos(method=method, step=step, width=width)
        return band_structure.plot_with_edos(edos, **kwargs)

    def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
        """
        Plot the band structure and the DOS.
        Args:
            dos_pos: Index of the task from which the DOS should be obtained.
                None if all DOSes should be displayed. Accepts integer or list of integers.
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.
            kwargs: Keyword arguments passed to `plot` method to customize the plot.
        Returns:
            `matplotlib` figure.
        """
        if dos_pos is not None and not isinstance(dos_pos, (list, tuple)):
            dos_pos = [dos_pos]
        from abipy.electrons.ebands import ElectronDosPlotter
        plotter = ElectronDosPlotter()
        for idx, task in enumerate(self.dos_tasks):
            if dos_pos is not None and idx not in dos_pos:
                continue
            with task.open_gsr() as gsr_file:
                edos = gsr_file.ebands.get_edos(method=method, step=step, width=width)
                ngkpt = task.get_inpvar("ngkpt")
                plotter.add_edos("ngkpt %s" % str(ngkpt), edos)
        return plotter.combiplot(**kwargs)
class RelaxWork(Work):
"""
Work for structural relaxations. The first task relaxes the atomic position
while keeping the unit cell parameters fixed. The second task uses the final
structure to perform a structural relaxation in which both the atomic positions
and the lattice parameters are optimized.
"""
def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None):
"""
Args:
ion_input: Input for the relaxation of the ions (cell is fixed)
ioncell_input: Input for the relaxation of the ions and the unit cell.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super().__init__(workdir=workdir, manager=manager)
self.ion_task = self.register_relax_task(ion_input)
# Note:
# 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
# different unit cell parameters if paral_kgb == 1
#paral_kgb = ion_input[0]["paral_kgb"]
#if paral_kgb == 1:
#deps = {self.ion_task: "WFK"} # --> FIXME: Problem in rwwf
#deps = {self.ion_task: "DEN"}
deps = None
self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)
# Lock ioncell_task as ion_task should communicate to ioncell_task that
# the calculation is OK and pass the final structure.
self.ioncell_task.lock(source_node=self)
self.transfer_done = False
self.target_dilatmx = target_dilatmx
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status S_OK.
If sender == self.ion_task, we update the initial structure
used by self.ioncell_task and we unlock it so that the job can be submitted.
"""
logger.debug("in on_ok with sender %s" % sender)
if sender == self.ion_task and not self.transfer_done:
# Get the relaxed structure from ion_task
ion_structure = self.ion_task.get_final_structure()
# Transfer it to the ioncell task (we do it only once).
self.ioncell_task._change_structure(ion_structure)
self.transfer_done = True
# Unlock ioncell_task so that we can submit it.
self.ioncell_task.unlock(source_node=self)
elif sender == self.ioncell_task and self.target_dilatmx:
actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
if self.target_dilatmx < actual_dilatmx:
self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
self.history.info('Converging dilatmx. Value reduce from {} to {}.'
.format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
self.ioncell_task.reset_from_scratch()
return super().on_ok(sender)
    def plot_ion_relaxation(self, **kwargs):
        """
        Plot the history of the ionic relaxation (fixed cell).
        kwargs are passed to the plot method of :class:`HistFile`.

        Return `matplotlib` figure or None if hist file is not found.
        """
        with self.ion_task.open_hist() as hist:
            return hist.plot(**kwargs) if hist else None
def plot_ioncell_relaxation(self, **kwargs):
"""
Plot the history of the ion-cell relaxation.
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ioncell_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
class G0W0Work(Work):
    """
    Work for general G0W0 calculations.
    All input can be either single inputs or lists of inputs.
    """
    def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs,
                 workdir=None, manager=None):
        """
        Args:
            scf_inputs: Input(s) for the SCF run, if it is a list add all but only link
                to the last input (used for convergence studies on the KS band gap).
            nscf_inputs: Input(s) for the NSCF run, if it is a list add all but only
                link to the last (i.e. additional DOS and BANDS).
            scr_inputs: Input for the screening run.
            sigma_inputs: List of :class:`AbinitInput` for the self-energy run.
                If scr and sigma are lists of the same length, every sigma gets its own screening.
                If there is only one screening all sigma inputs are linked to this one.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager` object.
        """
        super().__init__(workdir=workdir, manager=manager)

        # Pair each SIGMA with its own SCR only when both are sequences of equal length.
        spread_scr = (isinstance(sigma_inputs, (list, tuple)) and
                      isinstance(scr_inputs, (list, tuple)) and
                      len(sigma_inputs) == len(scr_inputs))

        self.sigma_tasks = []

        # Register the GS-SCF run.
        # Register all scf_inputs but link the nscf only to the last scf in the list:
        # multiple scf_inputs can be provided to perform convergence studies.
        # Note: self.scf_task is rebound on each iteration, so it ends up
        # pointing to the task built from the LAST input.
        if isinstance(scf_inputs, (list, tuple)):
            for scf_input in scf_inputs:
                self.scf_task = self.register_scf_task(scf_input)
        else:
            self.scf_task = self.register_scf_task(scf_inputs)

        # Register the NSCF run(s); each one needs the DEN file of the (last) SCF task.
        # As above, self.nscf_task points to the task built from the LAST input.
        if isinstance(nscf_inputs, (list, tuple)):
            for nscf_input in nscf_inputs:
                self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
        else:
            self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"})

        # Register the SCR and SIGMA run(s).
        if spread_scr:
            # One SCR task per SIGMA task.
            for scr_input, sigma_input in zip(scr_inputs, sigma_inputs):
                scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
                sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
                self.sigma_tasks.append(sigma_task)
        else:
            # Sigma work(s) connected to the same screening.
            scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"})
            if isinstance(sigma_inputs, (list, tuple)):
                for inp in sigma_inputs:
                    task = self.register_sigma_task(inp, deps={nscf_task: "WFK", scr_task: "SCR"})
                    self.sigma_tasks.append(task)
            else:
                task = self.register_sigma_task(sigma_inputs, deps={nscf_task: "WFK", scr_task: "SCR"})
                self.sigma_tasks.append(task)
class SigmaConvWork(Work):
    """
    Work for self-energy convergence studies.
    """
    def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
        """
        Args:
            wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
            scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
            sigma_inputs: List of :class:`AbinitInput` for the self-energy runs.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager` object.
        """
        # Normalize the node arguments: they may be Node instances or plain file paths.
        wfk_node = Node.as_node(wfk_node)
        scr_node = Node.as_node(scr_node)

        super().__init__(workdir=workdir, manager=manager)

        # A single input is promoted to a one-element list.
        if not isinstance(sigma_inputs, (list, tuple)):
            sigma_inputs = [sigma_inputs]

        # Register one SIGMA task per input; each one reads the WFK and SCR files.
        for inp in sigma_inputs:
            self.register_sigma_task(inp, deps={wfk_node: "WFK", scr_node: "SCR"})
class BseMdfWork(Work):
    """
    Work for simple BSE calculations in which the self-energy corrections
    are approximated by the scissors operator and the screening is modeled
    with the model dielectric function.
    """
    def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
        """
        Args:
            scf_input: Input for the SCF run.
            nscf_input: Input for the NSCF run.
            bse_inputs: List of Inputs for the BSE run.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager`.
        """
        super().__init__(workdir=workdir, manager=manager)

        # Register the GS-SCF run.
        self.scf_task = self.register_scf_task(scf_input)

        # Construct the input for the NSCF run: it reads the DEN file of the SCF task.
        self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})

        # Construct the input(s) for the BSE run.
        # A single input is accepted as well and promoted to a one-element list.
        if not isinstance(bse_inputs, (list, tuple)):
            bse_inputs = [bse_inputs]

        # Each BSE task reads the WFK file produced by the NSCF task.
        for bse_input in bse_inputs:
            self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})

    def get_mdf_robot(self):
        """Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files."""
        # NOTE(review): a top-level package named `abilab` is not imported anywhere
        # in this chunk (elsewhere the code uses `import abipy.abilab as abilab`),
        # so this import looks broken -- verify it resolves before relying on it.
        from abilab.robots import MdfRobot
        robot = MdfRobot()
        # self[2:] skips the SCF and NSCF tasks registered first in __init__,
        # leaving only the BSE tasks.
        for task in self[2:]:
            mdf_path = task.outdir.has_abiext(robot.EXT)
            if mdf_path:
                robot.add_file(str(task), mdf_path)
        return robot
class QptdmWork(Work):
    """
    This work parallelizes the calculation of the q-points of the screening.
    It also provides the callback `on_all_ok` that calls mrgscr to merge
    all the partial screening files produced.
    """
    def create_tasks(self, wfk_file, scr_input):
        """
        Create the SCR tasks and register them in self.

        Args:
            wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
            scr_input: Input for the screening calculation.
        """
        # This method must be called before any task has been registered.
        assert len(self) == 0
        wfk_file = self.wfk_file = os.path.abspath(wfk_file)

        # Build a temporary work in the tmpdir that will use a shell manager
        # to run ABINIT in order to get the list of q-points for the screening.
        shell_manager = self.manager.to_shell_manager(mpi_procs=1)
        w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)

        fake_input = scr_input.deepcopy()
        fake_task = w.register(fake_input)
        w.allocate()
        w.build()

        # Create the symbolic link and add the magic value
        # nqptdm = -1 to the input to get the list of q-points.
        fake_task.inlink_file(wfk_file)
        fake_task.set_vars({"nqptdm": -1})
        fake_task.start_and_wait()

        # Parse the netcdf file produced by the fake run to get the q-points.
        with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
            qpoints = reader.read_value("reduced_coordinates_of_kpoints")

        # Now we can register the tasks for the different q-points:
        # one SCR task per q-point, each computing a partial SCR file.
        for qpoint in qpoints:
            qptdm_input = scr_input.deepcopy()
            qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
            new_task = self.register_scr_task(qptdm_input, manager=self.manager)
            # Add the garbage collector.
            if self.flow.gc is not None:
                new_task.set_gc(self.flow.gc)

        self.allocate()

    def merge_scrfiles(self, remove_scrfiles=True):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` in sequential on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        If remove_scrfiles is True, the partial SCR files are removed after the merge.

        Returns: path to the final SCR file.
        """
        # Collect the partial SCR files produced by the tasks (None entries dropped).
        scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))

        self.history.info("Will call mrgscr to merge %s SCR files:\n" % len(scr_files))
        # Every task is expected to have produced its partial SCR file.
        assert len(scr_files) == len(self)

        mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1)
        final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out")

        if remove_scrfiles:
            for scr_file in scr_files:
                try:
                    os.remove(scr_file)
                except IOError:
                    # Best-effort cleanup: a missing file is not fatal.
                    pass

        return final_scr

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` in sequential on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        """
        final_scr = self.merge_scrfiles()
        return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
# TODO: MergeDdb --> DfptWork(Work) postpone it because it may break pickle.
class MergeDdb:
    """Mixin class for Works that have to merge the DDB files produced by the tasks."""

    def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):
        """
        Build tasks for the computation of Born effective charges and add them to the work.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
                None to use AbiPy default.
            ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
                None to use AbiPy default.

        Return:
            (ddk_tasks, bec_tasks)
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        # DDK calculations (self-consistent to get electric field).
        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)

        ddk_tasks = []
        for ddk_inp in multi_ddk:
            ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
            ddk_tasks.append(ddk_task)

        # Build the list of inputs for electric field perturbation and phonons.
        # Each BEC task is connected to all the previous DDK tasks and to the scf_task.
        bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        bec_deps.update({scf_task: "WFK"})

        bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance)
        bec_tasks = []
        for bec_inp in bec_inputs:
            bec_task = self.register_bec_task(bec_inp, deps=bec_deps)
            bec_tasks.append(bec_task)

        return ddk_tasks, bec_tasks

    def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True,
                        exclude_tasks=None, include_tasks=None):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.

        Args:
            delete_source_ddbs: True if input DDB should be removed once final DDB is created.
            only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work.
                Useful e.g. for finite stress corrections in which the stress in the
                initial configuration should be merged in the final DDB.
            exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
            include_tasks: List of tasks that should be included when merging the partial DDB files.
                Mutually exclusive with exclude_tasks.

        Returns:
            path to the output DDB file.
        """
        if exclude_tasks:
            my_tasks = [task for task in self if task not in exclude_tasks]
        elif include_tasks:
            my_tasks = [task for task in self if task in include_tasks]
        else:
            my_tasks = [task for task in self]

        if only_dfpt_tasks:
            ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks
                                           if isinstance(task, DfptTask)]))
        else:
            ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks]))

        self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files))
        # DDB files are always produced so this should never happen!
        if not ddb_files:
            # BUGFIX: the original message applied `%` with no conversion specifier
            # ("... of " % self), raising TypeError instead of the intended RuntimeError.
            raise RuntimeError("Cannot find any DDB file to merge by the tasks of %s" % self)

        # Final DDB file will be produced in the outdir of the work.
        out_ddb = self.outdir.path_in("out_DDB")

        if len(ddb_files) == 1:
            # Avoid the merge. Just copy the DDB file to the outdir of the work.
            shutil.copy(ddb_files[0], out_ddb)
        else:
            # Call mrgddb to merge the partial DDB files.
            desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
            mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
            mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc,
                         delete_source_ddbs=delete_source_ddbs)

        return out_ddb

    def merge_pot1_files(self, delete_source=True):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgdvdb` in sequential on the local machine to produce
        the final DVDB file in the outdir of the `Work`.

        Args:
            delete_source: True if POT1 files should be removed after (successful) merge.

        Returns:
            path to the output DVDB file. None if no DFPT POT file is found.
        """
        natom = len(self[0].input.structure)
        max_pertcase = 3 * natom

        pot1_files = []
        for task in self:
            if not isinstance(task, DfptTask): continue
            paths = task.outdir.list_filepaths(wildcard="*_POT*")
            for path in paths:
                # Include only atomic perturbations i.e. files whose ext <= 3 * natom
                i = path.rindex("_POT")
                pertcase = int(path[i+4:].replace(".nc", ""))
                if pertcase <= max_pertcase:
                    pot1_files.append(path)

        # prtpot = 0 disables the output of the DFPT POT files so an empty list is not fatal here.
        if not pot1_files: return None

        self.history.info("Will call mrgdvdb to merge %s files:" % len(pot1_files))

        # Final DVDB file will be produced in the outdir of the work.
        out_dvdb = self.outdir.path_in("out_DVDB")

        if len(pot1_files) == 1:
            # Avoid the merge. Just copy the POT file to the outdir of the work.
            shutil.copy(pot1_files[0], out_dvdb)
        else:
            # FIXME: The merge may require a non-negligible amount of memory if lots of qpts.
            # Besides there are machines such as lemaitre3 that are problematic when
            # running MPI applications on the front-end.
            mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
            mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)

        return out_dvdb
class PhononWork(Work, MergeDdb):
    """
    This work consists of nirred Phonon tasks where nirred is
    the number of irreducible atomic perturbations for a given set of q-points.
    It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
    all the partial DDB (POT) files produced. The two files are available in the
    output directory of the Work.
    """

    @classmethod
    def from_scf_task(cls, scf_task, qpoints, is_ngqpt=False, tolerance=None, with_becs=False,
                      ddk_tolerance=None, manager=None):
        """
        Construct a `PhononWork` from a :class:`ScfTask` object.
        The input file for phonons is automatically generated from the input of the ScfTask.
        Each phonon task depends on the WFK file produced by the `scf_task`.

        Args:
            scf_task: ScfTask object.
            qpoints: q-points in reduced coordinates. Accepts single q-point, list of q-points
                or three integers defining the q-mesh if `is_ngqpt`.
            is_ngqpt: True if `qpoints` should be interpreted as divisions instead of q-points.
            tolerance: dict {"varname": value} with the tolerance to be used in the phonon run.
                None to use AbiPy default.
            with_becs: Activate calculation of Electric field and Born effective charges.
            ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs.
                None to use AbiPy default.
            manager: :class:`TaskManager` object.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        if is_ngqpt:
            # Expand the q-mesh divisions into the list of q-points in the IBZ.
            qpoints = scf_task.input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
        # Accept a single q-point as well: normalize to a (nqpt, 3) array.
        qpoints = np.reshape(qpoints, (-1, 3))

        new = cls(manager=manager)
        if with_becs:
            new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance)

        for qpt in qpoints:
            # Gamma is already handled by the BECS tasks: skip it to avoid duplication.
            if with_becs and np.sum(qpt ** 2) < 1e-12: continue
            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
            for ph_inp in multi:
                new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})

        return new

    @classmethod
    def from_scf_input(cls, scf_input, qpoints, is_ngqpt=False, tolerance=None,
                       with_becs=False, ddk_tolerance=None, manager=None):
        """
        Similar to `from_scf_task`, the difference is that this method requires
        an input for SCF calculation. A new ScfTask is created and added to the Work.
        This API should be used if the DDB of the GS task should be merged.
        """
        if is_ngqpt:
            qpoints = scf_input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
        qpoints = np.reshape(qpoints, (-1, 3))

        new = cls(manager=manager)
        # Create the ScfTask inside this work (its DDB is merged as well).
        scf_task = new.register_scf_task(scf_input)

        if with_becs:
            new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance)

        for qpt in qpoints:
            # Gamma is already handled by the BECS tasks: skip it to avoid duplication.
            if with_becs and np.sum(qpt ** 2) < 1e-12: continue
            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
            for ph_inp in multi:
                new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})

        return new

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()
        # Merge DVDB files (may be None if no DFPT POT file was produced).
        out_dvdb = self.merge_pot1_files()

        return self.Results(node=self, returncode=0, message="DDB merge done")
class PhononWfkqWork(Work, MergeDdb):
    """
    This work computes phonons with DFPT on an arbitrary q-mesh (usually denser than the k-mesh for electrons)
    by computing WKQ files for each q-point.
    The number of irreducible atomic perturbations for each q-point are taken into account.
    It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
    all the partial DDB (POT) files produced. The two files are available in the
    output directory of the Work. The WKQ files are removed at runtime.
    """

    @classmethod
    def from_scf_task(cls, scf_task, ngqpt, ph_tolerance=None, tolwfr=1.0e-22, nband=None,
                      with_becs=False, ddk_tolerance=None, shiftq=(0, 0, 0), is_ngqpt=True, remove_wfkq=True,
                      manager=None):
        """
        Construct a `PhononWfkqWork` from a :class:`ScfTask` object.
        The input files for WFQ and phonons are automatically generated from the input of the ScfTask.
        Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file.

        Args:
            scf_task: ScfTask object.
            ngqpt: three integers defining the q-mesh (or an explicit list of q-points
                if `is_ngqpt` is False).
            with_becs: Activate calculation of Electric field and Born effective charges.
            ph_tolerance: dict {"varname": value} with the tolerance for the phonon run.
                None to use AbiPy default.
            tolwfr: tolerance used to compute WFQ.
            nband: Number of bands for the WFQ runs. A ~10% buffer of extra bands
                (nbdbuf) is added automatically.
            ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs.
                None to use AbiPy default.
            shiftq: Q-mesh shift. Multiple shifts are not supported.
            is_ngqpt: True if ngqpt is interpreted as a set of integers defining the q-mesh,
                otherwise it is an explicit list of q-points.
            remove_wfkq: Remove WKQ files when the children are completed.
            manager: :class:`TaskManager` object.

        .. note:

            Use k-meshes with one shift and q-meshes that are multiple of ngkpt
            to decrease the number of WFQ files to be computed.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        shiftq = np.reshape(shiftq, (3,))
        if is_ngqpt:
            qpoints = scf_task.input.abiget_ibz(ngkpt=ngqpt, shiftk=shiftq, kptopt=1).points
        else:
            qpoints = ngqpt

        new = cls(manager=manager)
        new.remove_wfkq = remove_wfkq
        new.wfkq_tasks = []
        new.wfkq_task_children = collections.defaultdict(list)

        if with_becs:
            # Add DDK and BECS.
            new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)

        # Get ngkpt, shift for electrons from input.
        ngkpt, shiftk = scf_task.input.get_ngkpt_shiftk()
        # BUGFIX: the original code set try_to_skip_wfkq = True in both branches,
        # so the k + q = k shortcut below could be attempted with ngkpt = None
        # (TypeError in `qinds * ngkpt`) or with multiple shifts / an explicit
        # q-point list where the divisibility test is meaningless. Skipping WFQ
        # is only valid for a diagonal kptrlatt with a single shift and a regular q-mesh.
        try_to_skip_wfkq = ngkpt is not None and len(shiftk) == 1 and is_ngqpt

        # TODO: One could avoid kptopt 3 by computing WFK in the IBZ and then rotating.
        # but this has to be done inside Abinit.
        for qpt in qpoints:
            is_gamma = np.sum(qpt ** 2) < 1e-12
            # Gamma is already handled by the BECS tasks: skip it to avoid duplication.
            if with_becs and is_gamma: continue

            # Avoid WFQ if k + q = k (requires ngkpt, multiple shifts are not supported)
            need_wfkq = True
            if is_gamma:
                need_wfkq = False
            elif try_to_skip_wfkq:
                # k = (i + shiftk) / ngkpt --> q is on the k-mesh iff (q * ngqpt * ngkpt) % ngqpt == 0.
                qinds = np.rint(qpt * ngqpt - shiftq)
                f = (qinds * ngkpt) % ngqpt
                need_wfkq = np.any(f != 0)

            if need_wfkq:
                nscf_inp = scf_task.input.new_with_vars(qpt=qpt, nqpt=1, iscf=-2, kptopt=3, tolwfr=tolwfr)
                if nband:
                    # BUGFIX: nbdbuf must be an integer (ABINIT input variable);
                    # the original max(2, nband*0.1) could produce a float.
                    nbdbuf = max(2, int(nband * 0.1))
                    nscf_inp.set_vars(nband=nband + nbdbuf, nbdbuf=nbdbuf)
                wfkq_task = new.register_nscf_task(nscf_inp, deps={scf_task: ["DEN", "WFK"]})
                new.wfkq_tasks.append(wfkq_task)

            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=ph_tolerance)
            for ph_inp in multi:
                deps = {scf_task: "WFK", wfkq_task: "WFQ"} if need_wfkq else {scf_task: "WFK"}
                #ph_inp["prtwf"] = -1
                t = new.register_phonon_task(ph_inp, deps=deps)
                if need_wfkq:
                    new.wfkq_task_children[wfkq_task].append(t)

        return new

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It removes the WFKQ file if all its children have reached `S_OK`.
        """
        if self.remove_wfkq:
            for task in self.wfkq_tasks:
                if task.status != task.S_OK: continue
                children = self.wfkq_task_children[task]
                if all(child.status == child.S_OK for child in children):
                    path = task.outdir.has_abiext("WFQ")
                    if path:
                        self.history.info("Removing WFQ: %s" % path)
                        os.remove(path)

        return super().on_ok(sender)

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()
        # Merge DVDB files (may be None if no DFPT POT file was produced).
        out_dvdb = self.merge_pot1_files()

        return self.Results(node=self, returncode=0, message="DDB merge done")
class GKKPWork(Work):
    """
    This work computes electron-phonon matrix elements for all the q-points
    present in a DVDB and DDB file.
    """
    @classmethod
    def from_den_ddb_dvdb(cls, inp, den_path, ddb_path, dvdb_path, mpiprocs=1, remove_wfkq=True,
                          qpath=None, with_ddk=True, expand=True, manager=None):
        """
        Construct a `GKKPWork` from a DDB and DVDB file.
        For each q found, a WFQ task and an EPH task computing the matrix elements are created.

        Args:
            inp: :class:`AbinitInput` used as template for the NSCF/EPH tasks.
            den_path: Path to the DEN file.
            ddb_path: Path to the DDB file.
            dvdb_path: Path to the DVDB file.
            mpiprocs: Number of MPI processes used by the (non-serial) tasks.
            remove_wfkq: Remove WKQ files when the children are completed.
            qpath: Optional list of q-points used to interpolate the DVDB;
                if None the q-points found in the DDB file are used.
            with_ddk: Activate the computation of the DDK via the WFK-task driver.
            expand: Expand the WFK to the full BZ with an extra serial task.
            manager: :class:`TaskManager` object.
        """
        import abipy.abilab as abilab

        # Create file nodes for the external files produced by previous calculations.
        den_file = FileNode(den_path)
        ddb_file = FileNode(ddb_path)
        dvdb_file = FileNode(dvdb_path)

        # Create new work
        new = cls(manager=manager)
        new.remove_wfkq = remove_wfkq
        new.wfkq_tasks = []
        new.wfkq_task_children = collections.defaultdict(list)
        if manager is None: manager = TaskManager.from_user_config()
        # Fixed-size manager used by the parallel tasks registered below.
        tm = manager.new_with_fixed_mpi_omp(mpiprocs, 1)

        # Create a WFK task.
        # kptopt = 1 (IBZ) when the WFK is later expanded to the full BZ, else kptopt = 3.
        kptopt = 1 if expand else 3
        nscf_inp = inp.new_with_vars(iscf=-2, kptopt=kptopt)
        wfk_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"},manager=tm)
        new.wfkq_tasks.append(wfk_task)
        new.wfk_task = wfk_task

        # Read path and regular grid from DDB file.
        with abilab.abiopen(ddb_path) as ddb:
            q_frac_coords = np.array([k.frac_coords for k in ddb.qpoints])
            ddb_ngqpt = ddb.guessed_ngqpt

        # If qpath is set, we read the list of q-points to be used to interpolate the DVDB file.
        # The DVDB and DDB file have to correspond to a regular grid.
        dvdb = dvdb_file
        if qpath is None:
            qpath = q_frac_coords
        else:
            # Register an EPH task that interpolates the DVDB along qpath;
            # downstream tasks then read the DVDB it produces.
            interp_inp = inp.new_with_vars(optdriver=7, eph_task=-5, ddb_ngqpt=ddb_ngqpt,
                                           ph_nqpath=len(qpath), ph_qpath=qpath, prtphdos=0)
            dvdb = new.register_eph_task(interp_inp, deps={wfk_task: "WFK", ddb_file: "DDB", dvdb_file: "DVDB"},
                                         manager=tm)

        # Create a WFK expansion task (serial) that unfolds the WFK to the full BZ.
        if expand:
            fbz_nscf_inp = inp.new_with_vars(optdriver=8)
            fbz_nscf_inp.set_spell_check(False)
            fbz_nscf_inp.set_vars(wfk_task="wfk_fullbz")
            tm_serial = manager.new_with_fixed_mpi_omp(1,1)
            # NOTE: wfk_task / new.wfk_task are rebound here so that all the
            # tasks below depend on the expanded (full BZ) WFK.
            wfk_task = new.register_nscf_task(fbz_nscf_inp, deps={wfk_task: "WFK", den_file: "DEN"},
                                              manager=tm_serial)
            new.wfkq_tasks.append(wfk_task)
            new.wfk_task = wfk_task

        if with_ddk:
            kptopt = 3 if expand else 1
            ddk_inp = inp.new_with_vars(optdriver=8,kptopt=kptopt)
            ddk_inp.set_spell_check(False)
            ddk_inp.set_vars(wfk_task="wfk_ddk")
            ddk_task = new.register_nscf_task(ddk_inp, deps={wfk_task: "WFK", den_file: "DEN"}, manager=tm)
            new.wfkq_tasks.append(ddk_task)

        # For each qpoint: build the WFQ task (unless q = Gamma) and the EPH task.
        for qpt in qpath:
            is_gamma = np.sum(qpt ** 2) < 1e-12
            if is_gamma:
                # Create a link from WFK to WFQ on_ok (see the on_ok callback below).
                wfkq_task = wfk_task
                deps = {wfk_task: ["WFK","WFQ"], ddb_file: "DDB", dvdb: "DVDB" }
            else:
                # Create a WFQ task at this q-point.
                nscf_inp = nscf_inp.new_with_vars(kptopt=3, qpt=qpt, nqpt=1)
                wfkq_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"}, manager=tm)
                new.wfkq_tasks.append(wfkq_task)
                deps = {wfk_task: "WFK", wfkq_task: "WFQ", ddb_file: "DDB", dvdb: "DVDB" }

            # Create a EPH task computing the matrix elements at this q-point.
            eph_inp = inp.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, kptopt=3,
                                        ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt)
            t = new.register_eph_task(eph_inp, deps=deps, manager=tm)
            new.wfkq_task_children[wfkq_task].append(t)

        return new

    @classmethod
    def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None):
        """
        Construct a `GKKPWork` from a `PhononWfkqWork` object.
        The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands.
        """
        # Get list of qpoints from the phonon tasks in this work.
        qpoints = []
        qpoints_deps = []
        for task in phononwfkq_work:
            if isinstance(task,PhononTask):
                # Store qpoints
                qpt = task.input.get("qpt", [0,0,0])
                qpoints.append(qpt)
                # Store dependencies
                qpoints_deps.append(task.deps)

        # Create file nodes pointing to the merged DDB/DVDB produced by the phonon work.
        ddb_path = phononwfkq_work.outdir.has_abiext("DDB")
        dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB")
        ddb_file = FileNode(ddb_path)
        dvdb_file = FileNode(dvdb_path)

        # Get scf_task from first q-point.
        # NOTE(review): scf_task is only bound if a WFK dependency on a ScfTask
        # is found; otherwise the reference below raises NameError -- verify callers.
        for dep in qpoints_deps[0]:
            if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK':
                scf_task = dep.node

        # Create new work
        new = cls(manager=manager)
        new.remove_wfkq = remove_wfkq
        new.wfkq_tasks = []
        # NOTE(review): wfk_task is a list here but a Task in from_den_ddb_dvdb,
        # and on_ok compares `sender == self.wfk_task` -- confirm this is intended.
        new.wfk_task = []

        # Add one eph task per qpoint.
        for qpt,qpoint_deps in zip(qpoints,qpoints_deps):
            # Create eph task
            eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2,
                                                     ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt)
            deps = {ddb_file: "DDB", dvdb_file: "DVDB" }
            for dep in qpoint_deps:
                deps[dep.node] = dep.exts[0]
            # If no WFQ in deps link the WFK with WFQ extension
            if 'WFQ' not in deps.values():
                inv_deps = dict((v, k) for k, v in deps.items())
                wfk_task = inv_deps['WFK']
                wfk_path = wfk_task.outdir.has_abiext("WFK")
                # Check if netcdf (keep the same extension for the symlinked WFQ).
                filename, extension = os.path.splitext(wfk_path)
                infile = 'out_WFQ' + extension
                wfq_path = os.path.join(os.path.dirname(wfk_path), infile)
                if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path)
                deps[FileNode(wfq_path)] = 'WFQ'
            new.register_eph_task(eph_input, deps=deps)

        return new

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It removes the WFKQ file if all its children have reached `S_OK`.
        """
        if self.remove_wfkq:
            for task in self.wfkq_tasks:
                if task.status != task.S_OK: continue
                children = self.wfkq_task_children[task]
                if all(child.status == child.S_OK for child in children):
                    path = task.outdir.has_abiext("WFQ")
                    if path:
                        self.history.info("Removing WFQ: %s" % path)
                        os.remove(path)

        # If wfk task we create a link to a wfq file so abinit is happy.
        if sender == self.wfk_task:
            wfk_path = self.wfk_task.outdir.has_abiext("WFK")
            # Check if netcdf (keep the same extension for the symlinked WFQ).
            filename, extension = os.path.splitext(wfk_path)
            infile = 'out_WFQ' + extension
            infile = os.path.join(os.path.dirname(wfk_path), infile)
            os.symlink(wfk_path, infile)

        return super().on_ok(sender)
class BecWork(Work, MergeDdb):
    """
    Work for the computation of the Born effective charges.

    This work consists of DDK tasks and phonon + electric field perturbation.
    It provides the callback method (on_all_ok) that calls mrgddb to merge the
    partial DDB files produced by the work.
    """

    @classmethod
    def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):
        """
        Build tasks for the computation of Born effective charges from a ground-state task.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
            ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
                None to use AbiPy default.
            manager: :class:`TaskManager` object.
        """
        work = cls(manager=manager)
        # Delegate the registration of DDK + BEC tasks to the MergeDdb mixin.
        work.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
        return work

    def on_all_ok(self):
        """
        This method is called when all tasks reach S_OK.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge the partial DDB files produced by the tasks of this work.
        merged_ddb = self.merge_ddb_files()
        return self.Results(node=self, returncode=0, message="DDB merge done")
class DteWork(Work, MergeDdb):
    """
    Work for the computation of the third derivative of the energy.

    This work consists of DDK tasks and electric field perturbation.
    It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced.
    """
    @classmethod
    def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None):
        """
        Build a DteWork from a ground-state task.

        Args:
            scf_task: ScfTask object.
            ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.
            manager: :class:`TaskManager` object.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)

        new = cls(manager=manager)

        # DDK calculations: one task per direction.
        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)

        ddk_tasks = []
        for ddk_inp in multi_ddk:
            ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
            ddk_tasks.append(ddk_task)

        # Build the list of inputs for electric field perturbation.
        # Each task is connected to all the previous DDK, DDE task and to the scf_task.
        # To compute the nonlinear coefficients all the directions of the perturbation
        # have to be taken in consideration (hence use_symmetries=False).
        multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)

        # DDE calculations: each one depends on all DDK tasks and the SCF WFK.
        dde_tasks = []
        dde_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        dde_deps.update({scf_task: "WFK"})
        for dde_inp in multi_dde:
            dde_task = new.register_dde_task(dde_inp, deps=dde_deps)
            dde_tasks.append(dde_task)

        # DTE calculations: depend on the SCF (WFK, DEN) and on the
        # first-order wavefunctions/densities produced by the DDE tasks.
        dte_deps = {scf_task: "WFK DEN"}
        dte_deps.update({dde_task: "1WF 1DEN" for dde_task in dde_tasks})

        multi_dte = scf_task.input.make_dte_inputs()
        dte_tasks = []
        for dte_inp in multi_dte:
            dte_task = new.register_dte_task(dte_inp, deps=dte_deps)
            dte_tasks.append(dte_task)

        return new

    def on_all_ok(self):
        """
        This method is called when all tasks reach S_OK.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()
        return self.Results(node=self, returncode=0, message="DDB merge done")
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.