repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
claudep/pootle | tests/views/timeline.py | 1 | 9375 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from hashlib import md5
from itertools import groupby
import json
import pytest
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.template import loader
from django.urls import reverse
from pootle.core.delegate import review
from pootle_comment.forms import UnsecuredCommentForm
from pootle_misc.checks import check_names
from pootle_statistics.models import Submission, SubmissionFields
from pootle_store.constants import (
FUZZY, OBSOLETE, STATES_MAP, TRANSLATED, UNTRANSLATED)
from pootle_store.fields import to_python
from pootle_store.models import (
Suggestion, QualityCheck, Unit)
class ProxyTimelineLanguage(object):
    """Lightweight stand-in for a Language object.

    The timeline template only reads the language ``code`` attribute, so a
    full model instance is unnecessary.
    """
    def __init__(self, code):
        # Language code as read from the submission values() row.
        self.code = code
class ProxyTimelineUser(object):
    """Lightweight stand-in for a User, built from a submission values() row.

    Exposes only the attributes the timeline template reads.
    """
    def __init__(self, submission):
        # ``submission`` is a dict as produced by Submission.objects.values().
        self.submission = submission
    @property
    def username(self):
        return self.submission["submitter__username"]
    @property
    def email_hash(self):
        # md5() only accepts bytes on Python 3; encode the email explicitly.
        # Previously a plain (unicode) str was passed straight through, which
        # raises TypeError under Python 3.
        email = self.submission['submitter__email']
        if not isinstance(email, bytes):
            email = email.encode('utf-8')
        return md5(email).hexdigest()
    @property
    def display_name(self):
        # Fall back to the username when the full name is blank/whitespace.
        full_name = self.submission["submitter__full_name"].strip()
        return full_name if full_name else self.submission["submitter__username"]
def _calculate_timeline(request, unit):
    """Build the expected timeline HTML for ``unit``.

    Mirrors what the timeline view does: query the relevant submissions,
    group them into timeline entries and render the timeline template, so
    tests can compare the view's output against this expected markup.
    """
    # Only these submission fields appear in the timeline.
    submission_filter = (
        Q(field__in=[SubmissionFields.TARGET, SubmissionFields.STATE,
                     SubmissionFields.COMMENT, SubmissionFields.NONE]))
    # Exclude the comment submission that matches the unit's current
    # commented_on timestamp — the view renders it elsewhere.
    timeline = (
        Submission.objects.filter(unit=unit)
        .filter(submission_filter)
        .exclude(field=SubmissionFields.COMMENT,
                 creation_time=unit.commented_on)
        .order_by("id"))
    User = get_user_model()
    entries_group = []
    context = {}
    # Values pulled per submission row; avoids instantiating model objects.
    timeline_fields = [
        "type", "old_value", "new_value", "submitter_id", "creation_time",
        "translation_project__language__code", "field", "suggestion_id",
        "suggestion__target_f", "quality_check__name", "submitter__username",
        "submitter__full_name", "suggestion__user__full_name", "submitter__email",
        "suggestion__user__username"]
    # groupby requires adjacent rows with equal keys; the order_by("id")
    # above keeps rows from the same (submitter, time, suggestion) adjacent.
    grouped_timeline = groupby(
        timeline.values(*timeline_fields),
        key=lambda item: "\001".join([
            str(x) for x in
            [
                item['submitter_id'],
                item['creation_time'],
                item['suggestion_id'],
            ]
        ])
    )
    # Group by submitter id and creation_time because
    # different submissions can have same creation time
    for key_, values in grouped_timeline:
        entry_group = {
            'entries': [],
        }
        for item in values:
            # Only add creation_time information for the whole entry group once
            entry_group['datetime'] = item['creation_time']
            # Only add submitter information for the whole entry group once
            entry_group.setdefault('submitter', ProxyTimelineUser(item))
            context.setdefault(
                'language',
                ProxyTimelineLanguage(item['translation_project__language__code']))
            entry = {
                'field': item['field'],
                'field_name': SubmissionFields.NAMES_MAP.get(item['field'], None),
                'type': item['type']}
            if item['field'] == SubmissionFields.STATE:
                # State changes display human-readable state names.
                entry['old_value'] = STATES_MAP[int(to_python(item['old_value']))]
                entry['new_value'] = STATES_MAP[int(to_python(item['new_value']))]
            elif item['suggestion_id']:
                entry.update({
                    'suggestion_text': item['suggestion__target_f']})
            elif item['quality_check__name']:
                # Quality checks link to the check description anchors.
                check_name = item['quality_check__name']
                check_url = (
                    u''.join(
                        [reverse('pootle-checks-descriptions'),
                         '#', check_name]))
                entry.update({
                    'check_name': check_name,
                    'check_display_name': check_names[check_name],
                    'checks_url': check_url})
            else:
                entry['new_value'] = to_python(item['new_value'])
            entry_group['entries'].append(entry)
        entries_group.append(entry_group)
    # Synthetic "created" entry attributed to the system user, prepended in
    # chronological order (i.e. oldest position).
    created = {
        'created': True,
        'submitter': User.objects.get_system_user()}
    if unit.creation_time:
        created['datetime'] = unit.creation_time
    entries_group[:0] = [created]
    # Let's reverse the chronological order
    entries_group.reverse()
    context['entries_group'] = entries_group
    t = loader.get_template('editor/units/xhr_timeline.html')
    return t.render(context=context, request=request)
def _timeline_test(client, request_user, unit):
    """Request the timeline view for ``unit`` as ``request_user`` and verify
    the JSON response against :func:`_calculate_timeline`.

    Users without access to the unit must get a 403 with no timeline data.
    """
    url = reverse("pootle-xhr-units-timeline", kwargs=dict(uid=unit.id))
    user = request_user["user"]
    # "nobody" represents the anonymous user — no login for it.
    if user.username != "nobody":
        client.login(
            username=user.username,
            password=request_user["password"])
    # The view is AJAX-only, hence the XMLHttpRequest header.
    response = client.get(
        url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    no_permission = (
        not user.is_superuser
        and unit not in Unit.objects.get_translatable(user))
    if no_permission:
        assert response.status_code == 403
        assert "timeline" not in response
        return
    request = response.wsgi_request
    result = json.loads(response.content)
    assert result["uid"] == unit.id
    assert result["timeline"] == _calculate_timeline(request, unit)
@pytest.mark.django_db
def test_timeline_view_units(client, request_users, system, admin):
    """Timeline view works for translated and untranslated units alike."""
    for unit_state in (TRANSLATED, UNTRANSLATED):
        _timeline_test(
            client,
            request_users,
            Unit.objects.filter(state=unit_state).first())
@pytest.mark.xfail(
    reason="timeline does not currently check permissions correctly")
@pytest.mark.django_db
def test_timeline_view_unit_obsolete(client, request_users, system, admin):
    """Timeline view for an obsolete unit (permission checks known broken)."""
    obsolete_unit = Unit.objects.filter(state=OBSOLETE).first()
    _timeline_test(client, request_users, obsolete_unit)
@pytest.mark.xfail(
    reason="timeline does not currently check permissions correctly")
@pytest.mark.django_db
def test_timeline_view_unit_disabled_project(client, request_users,
                                             system, admin):
    """Timeline view for a unit belonging to a disabled project."""
    _timeline_test(
        client,
        request_users,
        Unit.objects.filter(
            store__translation_project__project__disabled=True,
            state=TRANSLATED).first())
@pytest.mark.django_db
def test_timeline_view_unit_with_suggestion(client, request_users,
                                            system, admin, store0):
    """Timeline view for a unit with an accepted suggestion."""
    # A "state change" submission is apparently required to get a suggestion
    # submission, hence the FUZZY toggle before accepting.
    pending = Suggestion.objects.filter(
        unit__store=store0,
        state__name="pending",
        unit__state=UNTRANSLATED).first()
    target_unit = pending.unit
    target_unit.state = FUZZY
    target_unit.save()
    review.get(Suggestion)([pending], admin).accept()
    _timeline_test(client, request_users, target_unit)
@pytest.mark.django_db
def test_timeline_view_unit_with_qc(client, request_users, system, admin, store0):
    """Timeline view for a unit carrying a toggled quality check."""
    check = QualityCheck.objects.filter(
        unit__store=store0,
        unit__state=TRANSLATED,
        unit__store__translation_project__project__disabled=False).first()
    checked_unit = check.unit
    # Toggling the check creates the submission the timeline should show.
    checked_unit.toggle_qualitycheck(check.id, True, admin)
    _timeline_test(client, request_users, checked_unit)
@pytest.mark.django_db
def test_timeline_view_unit_with_suggestion_and_comment(client, request_users,
                                                        system, admin, store0):
    """Timeline shows an accepted suggestion together with its comment."""
    # A "state change" submission is apparently required to get a suggestion
    # submission, hence the FUZZY toggle before accepting.
    suggestion = Suggestion.objects.filter(
        unit__store=store0,
        state__name="pending",
        unit__state=UNTRANSLATED).first()
    unit = suggestion.unit
    unit.state = FUZZY
    unit.save()
    review.get(Suggestion)([suggestion], admin).accept()
    form = UnsecuredCommentForm(suggestion, dict(
        comment='This is a comment!',
        user=admin,
    ))
    # Fail loudly instead of silently skipping save() on an invalid form;
    # the old `if form.is_valid(): form.save()` let the test pass without
    # ever exercising the comment path.
    assert form.is_valid(), form.errors
    form.save()
    _timeline_test(
        client,
        request_users,
        unit)
@pytest.mark.django_db
def test_timeline_view_unit_with_creation(client, request_users,
                                          system, admin, store0):
    """Timeline includes the creation entry for a freshly added unit."""
    new_unit = Unit.objects.create(
        state=TRANSLATED, source_f="Foo", target_f="Bar",
        store=store0)
    # Re-save and reload so MySQL's missing microseconds cannot skew the
    # creation-time comparison.
    new_unit.save()
    refreshed = Unit.objects.get(pk=new_unit.pk)
    _timeline_test(client, request_users, refreshed)
| gpl-3.0 |
jorik041/MITMf | core/responder/fingerprinter/RelayPackets.py | 9 | 19989 | # NBT-NS/LLMNR Responder
# Created by Laurent Gaffie
# Copyright (C) 2014 Trustwave Holdings, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
from odict import OrderedDict
class Packet():
    """Base class for wire packets: an ordered set of raw string fields.

    Subclasses override ``fields`` with their own OrderedDict layout;
    ``str()`` of an instance yields the concatenated raw packet.
    """
    # Default field layout; copied per-instance in __init__.
    fields = OrderedDict([
        ("data", ""),
    ])
    def __init__(self, **kw):
        # Copy the class-level layout so instances never share state.
        self.fields = OrderedDict(self.__class__.fields)
        for name, value in kw.items():
            # A callable override is applied to the current default value.
            if callable(value):
                self.fields[name] = value(self.fields[name])
            else:
                self.fields[name] = value
    def __str__(self):
        parts = [str(v) for v in self.fields.values()]
        return "".join(parts)
##################################################################################
#SMB Client Stuff
##################################################################################
def longueur(payload):
    """Return the total length of the joined payload pieces, packed as a
    big-endian 32-bit integer (NetBIOS session length prefix)."""
    return struct.pack(">i", len("".join(payload)))
class SMBHeader(Packet):
    """32-byte SMB packet header (client request template).

    NOTE(review): a second ``SMBHeader`` class later in this file (server
    answer template) rebinds this name at import time; only that later
    definition is visible after the module is imported.
    """
    fields = OrderedDict([
        ("proto", "\xff\x53\x4d\x42"),  # "\xffSMB" protocol magic
        ("cmd", "\x72"),  # SMB_COM_NEGOTIATE
        ("error-code", "\x00\x00\x00\x00" ),
        ("flag1", "\x00"),
        ("flag2", "\x00\x00"),
        ("pidhigh", "\x00\x00"),
        ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("reserved", "\x00\x00"),
        ("tid", "\x00\x00"),
        ("pid", "\x00\x4e"),
        ("uid", "\x00\x08"),
        ("mid", "\x00\x00"),
    ])
class SMBNego(Packet):
    """SMB Negotiate Protocol request body; ``Data`` carries the dialects."""
    fields = OrderedDict([
        ("Wordcount", "\x00"),
        ("Bcc", "\x62\x00"),
        ("Data", "")
    ])
    def calculate(self):
        # Byte count: little-endian length of the dialect data that follows.
        self.fields["Bcc"] = struct.pack("<h",len(str(self.fields["Data"])))
class SMBNegoData(Packet):
    """Negotiate request dialect list.

    Each dialect entry is a 0x02 separator byte followed by a
    null-terminated ASCII dialect name ("PC NETWORK PROGRAM 1.0",
    "LANMAN1.0", ..., "NT LM 0.12").
    """
    fields = OrderedDict([
        ("Separator1","\x02" ),
        ("Dialect1", "\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00"),
        ("Separator2","\x02"),
        ("Dialect2", "\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"),
        ("Separator3","\x02"),
        ("Dialect3", "\x57\x69\x6e\x64\x6f\x77\x73\x20\x66\x6f\x72\x20\x57\x6f\x72\x6b\x67\x72\x6f\x75\x70\x73\x20\x33\x2e\x31\x61\x00"),
        ("Separator4","\x02"),
        ("Dialect4", "\x4c\x4d\x31\x2e\x32\x58\x30\x30\x32\x00"),
        ("Separator5","\x02"),
        ("Dialect5", "\x4c\x41\x4e\x4d\x41\x4e\x32\x2e\x31\x00"),
        ("Separator6","\x02"),
        ("Dialect6", "\x4e\x54\x20\x4c\x4d\x20\x30\x2e\x31\x32\x00"),
    ])
class SMBSessionTreeData(Packet):
    """Combined Session Setup AndX + Tree Connect AndX request.

    Carries pre-computed NTLM password hashes and connects to the IPC$
    share of the target.  calculate() fixes up all the length/offset
    fields after the variable-size values are converted to UTF-16.
    """
    fields = OrderedDict([
        ("Wordcount", "\x0d"),
        ("AndXCommand", "\x75"),  # chained Tree Connect AndX
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x7c\x00"),
        ("Maxbuff","\x04\x11"),
        ("Maxmpx", "\x32\x00"),
        ("Vcnum","\x00\x00"),
        ("Sessionkey", "\x00\x00\x00\x00"),
        ("AnsiPassLength","\x18\x00"),
        ("UnicodePassLength", "\x00\x00"),
        ("Reserved2","\x00\x00\x00\x00"),
        ("Capabilities", "\xd4\x00\x00\x00"),
        ("Bcc","\x3f\x00"),
        ("AnsiPasswd", "\xe3\xa7\x10\x56\x58\xed\x92\xa1\xea\x9d\x55\xb1\x63\x99\x7f\xbe\x1c\xbd\x6c\x0a\xf8\xef\xb2\x89"),
        ("UnicodePasswd", "\xe3\xa7\x10\x56\x58\xed\x92\xa1\xea\x9d\x55\xb1\x63\x99\x7f\xbe\x1c\xbd\x6c\x0a\xf8\xef\xb2\x89"),
        ("Username","Administrator"),
        ("UsernameTerminator","\x00\x00"),
        ("Domain","SMB"),
        ("DomainTerminator","\x00\x00"),
        ("Nativeos",""),
        ("NativeosTerminator","\x00\x00"),
        ("Lanmanager",""),
        ("LanmanagerTerminator","\x00\x00\x00"),
        ("Wordcount2","\x04"),
        ("Andxcmd2","\xff"),  # no further chained command
        ("Reserved3","\x00"),
        ("Andxoffset2","\x06\x01"),
        ("Flags","\x08\x00"),
        ("PasswordLength","\x01\x00"),
        ("Bcc2","\x19\x00"),
        ("Passwd","\x00"),
        ("PrePath","\\\\"),
        ("Targ", "CSCDSFCS"),
        ("IPC", "\\IPC$"),
        ("TerminatorPath","\x00\x00"),
        ("Service","?????"),  # "any service type"
        ("TerminatorService","\x00"),
    ])
    def calculate(self):
        ##Convert first
        self.fields["Username"] = self.fields["Username"].encode('utf-16be')
        self.fields["Domain"] = self.fields["Domain"].encode('utf-16be')
        self.fields["Nativeos"] = self.fields["Nativeos"].encode('utf-16be')
        self.fields["Lanmanager"] = self.fields["Lanmanager"].encode('utf-16be')
        self.fields["PrePath"] = self.fields["PrePath"].encode('utf-16le')
        self.fields["Targ"] = self.fields["Targ"].encode('utf-16le')
        self.fields["IPC"] = self.fields["IPC"].encode('utf-16le')
        ##Then calculate
        # NOTE(review): UnicodePasswd below is not wrapped in str() like the
        # surrounding fields; harmless on Python 2 since it is already a str.
        data1= str(self.fields["AnsiPasswd"])+(self.fields["UnicodePasswd"])+str(self.fields["Username"])+str(self.fields["UsernameTerminator"])+str(self.fields["Domain"])+str(self.fields["DomainTerminator"])+str(self.fields["Nativeos"])+str(self.fields["NativeosTerminator"])+str(self.fields["Lanmanager"])+str(self.fields["LanmanagerTerminator"])
        data2= str(self.fields["Passwd"])+str(self.fields["PrePath"])+str(self.fields["Targ"])+str(self.fields["IPC"])+str(self.fields["TerminatorPath"])+str(self.fields["Service"])+str(self.fields["TerminatorService"])
        self.fields["Bcc"] = struct.pack("<h",len(data1))
        self.fields["Bcc2"] = struct.pack("<h",len(data2))
        # 32 = SMB header size; 29 = fixed session-setup parameter bytes.
        self.fields["Andxoffset"] = struct.pack("<h",len(data1)+32+29)
        self.fields["AnsiPassLength"] = struct.pack("<h",len(str(self.fields["AnsiPasswd"])))
        self.fields["UnicodePassLength"] = struct.pack("<h",len(str(self.fields["UnicodePasswd"])))
        self.fields["PasswordLength"] = struct.pack("<h",len(str(self.fields["Passwd"])))
class SMBNTCreateData(Packet):
    """NT Create AndX request: opens the ``\\svcctl`` named pipe."""
    fields = OrderedDict([
        ("Wordcount", "\x18"),
        ("AndXCommand", "\xff"),  # no chained command
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("Reserved2", "\x00"),
        ("FileNameLen", "\x07\x00"),
        ("CreateFlags", "\x16\x00\x00\x00"),
        ("RootFID", "\x00\x00\x00\x00"),
        ("AccessMask", "\x00\x00\x00\x02"),
        ("AllocSize", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("FileAttrib", "\x00\x00\x00\x00"),
        ("ShareAccess", "\x07\x00\x00\x00"),
        ("Disposition", "\x01\x00\x00\x00"),
        ("CreateOptions", "\x00\x00\x00\x00"),
        ("Impersonation", "\x02\x00\x00\x00"),
        ("SecurityFlags", "\x00"),
        ("Bcc", "\x08\x00"),
        ("FileName", "\\svcctl"),
        ("FileNameNull", "\x00"),
    ])
    def calculate(self):
        # FileNameLen excludes the terminating null; Bcc includes it.
        Data1= str(self.fields["FileName"])+str(self.fields["FileNameNull"])
        self.fields["FileNameLen"] = struct.pack("<h",len(str(self.fields["FileName"])))
        self.fields["Bcc"] = struct.pack("<h",len(Data1))
class SMBReadData(Packet):
    """Read AndX request against an open FID."""
    fields = OrderedDict([
        ("Wordcount", "\x0a"),
        ("AndXCommand", "\xff"),  # no chained command
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("FID", "\x00\x00"),
        ("Offset", "\x19\x03\x00\x00"),
        ("MaxCountLow", "\xed\x01"),
        ("MinCount", "\xed\x01"),
        ("Hidden", "\xff\xff\xff\xff"),
        ("Remaining", "\x00\x00"),
        ("Bcc", "\x00\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        # Byte count covers only the trailing Data payload.
        self.fields["Bcc"] = struct.pack("<h",len(str(self.fields["Data"])))
class SMBWriteData(Packet):
    """Write AndX request; length fields are recomputed from ``Data``."""
    fields = OrderedDict([
        ("Wordcount", "\x0e"),
        ("AndXCommand", "\xff"),  # no chained command
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("FID", "\x06\x40"),
        ("Offset", "\xea\x03\x00\x00"),
        ("Reserved2", "\xff\xff\xff\xff"),
        ("WriteMode", "\x08\x00"),
        ("Remaining", "\xdc\x02"),
        ("DataLenHi", "\x00\x00"),
        ("DataLenLow", "\xdc\x02"),
        ("DataOffset", "\x3f\x00"),
        ("HiOffset", "\x00\x00\x00\x00"),
        ("Bcc", "\xdc\x02"),
        ("Data", ""),
    ])
    def calculate(self):
        # All three length fields mirror the payload size.
        self.fields["Remaining"] = struct.pack("<h",len(str(self.fields["Data"])))
        self.fields["DataLenLow"] = struct.pack("<h",len(str(self.fields["Data"])))
        self.fields["Bcc"] = struct.pack("<h",len(str(self.fields["Data"])))
class SMBDCEData(Packet):
    """DCE/RPC bind request (packet type 0x0b) for the SVCCTL interface."""
    fields = OrderedDict([
        ("Version", "\x05"),
        ("VersionLow", "\x00"),
        ("PacketType", "\x0b"),  # bind
        ("PacketFlag", "\x03"),  # first + last fragment
        ("DataRepresent", "\x10\x00\x00\x00"),
        ("FragLen", "\x2c\x02"),
        ("AuthLen", "\x00\x00"),
        ("CallID", "\x00\x00\x00\x00"),
        ("MaxTransFrag", "\xd0\x16"),
        ("MaxRecvFrag", "\xd0\x16"),
        ("GroupAssoc", "\x00\x00\x00\x00"),
        ("CTXNumber", "\x01"),
        ("CTXPadding", "\x00\x00\x00"),
        ("CTX0ContextID", "\x00\x00"),
        ("CTX0ItemNumber", "\x01\x00"),
        ("CTX0UID", "\x81\xbb\x7a\x36\x44\x98\xf1\x35\xad\x32\x98\xf0\x38\x00\x10\x03"),  # SVCCTL interface UUID
        ("CTX0UIDVersion", "\x02\x00"),
        ("CTX0UIDVersionlo","\x00\x00"),
        ("CTX0UIDSyntax", "\x04\x5d\x88\x8a\xeb\x1c\xc9\x11\x9f\xe8\x08\x00\x2b\x10\x48\x60"),  # NDR transfer syntax
        ("CTX0UIDSyntaxVer","\x02\x00\x00\x00"),
    ])
    def calculate(self):
        # Fragment length spans the entire serialized PDU.
        Data1= str(self.fields["Version"])+str(self.fields["VersionLow"])+str(self.fields["PacketType"])+str(self.fields["PacketFlag"])+str(self.fields["DataRepresent"])+str(self.fields["FragLen"])+str(self.fields["AuthLen"])+str(self.fields["CallID"])+str(self.fields["MaxTransFrag"])+str(self.fields["MaxRecvFrag"])+str(self.fields["GroupAssoc"])+str(self.fields["CTXNumber"])+str(self.fields["CTXPadding"])+str(self.fields["CTX0ContextID"])+str(self.fields["CTX0ItemNumber"])+str(self.fields["CTX0UID"])+str(self.fields["CTX0UIDVersion"])+str(self.fields["CTX0UIDVersionlo"])+str(self.fields["CTX0UIDSyntax"])+str(self.fields["CTX0UIDSyntaxVer"])
        self.fields["FragLen"] = struct.pack("<h",len(Data1))
class SMBDCEPacketData(Packet):
    """DCE/RPC request PDU (packet type 0x00) wrapping an operation call."""
    fields = OrderedDict([
        ("Version", "\x05"),
        ("VersionLow", "\x00"),
        ("PacketType", "\x00"),  # request
        ("PacketFlag", "\x03"),  # first + last fragment
        ("DataRepresent", "\x10\x00\x00\x00"),
        ("FragLen", "\x2c\x02"),
        ("AuthLen", "\x00\x00"),
        ("CallID", "\x00\x00\x00\x00"),
        ("AllocHint", "\x38\x00\x00\x00"),
        ("ContextID", "\x00\x00"),
        ("Opnum", "\x0f\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        # FragLen spans the whole PDU; AllocHint only the stub data.
        Data1= str(self.fields["Version"])+str(self.fields["VersionLow"])+str(self.fields["PacketType"])+str(self.fields["PacketFlag"])+str(self.fields["DataRepresent"])+str(self.fields["FragLen"])+str(self.fields["AuthLen"])+str(self.fields["CallID"])+str(self.fields["AllocHint"])+str(self.fields["ContextID"])+str(self.fields["Opnum"])+str(self.fields["Data"])
        self.fields["FragLen"] = struct.pack("<h",len(Data1))
        self.fields["AllocHint"] = struct.pack("<i",len(str(self.fields["Data"])))
class SMBDCESVCCTLOpenManagerW(Packet):
    """SVCCTL OpenSCManagerW call stub data."""
    fields = OrderedDict([
        ("MachineNameRefID", "\xb5\x97\xb9\xbc"),
        ("MaxCount", "\x0f\x00\x00\x00"),
        ("Offset", "\x00\x00\x00\x00"),
        ("ActualCount", "\x0f\x00\x00\x00"),
        ("MachineName", "\\\\169.220.1.11"),##This is not taken into consideration.
        ("MachineNameNull", "\x00\x00\x00\x00"),
        ("DbPointer", "\x00\x00\x00\x00"),
        ("AccessMask", "\x3f\x00\x0f\x00"),
    ])
    def calculate(self):
        ## Convert to UTF-16LE
        self.fields["MachineName"] = self.fields["MachineName"].encode('utf-16le')
class SMBDCESVCCTLCreateService(Packet):
    """SVCCTL CreateServiceW call stub: registers a service whose binary
    path runs ``BinCMD`` through %COMSPEC%.

    calculate() must fix the count fields *before* the UTF-16LE
    conversion, since the counts are expressed in characters.
    """
    fields = OrderedDict([
        ("ContextHandle", ""),
        ("MaxCount", "\x0c\x00\x00\x00"),
        ("Offset", "\x00\x00\x00\x00"),
        ("ActualCount", "\x0c\x00\x00\x00"),
        ("ServiceName", "AyAGaxwLhCP"),
        ("MachineNameNull", "\x00\x00"),
        ("ReferentID", "\x9c\xfa\x9a\xc9"),
        ("MaxCountRefID", "\x11\x00\x00\x00"),
        ("OffsetID", "\x00\x00\x00\x00"),
        ("ActualCountRefID", "\x11\x00\x00\x00"),
        ("DisplayNameID", "DhhUFcsvrfJvLwRq"),
        ("DisplayNameIDNull", "\x00\x00\x00\x00"),
        ("AccessMask", "\xff\x01\x0f\x00"),
        ("ServerType", "\x10\x01\x00\x00"),
        ("ServiceStartType", "\x03\x00\x00\x00"),
        ("ServiceErrorCtl", "\x00\x00\x00\x00"),
        ("BinPathMaxCount", "\xb6\x00\x00\x00"),
        ("BinPathOffset", "\x00\x00\x00\x00"),
        ("BinPathActualCount", "\xb6\x00\x00\x00"),
        ("BinPathName", "%COMSPEC% /C \""),
        ("BinCMD", ""),
        ("BintoEnd", "\""),
        ("BinPathNameNull", "\x00\x00"),
        ("Nullz", "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
    ])
    def calculate(self):
        BinDataLen = str(self.fields["BinPathName"])+str(self.fields["BinCMD"])+str(self.fields["BintoEnd"])
        ## Calculate first
        # Counts include the terminating null character (+1).
        self.fields["BinPathMaxCount"] = struct.pack("<i",len(BinDataLen)+1)
        self.fields["BinPathActualCount"] = struct.pack("<i",len(BinDataLen)+1)
        self.fields["MaxCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        self.fields["ActualCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        self.fields["MaxCountRefID"] = struct.pack("<i",len(str(self.fields["DisplayNameID"]))+1)
        self.fields["ActualCountRefID"] = struct.pack("<i",len(str(self.fields["DisplayNameID"]))+1)
        ## Then convert to UTF-16LE, yeah it's weird..
        self.fields["ServiceName"] = self.fields["ServiceName"].encode('utf-16le')
        self.fields["DisplayNameID"] = self.fields["DisplayNameID"].encode('utf-16le')
        self.fields["BinPathName"] = self.fields["BinPathName"].encode('utf-16le')
        self.fields["BinCMD"] = self.fields["BinCMD"].encode('utf-16le')
        self.fields["BintoEnd"] = self.fields["BintoEnd"].encode('utf-16le')
class SMBDCESVCCTLOpenService(Packet):
    """SVCCTL OpenServiceW call stub data."""
    fields = OrderedDict([
        ("ContextHandle", ""),
        ("MaxCount", "\x0c\x00\x00\x00"),
        ("Offset", "\x00\x00\x00\x00"),
        ("ActualCount", "\x0c\x00\x00\x00"),
        ("ServiceName", ""),
        ("MachineNameNull", "\x00\x00"),
        ("AccessMask", "\xff\x01\x0f\x00"),
    ])
    def calculate(self):
        ## Calculate first
        # Character counts include the terminating null (+1), so they must
        # be computed before the UTF-16LE conversion doubles the byte size.
        self.fields["MaxCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        self.fields["ActualCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        ## Then convert to UTF-16LE, yeah it's weird..
        self.fields["ServiceName"] = self.fields["ServiceName"].encode('utf-16le')
class SMBDCESVCCTLStartService(Packet):
    """SVCCTL StartServiceW call stub: handle plus zeroed argument counts."""
    fields = OrderedDict([
        ("ContextHandle", ""),
        ("MaxCount", "\x00\x00\x00\x00\x00\x00\x00\x00"),
    ])
def ParseAnswerKey(data, host):
    """Extract the 8-byte encryption key from an SMB negotiate answer.

    The key sits at a fixed offset (bytes 73-80) of the raw response
    buffer; the slice is returned unchanged after being logged.
    """
    import binascii
    key = data[73:81]
    # print() with a single argument is valid on both Python 2 and 3, and
    # binascii.hexlify replaces the Python2-only str.encode("hex") (output
    # is identical on Python 2).
    print("Key retrieved is:%s from host:%s" % (binascii.hexlify(key), host))
    return key
##################################################################################
#SMB Server Stuff
##################################################################################
#Calculate total SMB packet len.
# NOTE(review): this redefines the identical longueur() from the client
# section above; only this later definition is visible after import.
def longueur(payload):
    """Pack the length of the concatenated payload as a big-endian int32."""
    total = len("".join(payload))
    return struct.pack(">i", total)
#Set MID SMB Header field.
def midcalc(data):
    """Return the MID (multiplex ID) bytes at offsets 34-35 of a raw SMB packet."""
    return data[34:36]
#Set UID SMB Header field.
def uidcalc(data):
    """Return the UID (user ID) bytes at offsets 32-33 of a raw SMB packet."""
    return data[32:34]
#Set PID SMB Header field.
def pidcalc(data):
    """Return the PID (process ID) bytes at offsets 30-31 of a raw SMB packet."""
    return data[30:32]
#Set TID SMB Header field.
def tidcalc(data):
    """Return the TID (tree ID) bytes at offsets 28-29 of a raw SMB packet."""
    return data[28:30]
#SMB Header answer packet.
class SMBHeader(Packet):
    """32-byte SMB header template for server-side answer packets.

    NOTE(review): this rebinds the ``SMBHeader`` name defined earlier in
    the client section of this file.
    """
    fields = OrderedDict([
        ("proto", "\xff\x53\x4d\x42"),  # "\xffSMB" protocol magic
        ("cmd", "\x72"),
        ("errorcode", "\x00\x00\x00\x00" ),
        ("flag1", "\x80"),  # reply flag set
        ("flag2", "\x00\x00"),
        ("pidhigh", "\x00\x00"),
        ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("reserved", "\x00\x00"),
        ("tid", "\x00\x00"),
        ("pid", "\xff\xfe"),
        ("uid", "\x00\x00"),
        ("mid", "\x00\x00"),
    ])
#SMB Negotiate Answer packet.
class SMBNegoAns(Packet):
    """Negotiate Protocol response with an 8-byte challenge key."""
    fields = OrderedDict([
        ("Wordcount", "\x11"),
        ("Dialect", ""),
        ("Securitymode", "\x03"),
        ("MaxMpx", "\x32\x00"),
        ("MaxVc", "\x01\x00"),
        ("Maxbuffsize", "\x04\x11\x00\x00"),
        ("Maxrawbuff", "\x00\x00\x01\x00"),
        ("Sessionkey", "\x00\x00\x00\x00"),
        ("Capabilities", "\xfd\x43\x00\x00"),
        ("Systemtime", "\xc2\x74\xf2\x53\x70\x02\xcf\x01\x2c\x01"),
        ("Keylength", "\x08"),
        ("Bcc", "\x10\x00"),
        ("Key", "\x0d\x0d\x0d\x0d\x0d\x0d\x0d\x0d"),  # static challenge
        ("Domain", ""),
    ])
    def calculate(self):
        ##Then calculate.
        CompleteBCCLen = str(self.fields["Key"])+str(self.fields["Domain"])
        self.fields["Bcc"] = struct.pack("<h",len(CompleteBCCLen))
        # NOTE(review): indexing the packed value with [0] yields a one-byte
        # str on Python 2 but an int on Python 3 — this line is py2-only.
        self.fields["Keylength"] = struct.pack("<h",len(self.fields["Key"]))[0]
# SMB Session/Tree Answer.
class SMBSessTreeAns(Packet):
    """Combined Session Setup AndX + Tree Connect AndX response."""
    fields = OrderedDict([
        ("Wordcount", "\x03"),
        ("Command", "\x75"),  # chained Tree Connect AndX
        ("Reserved", "\x00"),
        ("AndXoffset", "\x4e\x00"),
        ("Action", "\x01\x00"),
        ("Bcc", "\x25\x00"),
        ("NativeOs", "Windows 5.1"),
        ("NativeOsNull", "\x00"),
        ("NativeLan", "Windows 2000 LAN Manager"),
        ("NativeLanNull", "\x00"),
        ("WordcountTree", "\x03"),
        ("AndXCommand", "\xff"),  # no further chained command
        ("Reserved1", "\x00"),
        ("AndxOffset", "\x00\x00"),
        ("OptionalSupport", "\x01\x00"),
        ("Bcc2", "\x08\x00"),
        ("Service", "A:"),
        ("ServiceNull", "\x00"),
        ("FileSystem", "NTFS"),
        ("FileSystemNull", "\x00"),
    ])
    def calculate(self):
        ##AndxOffset
        CalculateCompletePacket = str(self.fields["Wordcount"])+str(self.fields["Command"])+str(self.fields["Reserved"])+str(self.fields["AndXoffset"])+str(self.fields["Action"])+str(self.fields["Bcc"])+str(self.fields["NativeOs"])+str(self.fields["NativeOsNull"])+str(self.fields["NativeLan"])+str(self.fields["NativeLanNull"])
        self.fields["AndXoffset"] = struct.pack("<i", len(CalculateCompletePacket)+32)[:2]#SMB Header is *always* 32.
        ##BCC 1 and 2
        CompleteBCCLen = str(self.fields["NativeOs"])+str(self.fields["NativeOsNull"])+str(self.fields["NativeLan"])+str(self.fields["NativeLanNull"])
        self.fields["Bcc"] = struct.pack("<h",len(CompleteBCCLen))
        CompleteBCC2Len = str(self.fields["Service"])+str(self.fields["ServiceNull"])+str(self.fields["FileSystem"])+str(self.fields["FileSystemNull"])
        self.fields["Bcc2"] = struct.pack("<h",len(CompleteBCC2Len))
class SMBSessEmpty(Packet):
    """Empty session answer: three null padding bytes."""
    fields = OrderedDict([
        ("Empty", "\x00\x00\x00"),
    ])
| gpl-3.0 |
Lemma1/MAC-POSTS | doc_builder/sphinx-contrib/matlabdomain/tests/test_mat_types.py | 1 | 4777 | #! /usr/bin/env python
from sphinxcontrib import mat_documenters as doc
from nose.tools import eq_, ok_
import os
from pprint import pprint
DIRNAME = doc.MatObject.basedir = os.path.abspath(os.path.dirname(__file__))
def test_ellipsis_after_equals():
    """
    test function with ellipsis after equals
    """
    # Load the MATLAB test package and drill into its submodule.
    submodule = doc.MatObject.matlabify('test_data').getter('test_submodule')
    ok_(isinstance(submodule, doc.MatModule))
    eq_(submodule.__package__, 'test_data.test_submodule')
    f = submodule.getter('f_ellipsis_after_equals')
    ok_(isinstance(f, doc.MatFunction))
    eq_(f.retv, ['output'])
    eq_(f.args, ['arg'])
    return f
def test_no_args():
    """
    test function with no args
    """
    # Load the MATLAB test package and drill into its submodule.
    submodule = doc.MatObject.matlabify('test_data').getter('test_submodule')
    ok_(isinstance(submodule, doc.MatModule))
    eq_(submodule.__package__, 'test_data.test_submodule')
    f = submodule.getter('f_no_args')
    ok_(isinstance(f, doc.MatFunction))
    eq_(f.retv, ['output', 'with', 'ellipsis'])
    ok_(not f.args)
    return f
def test_no_outputs():
    """
    test function with no outputs
    """
    # Load the MATLAB test package and drill into its submodule.
    submodule = doc.MatObject.matlabify('test_data').getter('test_submodule')
    ok_(isinstance(submodule, doc.MatModule))
    eq_(submodule.__package__, 'test_data.test_submodule')
    f = submodule.getter('f_no_outputs')
    ok_(isinstance(f, doc.MatFunction))
    ok_(not f.retv)
    eq_(f.args, ['arg'])
    return f
def test_output_with_ellipsis():
    """
    test function output with ellipsis
    """
    # Load the MATLAB test package and drill into its submodule.
    submodule = doc.MatObject.matlabify('test_data').getter('test_submodule')
    ok_(isinstance(submodule, doc.MatModule))
    eq_(submodule.__package__, 'test_data.test_submodule')
    f = submodule.getter('f_output_with_ellipsis')
    ok_(isinstance(f, doc.MatFunction))
    eq_(f.retv, ['output', 'with', 'ellipsis'])
    eq_(f.args, ['arg'])
    return f
def test_output_without_commas():
    """
    test function output without commas
    """
    # Load the MATLAB test package and drill into its submodule.
    submodule = doc.MatObject.matlabify('test_data').getter('test_submodule')
    ok_(isinstance(submodule, doc.MatModule))
    eq_(submodule.__package__, 'test_data.test_submodule')
    f = submodule.getter('f_output_without_commas')
    ok_(isinstance(f, doc.MatFunction))
    eq_(f.retv, ['output', 'with', 'ellipsis'])
    eq_(f.args, ['arg'])
    return f
def test_inheritance():
    """
    test inheritance from different module
    """
    # Resolve the class that inherits from classes in another module.
    sfdm = (
        doc.MatObject.matlabify('test_data')
        .getter('test_submodule')
        .getter('super_from_diff_mod'))
    ok_(isinstance(sfdm, doc.MatClass))
    eq_(sfdm.bases, ['MyAbstractClass', 'MyHandleClass'])
    bases = sfdm.getter('__bases__')
    eq_(bases['MyAbstractClass'].module, 'test_data')
    eq_(bases['MyHandleClass'].module, 'test_data')
    return sfdm
def test_property_with_ellipsis():
    """
    test class property with ellipsis in an array or in an expression
    """
    ellipsis_class = doc.MatObject.matlabify('test_data').getter('EllipsisProperties')
    ok_(isinstance(ellipsis_class, doc.MatClass))
    # Each property default must round-trip through the getter unchanged.
    props = []
    for prop_name in ('A', 'B', 'C'):
        prop = ellipsis_class.getter(prop_name)
        eq_(ellipsis_class.properties[prop_name]['default'], prop.default)
        props.append(prop)
    A, B, C = props
    return ellipsis_class, A, B, C
if __name__ == '__main__':
    # Run each test directly and show details of the returned objects.
    # BUGFIX: test_ellipsis_after_equals was previously referenced without
    # calling it (missing parentheses), so f1 held the test function itself
    # and its __name__/__module__/__doc__ were printed instead of the
    # MatFunction's — the test never actually ran here.
    # print(x) with a single argument behaves identically on Python 2 and 3.
    f1 = test_ellipsis_after_equals()
    print(f1.__name__)
    print(f1.__module__)
    print(f1.__doc__)
    f2 = test_no_args()
    print(f2.__name__)
    print(f2.__module__)
    print(f2.__doc__)
    f3 = test_no_outputs()
    print(f3.__name__)
    print(f3.__module__)
    print(f3.__doc__)
    f4 = test_output_with_ellipsis()
    print(f4.__name__)
    print(f4.__module__)
    print(f4.__doc__)
    f5 = test_output_without_commas()
    print(f5.__name__)
    print(f5.__module__)
    print(f5.__doc__)
    sfdm = test_inheritance()
    print(sfdm.__name__)
    print(sfdm.__module__)
    print(sfdm.__doc__)
    ep, A, B, C = test_property_with_ellipsis()
    print(ep.__name__)
    print(ep.__module__)
    print(ep.__doc__)
    pprint(A.__dict__)
    pprint(B.__dict__)
    pprint(C.__dict__)
| mit |
spicykaiju/pyvmomi | pyVmomi/SoapAdapter.py | 7 | 61763 | # VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from six import PY2
from six import PY3
from six import reraise
from six.moves import http_client
if PY3:
long = int
basestring = str
from six import u
import sys
import os
import socket
import subprocess
import time
from six.moves.urllib.parse import urlparse
from datetime import datetime
from xml.parsers.expat import ParserCreate
# We have our own escape functionality.
# from xml.sax.saxutils import escape
if PY2:
from cStringIO import StringIO
if PY3:
from io import StringIO
from pyVmomi.VmomiSupport import *
from pyVmomi.StubAdapterAccessorImpl import StubAdapterAccessorMixin
import pyVmomi.Iso8601
import base64
from xml.parsers.expat import ExpatError
import copy
import contextlib
try:
    # Running inside ESXi's UserWorld environment if the OS name reported
    # by uname() is 'VMkernel'.
    USERWORLD = os.uname()[0] == 'VMkernel'
except Exception:
    # os.uname() is unavailable on some platforms (e.g. Windows raises
    # AttributeError).  Catch Exception rather than the previous bare
    # "except:", which also swallowed SystemExit/KeyboardInterrupt.
    USERWORLD = False
# Timeout value used for idle connections in client connection pool.
# Default value is 900 seconds (15 minutes).
CONNECTION_POOL_IDLE_TIMEOUT_SEC = 900
# Separator between an XML namespace URI and a local name, as produced by
# expat's namespace-aware element names.
NS_SEP = " "
XML_ENCODING = 'UTF-8'
XML_HEADER = '<?xml version="1.0" encoding="{0}"?>'.format(XML_ENCODING)
XMLNS_SOAPENC = "http://schemas.xmlsoap.org/soap/encoding/"
XMLNS_SOAPENV = "http://schemas.xmlsoap.org/soap/envelope/"
# Fully-qualified xsi:type attribute name (XMLNS_XSI imported from
# VmomiSupport via the star import above).
XSI_TYPE = XMLNS_XSI + NS_SEP + u('type')
# Note: Must make a copy to use the SOAP_NSMAP
# TODO: Change to frozendict, if available
SOAP_NSMAP = { XMLNS_SOAPENC: 'soapenc', XMLNS_SOAPENV: 'soapenv',
               XMLNS_XSI: 'xsi', XMLNS_XSD: 'xsd' }
# Pre-built SOAP envelope/header/body tag names and document fragments.
SOAP_ENVELOPE_TAG = "{0}:Envelope".format(SOAP_NSMAP[XMLNS_SOAPENV])
SOAP_HEADER_TAG = "{0}:Header".format(SOAP_NSMAP[XMLNS_SOAPENV])
SOAP_FAULT_TAG = "{0}:Fault".format(SOAP_NSMAP[XMLNS_SOAPENV])
SOAP_BODY_TAG = "{0}:Body".format(SOAP_NSMAP[XMLNS_SOAPENV])
SOAP_ENVELOPE_START = '<{0} '.format(SOAP_ENVELOPE_TAG) + \
                      ' '.join(['xmlns:' + prefix + '="' + urn + '"' \
                                for urn, prefix in iteritems(SOAP_NSMAP)]) + \
                      '>\n'
SOAP_ENVELOPE_END = "\n</{0}>".format(SOAP_ENVELOPE_TAG)
SOAP_HEADER_START = "<{0}>".format(SOAP_HEADER_TAG)
SOAP_HEADER_END = "</{0}>".format(SOAP_HEADER_TAG)
SOAP_BODY_START = "<{0}>".format(SOAP_BODY_TAG)
SOAP_BODY_END = "</{0}>".format(SOAP_BODY_TAG)
SOAP_START = SOAP_ENVELOPE_START + SOAP_BODY_START + '\n'
SOAP_END = '\n' + SOAP_BODY_END + SOAP_ENVELOPE_END
# WS-Security (WSSE) header constants.
WSSE_PREFIX = "wsse"
WSSE_HEADER_TAG = "{0}:Security".format(WSSE_PREFIX)
WSSE_NS_URL = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
WSSE_NS = 'xmlns:{0}="{1}"'.format(WSSE_PREFIX, WSSE_NS_URL)
WSSE_HEADER_START = "<{0} {1}>".format(WSSE_HEADER_TAG, WSSE_NS)
WSSE_HEADER_END = "</{0}>".format(WSSE_HEADER_TAG)
## MethodFault type
MethodFault = GetVmodlType("vmodl.MethodFault")
## Localized MethodFault type
LocalizedMethodFault = GetVmodlType("vmodl.LocalizedMethodFault")
def encode(string, encoding):
    """Encode to bytes on Python 2; return a unicode string on Python 3."""
    return string.encode(encoding) if PY2 else u(string)
## Escape <, >, &
def XmlEscape(xmlStr):
    """Return *xmlStr* with the XML special characters escaped.

    '&' must be replaced first so the ampersands introduced by the other
    substitutions are not escaped again.  BUGFIX: the entity references had
    been mangled into identity replacements (e.g. replace("&", "&")),
    making this function a no-op; the proper entities are restored.
    """
    escaped = xmlStr.replace("&", "&amp;").replace(">", "&gt;").replace("<", "&lt;")
    return escaped
## Get the start tag, end tag, and text handlers of a class
def GetHandlers(obj):
    """Collect the five expat handler callbacks of *obj* as a tuple."""
    handler_names = ("StartElementHandler",
                     "EndElementHandler",
                     "CharacterDataHandler",
                     "StartNamespaceDeclHandler",
                     "EndNamespaceDeclHandler")
    return tuple(getattr(obj, name) for name in handler_names)
## Set the start tag, end tag, and text handlers of a parser
def SetHandlers(obj, handlers):
    """Install the five expat handler callbacks from *handlers* onto *obj*."""
    handler_names = ("StartElementHandler",
                     "EndElementHandler",
                     "CharacterDataHandler",
                     "StartNamespaceDeclHandler",
                     "EndNamespaceDeclHandler")
    for name, handler in zip(handler_names, handlers):
        setattr(obj, name, handler)
## Serialize an object
#
# This function assumes CheckField(info, val) was already called
# @param val the value to serialize
# @param info the field
# @param version the version
# @param nsMap a dict of xml ns -> prefix
# @return the serialized object as a string
def Serialize(val, info=None, version=None, nsMap=None, encoding=None):
    """Serialize *val* to SOAP XML text and return it."""
    if version is None:
        if val is None:
            # Neither a value nor a version to work from: nothing to emit
            return ''
        try:
            # For arrays, take the version from the item type
            source = val.Item if isinstance(val, list) else val
            version = source._version
        except AttributeError:
            version = BASE_VERSION
    if info is None:
        info = Object(name="object", type=object, version=version, flags=0)
    writer = StringIO()
    SoapSerializer(writer, version, nsMap, encoding).Serialize(val, info)
    return writer.getvalue()
## Serialize fault detail
#
# Serializes a fault as the content of the detail element in a
# soapenv:Fault (i.e. without a LocalizedMethodFault wrapper).
#
# This function assumes CheckField(info, val) was already called
# @param val the value to serialize
# @param info the field
# @param version the version
# @param nsMap a dict of xml ns -> prefix
# @return the serialized object as a string
def SerializeFaultDetail(val, info=None, version=None, nsMap=None, encoding=None):
    """Serialize a MethodFault for a soapenv:Fault detail element."""
    if version is None:
        if not isinstance(val, MethodFault):
            raise TypeError('{0} is not a MethodFault'.format(str(val)))
        # Fall back to the base version when the fault carries no version
        version = getattr(val, '_version', BASE_VERSION)
    if info is None:
        info = Object(name="object", type=object, version=version, flags=0)
    writer = StringIO()
    serializer = SoapSerializer(writer, version, nsMap, encoding)
    serializer.SerializeFaultDetail(val, info)
    return writer.getvalue()
## SOAP serializer
#
class SoapSerializer:
    """ SoapSerializer

    Serializes VMOMI values to SOAP XML, writing directly to the supplied
    file-like writer.  The output is not wrapped in a SOAP envelope; callers
    add the framing themselves.
    """

    ## Serializer constructor
    #
    # @param writer File writer
    # @param version the version
    # @param nsMap a dict of xml ns -> prefix
    # @param encoding output text encoding (defaults to XML_ENCODING)
    def __init__(self, writer, version, nsMap, encoding):
        """ Constructor """
        self.writer = writer
        self.version = version
        self.nsMap = nsMap and nsMap or {}
        self.encoding = encoding and encoding or XML_ENCODING
        # The namespace mapped to the empty prefix, if any, is the default ns
        for ns, prefix in iteritems(self.nsMap):
            if prefix == '':
                self.defaultNS = ns
                break
        else:
            self.defaultNS = ''

        # Additional attr for outermost tag
        self.outermostAttrs = ''

        # Fill in required xmlns, if not defined
        for nsPrefix, ns, attrName in [('xsi', XMLNS_XSI, 'xsiPrefix'),
                                       ('xsd', XMLNS_XSD, 'xsdPrefix')]:
            prefix = self.nsMap.get(ns)
            if not prefix:
                prefix = nsPrefix
                # Declare the missing xmlns on the outermost emitted tag and
                # record it in a private copy so the caller's map is untouched
                self.outermostAttrs += ' xmlns:{0}="{1}"'.format(prefix, ns)
                self.nsMap = self.nsMap.copy()
                self.nsMap[ns] = prefix
            setattr(self, attrName, prefix + ":")

    ## Serialize an object
    #
    # This function assumes CheckField(info, val) was already called
    # @param val the value to serialize
    # @param info the field
    def Serialize(self, val, info):
        """ Serialize an object """
        self._Serialize(val, info, self.defaultNS)

    ## Serialize fault detail
    #
    # Serializes a fault as the content of the detail element in a
    # soapenv:Fault (i.e. without a LocalizedMethodFault wrapper).
    #
    # This function assumes CheckField(info, val) was already called
    # @param val the value to serialize
    # @param info the field
    def SerializeFaultDetail(self, val, info):
        """ Serialize a fault as the content of a detail element """
        self._SerializeDataObject(val, info, '', self.defaultNS)

    def _NSPrefix(self, ns):
        """ Get xml ns prefix. self.nsMap must be set """
        if ns == self.defaultNS:
            return ''
        prefix = self.nsMap[ns]
        return prefix and prefix + ':' or ''

    def _QName(self, typ, defNS):
        """ Get fully qualified wsdl name (prefix:name) """
        attr = ''
        ns, name = GetQualifiedWsdlName(typ)
        if ns == defNS:
            prefix = ''
        else:
            try:
                prefix = self.nsMap[ns]
            except KeyError:
                # We have not seen this ns before
                prefix = ns.split(':', 1)[-1]
                attr = ' xmlns:{0}="{1}"'.format(prefix, ns)
        return attr, prefix and prefix + ':' + name or name

    ## Serialize an object (internal)
    #
    # @param val the value to serialize
    # @param info the field
    # @param defNS the default namespace
    def _Serialize(self, val, info, defNS):
        """ Serialize an object """
        # Fields not present in the negotiated version are silently skipped
        if not IsChildVersion(self.version, info.version):
            return

        if val is None:
            if info.flags & F_OPTIONAL:
                return
            else:
                raise TypeError('Field "{0}" is not optional'.format(info.name))
        elif isinstance(val, list) and len(val) == 0:
            if info.type is object:
                # Make sure an empty array assigned to Any is typed
                if not isinstance(val, Array):
                    raise TypeError('Field "{0}": Cannot assign empty native python array to an Any'.format(info.name))
            elif info.flags & F_OPTIONAL:
                # Skip optional non-Any
                return
            else:
                raise TypeError('Field "{0}" not optional'.format(info.name))

        if self.outermostAttrs:
            # Emit the accumulated xmlns declarations once, on the first tag
            attr = self.outermostAttrs
            self.outermostAttrs = None
        else:
            attr = ''

        currDefNS = defNS
        # Emit default ns if tag ns is not the same
        currTagNS = GetWsdlNamespace(info.version)
        if currTagNS != defNS:
            attr += ' xmlns="{0}"'.format(currTagNS)
            currDefNS = currTagNS

        if isinstance(val, DataObject):
            if isinstance(val, MethodFault):
                # Faults are wrapped in LocalizedMethodFault on the wire
                newVal = LocalizedMethodFault(fault=val, localizedMessage=val.msg)
                if info.type is object:
                    faultType = object
                else:
                    faultType = LocalizedMethodFault
                newInfo = Object(name=info.name, type=faultType,
                                 version=info.version, flags=info.flags)
                self._SerializeDataObject(newVal, newInfo, attr, currDefNS)
            else:
                self._SerializeDataObject(val, info, attr, currDefNS)
        elif isinstance(val, ManagedObject):
            if info.type is object:
                nsattr, qName = self._QName(ManagedObject, currDefNS)
                attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
            if val._serverGuid is not None:
                attr += ' serverGuid="{0}"'.format(val._serverGuid)
            # val in vim type attr is not namespace qualified
            # TODO: Add a new "typens" attr?
            ns, name = GetQualifiedWsdlName(Type(val))
            attr += ' type="{0}"'.format(name)
            self.writer.write('<{0}{1}>{2}</{3}>'.format(info.name, attr,
                                                         encode(val._moId, self.encoding),
                                                         info.name))
        elif isinstance(val, list):
            if info.type is object:
                itemType = val.Item
                if (itemType is ManagedMethod or itemType is PropertyPath
                        or itemType is type):
                    tag = 'string'
                    typ = GetVmodlType("string[]")
                elif issubclass(itemType, ManagedObject):
                    tag = 'ManagedObjectReference'
                    typ = ManagedObject.Array
                else:
                    tag = GetWsdlName(itemType)
                    typ = Type(val)
                nsattr, qName = self._QName(typ, currDefNS)
                # For WSDL, since we set tag of ManagedObjects to ManagedObjectReferences,
                # the name of its array should be ArrayOfManagedObjectReference
                if qName.endswith("ArrayOfManagedObject"):
                    qName += "Reference"
                attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
                self.writer.write('<{0}{1}>'.format(info.name, attr))
                itemInfo = Object(name=tag, type=itemType,
                                  version=info.version, flags=info.flags)
                for it in val:
                    self._Serialize(it, itemInfo, currDefNS)
                self.writer.write('</{0}>'.format(info.name))
            else:
                itemType = info.type.Item
                itemInfo = Object(name=info.name, type=itemType,
                                  version=info.version, flags=info.flags)
                for it in val:
                    self._Serialize(it, itemInfo, defNS)
        elif isinstance(val, type) or isinstance(val, type(Exception)):
            if info.type is object:
                attr += ' {0}type="{1}string"'.format(self.xsiPrefix, self.xsdPrefix)
            self.writer.write('<{0}{1}>{2}</{0}>'.format(
                info.name, attr, GetWsdlName(val)))
        elif isinstance(val, ManagedMethod):
            if info.type is object:
                attr += ' {0}type="{1}string"'.format(self.xsiPrefix, self.xsdPrefix)
            self.writer.write('<{0}{1}>{2}</{0}>'.format(
                info.name, attr, val.info.wsdlName))
        elif isinstance(val, datetime):
            if info.type is object:
                nsattr, qName = self._QName(Type(val), currDefNS)
                attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
            result = Iso8601.ISO8601Format(val)
            self.writer.write('<{0}{1}>{2}</{0}>'.format(info.name, attr, result))
        elif isinstance(val, binary):
            if info.type is object:
                nsattr, qName = self._QName(Type(val), currDefNS)
                attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
            # NOTE(review): b64encode returns bytes on Python 3 — confirm the
            # writer accepts bytes here
            result = base64.b64encode(val)
            self.writer.write('<{0}{1}>{2}</{0}>'.format(info.name, attr, result))
        elif isinstance(val, bool):
            if info.type is object:
                nsattr, qName = self._QName(Type(val), currDefNS)
                attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
            result = val and "true" or "false"
            self.writer.write('<{0}{1}>{2}</{0}>'.format(info.name, attr, result))
        else:
            if info.type is object:
                if isinstance(val, PropertyPath):
                    attr += ' {0}type="{1}string"'.format(self.xsiPrefix, self.xsdPrefix)
                else:
                    nsattr, qName = self._QName(Type(val), currDefNS)
                    attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
            if not isinstance(val, text_type):
                # Use UTF-8 rather than self.encoding. self.encoding is for
                # output of serializer, while 'val' is our input. And regardless
                # of what our output is, our input should be always UTF-8. Yes,
                # it means that if you emit output in other encoding than UTF-8,
                # you cannot serialize it again once more. That's feature, not
                # a bug.
                val = str(val)
                if PY2:
                    val = val.decode('UTF-8')
            result = XmlEscape(val)
            self.writer.write('<{0}{1}>{2}</{0}>'.format(info.name, attr,
                                                         encode(result,
                                                                self.encoding)))

    ## Serialize a data object (internal)
    #
    # @param val the value to serialize
    # @param info the field
    # @param attr attributes to serialize in the outermost element
    # @param currDefNS the current default namespace
    def _SerializeDataObject(self, val, info, attr, currDefNS):
        if info.flags & F_LINK:
            # Attribute is a link and Object is present instead of its key.
            # We need to serialize just the key and not the entire object
            self._Serialize(val.key, info, currDefNS)
            return
        dynType = GetCompatibleType(Type(val), self.version)
        if dynType != info.type:
            nsattr, qName = self._QName(dynType, currDefNS)
            attr += '{0} {1}type="{2}"'.format(nsattr, self.xsiPrefix, qName)
        self.writer.write('<{0}{1}>'.format(info.name, attr))
        if dynType is LocalizedMethodFault:
            # Serialize a MethodFault as LocalizedMethodFault on wire
            # See PR 670229
            for prop in val._GetPropertyList():
                propVal = getattr(val, prop.name)
                if prop.name == 'fault':
                    # Copy so clearing msg does not mutate the caller's fault
                    propVal = copy.copy(propVal)
                    propVal.msg = None
                    self._SerializeDataObject(propVal, prop, '', currDefNS)
                else:
                    self._Serialize(propVal, prop, currDefNS)
        else:
            for prop in val._GetPropertyList():
                self._Serialize(getattr(val, prop.name), prop, currDefNS)
        self.writer.write('</{0}>'.format(info.name))
class ParserError(KeyError):
    """Raised when low-level XML parsing fails.

    NOTE (hartsock): extends KeyError since parser logic is written to
    catch KeyError types. Normally, I would want ParserError to be a root
    type for all parser faults.
    """
    pass
def ReadDocument(parser, data):
    """Feed *data* (a string or a file-like object) to the expat *parser*.

    Any parse failure is wrapped in a ParserError that records the document
    and the line/column where parsing stopped, then re-raised with the
    original traceback.
    """
    # NOTE (hartsock): maintaining library internal consistency here, this is
    # a refactoring that rolls up some repeated code blocks into a method so
    # that we can refactor XML parsing behavior in a single place.
    if not isinstance(data, str):
        data = data.read()
    try:
        parser.Parse(data)
    except Exception:
        # wrap all parser faults with additional information for later
        # bug reporting on the XML parser code itself.
        (ec, ev, tb) = sys.exc_info()
        line = parser.CurrentLineNumber
        col = parser.CurrentColumnNumber
        pe = ParserError("xml document: "
                         "{0} parse error at: "
                         "line:{1}, col:{2}".format(data, line, col))
        # use six.reraise for python 2.x and 3.x compatability
        reraise(ParserError, pe, tb)
## Deserialize an object from a file or string
#
# This function will deserialize one top-level XML node.
# @param data the data to deserialize (a file object or string)
# @param resultType expected result type
# @param stub stub for moRef deserialization
# @return the deserialized object
def Deserialize(data, resultType=object, stub=None):
    """Deserialize one top-level XML node from *data* and return the object."""
    deserializer = SoapDeserializer(stub)
    expatParser = ParserCreate(namespace_separator=NS_SEP)
    deserializer.Deserialize(expatParser, resultType)
    ReadDocument(expatParser, data)
    return deserializer.GetResult()
## Expat deserializer namespace handler
class ExpatDeserializerNSHandlers:
    """Tracks XML namespace declarations while an expat parser runs.

    nsMap maps a namespace prefix (None for the default prefix) to a stack
    (list) of namespace URIs; the last element of each stack is the namespace
    currently in scope for that prefix.
    """

    def __init__(self, nsMap=None):
        self.nsMap = nsMap if nsMap else {}

    ## Get current default ns
    def GetCurrDefNS(self):
        """Return the namespace bound to the default prefix, or ""."""
        stack = self.nsMap.get(None)
        return stack[-1] if stack else ""

    ## Get namespace and wsdl name from tag
    def GetNSAndWsdlname(self, tag):
        """ Map prefix:name tag into ns, name """
        prefix, sep, name = tag.partition(":")
        if not sep:
            prefix, name = None, tag
        # Resolve the prefix through the namespace stacks
        return self.nsMap[prefix][-1], name

    ## Handle namespace begin
    def StartNamespaceDeclHandler(self, prefix, uri):
        self.nsMap.setdefault(prefix, []).append(uri)

    ## Handle namespace end
    def EndNamespaceDeclHandler(self, prefix):
        self.nsMap[prefix].pop()
## SOAP -> Python Deserializer
class SoapDeserializer(ExpatDeserializerNSHandlers):
    """Deserialize SOAP XML into VMOMI Python objects.

    Installs itself as the expat handler set for one top-level element and
    restores the caller's handlers when that element closes.  Partially built
    objects live on self.stack; the finished object lands in self.result.
    """

    ## Constructor
    #
    # @param self self
    # @param stub Stub adapter to use for deserializing moRefs
    def __init__(self, stub=None, version=None):
        ExpatDeserializerNSHandlers.__init__(self)
        self.stub = stub
        # Prefer an explicit version, then the stub's version, else None
        if version:
            self.version = version
        elif self.stub:
            self.version = self.stub.version
        else:
            self.version = None
        self.result = None

    ## Deserialize a SOAP object
    #
    # @param self self
    # @param parser an expat parser
    # @param resultType the static type of the result
    # @param isFault true if the response is a fault response
    # @param nsMap a dict of prefix -> [xml ns stack]
    # @return the deserialized object
    def Deserialize(self, parser, resultType=object, isFault=False, nsMap=None):
        self.isFault = isFault
        self.parser = parser
        # Save the caller's handlers; they are restored once our top-level
        # element is complete (see EndElementHandler)
        self.origHandlers = GetHandlers(parser)
        SetHandlers(parser, GetHandlers(self))
        self.resultType = resultType
        self.stack = []
        self.data = ""
        self.serverGuid = None
        if issubclass(resultType, list):
            # List results accumulate items as top-level children close
            self.result = resultType()
        else:
            self.result = None
        if not nsMap:
            nsMap = {}
        self.nsMap = nsMap

    ## Get the result of deserialization
    # The links will not be resolved. User needs to explicitly resolve them
    # using LinkResolver.
    def GetResult(self):
        return self.result

    def SplitTag(self, tag):
        """ Split tag into ns, name """
        idx = tag.find(NS_SEP)
        if idx >= 0:
            return tag[:idx], tag[idx + 1:]
        else:
            return "", tag

    def LookupWsdlType(self, ns, name, allowManagedObjectReference=False):
        """ Lookup wsdl type. Handle special case for some vmodl version """
        try:
            return GetWsdlType(ns, name)
        except KeyError:
            if allowManagedObjectReference:
                if name.endswith('ManagedObjectReference') and ns == XMLNS_VMODL_BASE:
                    return GetWsdlType(ns, name[:-len('Reference')])
            # WARNING!!! This is a temporary hack to get around server not
            # honoring @service tag (see bug 521744). Once it is fix, I am
            # going to back out this change
            if name.endswith('ManagedObjectReference') and allowManagedObjectReference:
                return GetWsdlType(XMLNS_VMODL_BASE, name[:-len('Reference')])
            return GuessWsdlType(name)

    ## Handle an opening XML tag
    def StartElementHandler(self, tag, attr):
        self.data = ""
        self.serverGuid = None
        deserializeAsLocalizedMethodFault = True
        if not self.stack:
            if self.isFault:
                ns, name = self.SplitTag(tag)
                # name[:-5] strips a trailing suffix to get the wsdl type name
                # (NOTE(review): assumes the fault tag ends in 'Fault' — confirm)
                objType = self.LookupWsdlType(ns, name[:-5])
                # Only top level soap fault should be deserialized as method fault
                deserializeAsLocalizedMethodFault = False
            else:
                objType = self.resultType
        elif isinstance(self.stack[-1], list):
            objType = self.stack[-1].Item
        elif isinstance(self.stack[-1], DataObject):
            # TODO: Check ns matches DataObject's namespace
            ns, name = self.SplitTag(tag)
            objType = self.stack[-1]._GetPropertyInfo(name).type
            # LocalizedMethodFault <fault> tag should be deserialized as method fault
            if name == "fault" and isinstance(self.stack[-1], LocalizedMethodFault):
                deserializeAsLocalizedMethodFault = False
        else:
            raise TypeError("Invalid type for tag {0}".format(tag))

        xsiType = attr.get(XSI_TYPE)
        if xsiType:
            # Ignore dynamic type for TypeName, MethodName, PropertyPath
            # @bug 150459
            if not (objType is type or objType is ManagedMethod or \
                    objType is PropertyPath):
                ns, name = self.GetNSAndWsdlname(xsiType)
                dynType = self.LookupWsdlType(ns, name, allowManagedObjectReference=True)
                # TODO: Should be something like...
                # dynType must be narrower than objType, except for
                # ManagedObjectReference
                if not (issubclass(dynType, list) and issubclass(objType, list)):
                    objType = dynType
        else:
            if issubclass(objType, list):
                objType = objType.Item
        if self.version:
            objType = GetCompatibleType(objType, self.version)
        if issubclass(objType, ManagedObject):
            typeAttr = attr[u('type')]
            # val in vim type attr is not namespace qualified
            # However, this doesn't hurt to strip out namespace
            # TODO: Get the ns from "typens" attr?
            ns, name = self.GetNSAndWsdlname(typeAttr)
            if u('serverGuid') in attr:
                self.serverGuid = attr[u('serverGuid')]
            self.stack.append(GuessWsdlType(name))
        elif issubclass(objType, DataObject) or issubclass(objType, list):
            if deserializeAsLocalizedMethodFault and issubclass(objType, Exception):
                objType = LocalizedMethodFault
            # Push an *instance* to be populated by nested elements
            self.stack.append(objType())
        else:
            # Push the *type*; the value is built from text in EndElementHandler
            self.stack.append(objType)

    ## Handle a closing XML tag
    def EndElementHandler(self, tag):
        try:
            obj = self.stack.pop()
        except IndexError:
            # Our top-level element already closed: restore the original
            # handlers and forward this event to them
            SetHandlers(self.parser, self.origHandlers)
            handler = self.parser.EndElementHandler
            del self.parser, self.origHandlers, self.stack, self.resultType
            if handler:
                return handler(tag)
            return

        data = self.data
        if isinstance(obj, type) or isinstance(obj, type(Exception)):
            # A plain type was pushed: build the value from the element text
            if obj is type:
                if data is None or data == '':
                    obj = None
                else:
                    try:
                        # val in type val is not namespace qualified
                        # However, this doesn't hurt to strip out namespace
                        ns, name = self.GetNSAndWsdlname(data)
                        obj = GuessWsdlType(name)
                    except KeyError:
                        raise TypeError(data)
            elif obj is ManagedMethod:
                # val in Method val is not namespace qualified
                # However, this doesn't hurt to strip out namespace
                ns, name = self.GetNSAndWsdlname(data)
                obj = GuessWsdlMethod(name)
            elif obj is bool:
                if data == "0" or data.lower() == "false":
                    obj = bool(False)
                elif data == "1" or data.lower() == "true":
                    obj = bool(True)
                else:
                    raise TypeError(data)
            elif obj is binary:
                # Raise type error if decode failed
                obj = obj(base64.b64decode(data))
            elif obj is str:
                try:
                    obj = str(data)
                except ValueError:
                    obj = data
            elif obj is datetime:
                obj = pyVmomi.Iso8601.ParseISO8601(data)
                if not obj:
                    raise TypeError(data)
            # issubclass is very expensive. Test last
            elif issubclass(obj, ManagedObject):
                obj = obj(data, self.stub, self.serverGuid)
            elif issubclass(obj, Enum):
                obj = getattr(obj, data)
            else:
                obj = obj(data)
        elif isinstance(obj, LocalizedMethodFault):
            # Unwrap: surface the inner fault with its localized message
            obj.fault.msg = obj.localizedMessage
            obj = obj.fault

        if self.stack:
            # Attach the finished object to its parent container
            top = self.stack[-1]
            if isinstance(top, list):
                top.append(obj)
            elif isinstance(top, DataObject):
                ns, name = self.SplitTag(tag)
                info = top._GetPropertyInfo(name)
                if not isinstance(obj, list) and issubclass(info.type, list):
                    getattr(top, info.name).append(obj)
                else:
                    setattr(top, info.name, obj)
            else:
                ns, name = self.SplitTag(tag)
                setattr(top, name, obj)
        else:
            if not isinstance(obj, list) and issubclass(self.resultType, list):
                self.result.append(obj)
            else:
                # Single result is complete: hand control back to the caller
                self.result = obj
                SetHandlers(self.parser, self.origHandlers)
                del self.parser, self.origHandlers, self.stack, self.resultType

    ## Handle text data
    def CharacterDataHandler(self, data):
        self.data += data
## SOAP Response Deserializer class
class SoapResponseDeserializer(ExpatDeserializerNSHandlers):
    """Deserialize a complete SOAP response envelope.

    Watches for the soapenv:Fault tag and for the *Response body element,
    then delegates the interesting subtree to an inner SoapDeserializer.
    """

    ## Constructor
    #
    # @param self self
    # @param stub Stub adapter to use for deserializing moRefs
    def __init__(self, stub):
        ExpatDeserializerNSHandlers.__init__(self)
        self.stub = stub
        self.deser = SoapDeserializer(stub)
        # Expat-style (ns + NS_SEP + name) tag of the SOAP fault element
        self.soapFaultTag = XMLNS_SOAPENV + NS_SEP + "Fault"

    ## Deserialize a SOAP response
    #
    # @param self self
    # @param response the response (a file object or a string)
    # @param resultType expected result type
    # @param nsMap a dict of prefix -> [xml ns stack]
    # @return the deserialized object
    def Deserialize(self, response, resultType, nsMap=None):
        self.resultType = resultType
        self.stack = []
        self.msg = ""
        self.deser.result = None
        self.isFault = False
        self.parser = ParserCreate(namespace_separator=NS_SEP)
        try:  # buffer_text only in python >= 2.3
            self.parser.buffer_text = True
        except AttributeError:
            pass
        if not nsMap:
            nsMap = {}
        self.nsMap = nsMap
        SetHandlers(self.parser, GetHandlers(self))
        ReadDocument(self.parser, response)
        result = self.deser.GetResult()
        if self.isFault:
            # A fault with no deserialized detail becomes a RuntimeFault
            if result is None:
                result = GetVmodlType("vmodl.RuntimeFault")()
            result.msg = self.msg
        del self.resultType, self.stack, self.parser, self.msg, self.data, self.nsMap
        return result

    ## Handle an opening XML tag
    def StartElementHandler(self, tag, attr):
        self.data = ""
        if tag == self.soapFaultTag:
            self.isFault = True
        elif self.isFault and tag == "detail":
            # Hand the fault detail subtree to the inner deserializer
            self.deser.Deserialize(self.parser, object, True, self.nsMap)
        elif tag.endswith("Response"):
            # Hand the method response subtree to the inner deserializer
            self.deser.Deserialize(self.parser, self.resultType, False, self.nsMap)

    ## Handle text data
    def CharacterDataHandler(self, data):
        self.data += data

    ## Handle a closing XML tag
    def EndElementHandler(self, tag):
        if self.isFault and tag == "faultstring":
            try:
                self.msg = str(self.data)
            except ValueError:
                self.msg = self.data
## Base class that implements common functionality for stub adapters.
## Method that must be provided by the implementation class:
## -- InvokeMethod(ManagedObject mo, Object methodInfo, Object[] args)
class StubAdapterBase(StubAdapterAccessorMixin):
    def __init__(self, version):
        StubAdapterAccessorMixin.__init__(self)
        self.ComputeVersionInfo(version)

    ## Compute the version information for the specified version
    #
    # @param version the version string
    def ComputeVersionInfo(self, version):
        """Derive self.versionId and self.version from the wsdl namespace."""
        versionNS = GetVersionNamespace(version)
        # Only namespaces of the "name/X.Y" form carry a usable version id
        self.versionId = '"urn:{0}"'.format(versionNS) if "/" in versionNS else ''
        self.version = version
## Base class that implements common functionality for SOAP-based stub adapters.
## Method that must be provided by the implementation class:
## -- InvokeMethod(ManagedObject mo, Object methodInfo, Object[] args)
class SoapStubAdapterBase(StubAdapterBase):
    ## Serialize a VMOMI request to SOAP
    #
    # @param mo the 'this'
    # @param info method info
    # @param args method arguments
    # @return the serialized request
    def SerializeRequest(self, mo, info, args):
        """Build the complete SOAP request XML for one method invocation."""
        if not IsChildVersion(self.version, info.version):
            raise GetVmodlType("vmodl.fault.MethodNotFound")(receiver=mo,
                                                             method=info.name)
        nsMap = SOAP_NSMAP.copy()
        defaultNS = GetWsdlNamespace(self.version)
        # The request's wsdl namespace becomes the default (unprefixed) ns
        nsMap[defaultNS] = ''

        # Add xml header and soap envelope
        result = [XML_HEADER, '\n', SOAP_ENVELOPE_START]

        # Add request context and samlToken to soap header, if exists
        reqContexts = GetRequestContext()
        samlToken = getattr(self, 'samlToken', None)
        if reqContexts or samlToken:
            result.append(SOAP_HEADER_START)
            for key, val in iteritems(reqContexts):
                # Note: Support req context of string type only
                if not isinstance(val, basestring):
                    raise TypeError("Request context key ({0}) has non-string value ({1}) of {2}".format(key, val, type(val)))
                ret = Serialize(val,
                                Object(name=key, type=str, version=self.version),
                                self.version,
                                nsMap)
                result.append(ret)
            if samlToken:
                result.append('{0} {1} {2}'.format(WSSE_HEADER_START,
                                                   samlToken,
                                                   WSSE_HEADER_END))
            result.append(SOAP_HEADER_END)
            result.append('\n')

        # Serialize soap body
        result.extend([SOAP_BODY_START,
                       '<{0} xmlns="{1}">'.format(info.wsdlName, defaultNS),
                       Serialize(mo, Object(name="_this", type=ManagedObject,
                                            version=self.version),
                                 self.version, nsMap)])

        # Serialize soap request parameters
        for (param, arg) in zip(info.params, args):
            result.append(Serialize(arg, param, self.version, nsMap))
        result.extend(['</{0}>'.format(info.wsdlName), SOAP_BODY_END, SOAP_ENVELOPE_END])
        return ''.join(result)
## Subclass of HTTPConnection that connects over a Unix domain socket
## instead of a TCP port. The path of the socket is passed in place of
## the hostname. Fairly gross but does the job.
# NOTE (hartsock): rewrite this class as a wrapper, see HTTPSConnectionWrapper
# below for a guide.
class UnixSocketConnection(http_client.HTTPConnection):
    def __init__(self, path):
        """Create a connection whose endpoint is the Unix socket at *path*.

        HTTPConnection's single required argument is a host name; since
        connect() is overridden, any valid string will do, so the empty
        string is passed and the socket path is kept on the instance.
        """
        http_client.HTTPConnection.__init__(self, '')
        self.path = path

    def connect(self):
        """Connect to self.path, honoring HTTPConnection.connect's contract
        of leaving the connected socket in self.sock."""
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(self.path)
        self.sock = s
try:
    # The ssl module is not available in python versions less than 2.6
    SSL_THUMBPRINTS_SUPPORTED = True
    import ssl
    import hashlib

    def _VerifyThumbprint(thumbprint, connection):
        '''If there is a thumbprint, connect to the server and verify that the
        SSL certificate matches the given thumbprint. An exception is thrown
        if there is a mismatch.'''
        if thumbprint and isinstance(connection, http_client.HTTPSConnection):
            if not connection.sock:
                connection.connect()
            # SHA1 over the DER-encoded peer certificate, compared as
            # lowercase hex against the expected thumbprint
            derCert = connection.sock.getpeercert(True)
            sha1 = hashlib.sha1()
            sha1.update(derCert)
            sha1Digest = sha1.hexdigest().lower()
            if sha1Digest != thumbprint:
                raise Exception("Server has wrong SHA1 thumbprint: {0} "
                                "(required) != {1} (server)".format(
                                    thumbprint, sha1Digest))

    # Function used to wrap sockets with SSL
    _SocketWrapper = ssl.wrap_socket
except ImportError:
    SSL_THUMBPRINTS_SUPPORTED = False

    def _VerifyThumbprint(thumbprint, connection):
        # Without the ssl module the peer certificate cannot be fetched, so a
        # requested thumbprint check can only fail loudly
        if thumbprint and isinstance(connection, http_client.HTTPSConnection):
            raise Exception(
                "Thumbprint verification not supported on python < 2.6")

    def _SocketWrapper(rawSocket, keyfile, certfile, *args, **kwargs):
        # Pre-2.6 fallback: socket.ssl + FakeSocket emulate a wrapped socket
        wrappedSocket = socket.ssl(rawSocket, keyfile, certfile)
        return http_client.FakeSocket(rawSocket, wrappedSocket)
## https connection wrapper
#
# NOTE (hartsock): do not override core library types or implementations
# directly because this makes brittle code that is too easy to break and
# closely tied to implementation details we do not control. Instead, wrap
# the core object to introduce additional behaviors.
#
# Purpose:
# Support ssl.wrap_socket params which are missing from httplib
# HTTPSConnection (e.g. ca_certs)
# Note: Only works iff the ssl params are passing in as kwargs
class HTTPSConnectionWrapper(object):
    def __init__(self, *args, **kwargs):
        wrapped = http_client.HTTPSConnection(*args, **kwargs)
        # Extract ssl.wrap_socket param unknown to httplib.HTTPConnection,
        # and push back the params in connect()
        self._sslArgs = {}
        tmpKwargs = kwargs.copy()
        for key in ["server_side", "cert_reqs", "ssl_version", "ca_certs",
                    "do_handshake_on_connect", "suppress_ragged_eofs",
                    "ciphers"]:
            if key in tmpKwargs:
                self._sslArgs[key] = tmpKwargs.pop(key)
        self._wrapped = wrapped

    ## Override connect to allow us to pass in additional ssl paramters to
    # ssl.wrap_socket (e.g. cert_reqs, ca_certs for ca cert verification)
    def connect(self, wrapped):
        if len(self._sslArgs) == 0 or hasattr(self, '_baseclass'):
            # No override
            return wrapped.connect
        # Big hack. We have to copy and paste the httplib connect fn for
        # each python version in order to handle extra ssl paramters. Yuk!
        # NOTE(review): self.host/self.port/self.timeout/etc. resolve through
        # __getattr__ to the wrapped connection — confirm this is intended.
        if hasattr(self, "source_address"):
            # Python 2.7
            sock = socket.create_connection((self.host, self.port),
                                            self.timeout, self.source_address)
            if wrapped._tunnel_host:
                wrapped.sock = sock
                wrapped._tunnel()
            wrapped.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, **self._sslArgs)
        elif hasattr(self, "timeout"):
            # Python 2.6
            sock = socket.create_connection((self.host, self.port), self.timeout)
            wrapped.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, **self._sslArgs)
        return wrapped.connect
        # TODO: Additional verification of peer cert if needed
        #cert_reqs = self._sslArgs.get("cert_reqs", ssl.CERT_NONE)
        #ca_certs = self._sslArgs.get("ca_certs", None)
        #if cert_reqs != ssl.CERT_NONE and ca_certs:
        #   if hasattr(self.sock, "getpeercert"):
        #      # TODO: verify peer cert
        #      dercert = self.sock.getpeercert(False)
        #      # pemcert = ssl.DER_cert_to_PEM_cert(dercert)

    def __getattr__(self, item):
        # 'connect' is special-cased so our ssl-aware connect runs; every
        # other attribute is delegated to the wrapped HTTPSConnection
        if item == 'connect':
            return self.connect(self._wrapped)
        return getattr(self._wrapped, item)
## Stand-in for the HTTPSConnection class that will connect to a proxy and
## issue a CONNECT command to start an SSL tunnel.
class SSLTunnelConnection(object):
    """Connects to a proxy and issues an HTTP CONNECT to open an SSL tunnel."""

    # @param proxyPath The path to pass to the CONNECT command.
    def __init__(self, proxyPath):
        self.proxyPath = proxyPath

    # Connects to a proxy server and initiates a tunnel to the destination
    # specified by proxyPath. If successful, a new HTTPSConnection is returned.
    #
    # @param path The destination URL path.
    # @param key_file The SSL key file to use when wrapping the socket.
    # @param cert_file The SSL certificate file to use when wrapping the socket.
    # @param kwargs In case caller passed in extra parameters not handled by
    #        SSLTunnelConnection
    def __call__(self, path, key_file=None, cert_file=None, **kwargs):
        # Don't pass any keyword args that HTTPConnection won't understand.
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live view raises RuntimeError on Python 3.
        for arg in list(kwargs.keys()):
            if arg not in ("port", "strict", "timeout", "source_address"):
                del kwargs[arg]
        tunnel = http_client.HTTPConnection(path, **kwargs)
        tunnel.request('CONNECT', self.proxyPath)
        resp = tunnel.getresponse()
        if resp.status != 200:
            raise http_client.HTTPException("{0} {1}".format(resp.status, resp.reason))
        # Reuse the tunneled socket, wrapped in SSL, for the HTTPS connection
        retval = http_client.HTTPSConnection(path)
        retval.sock = _SocketWrapper(tunnel.sock,
                                     keyfile=key_file, certfile=cert_file)
        return retval
class GzipReader:
    """File-like reader that transparently inflates GZIP or DEFLATE content
    pulled from an underlying file object."""
    GZIP = 1
    DEFLATE = 2

    # @param rfile underlying file-like object providing read()
    # @param encoding GzipReader.GZIP or GzipReader.DEFLATE
    # @param readChunkSize number of compressed bytes to pull per read
    def __init__(self, rfile, encoding=GZIP, readChunkSize=512):
        self.rfile = rfile
        self.chunks = []
        self.bufSize = 0  # Number of decompressed bytes buffered in chunks
        assert(encoding in (GzipReader.GZIP, GzipReader.DEFLATE))
        self.encoding = encoding
        self.unzip = None  # Lazily-created zlib decompressor
        self.readChunkSize = readChunkSize

    def _CreateUnzip(self, firstChunk):
        """Create the zlib decompressor, sniffing the actual deflate flavor
        from the first compressed chunk."""
        import zlib
        if self.encoding == GzipReader.GZIP:
            wbits = zlib.MAX_WBITS + 16
        elif self.encoding == GzipReader.DEFLATE:
            # Sniff out real deflate format
            chunkLen = len(firstChunk)
            # Assume raw deflate
            wbits = -zlib.MAX_WBITS
            # Fix: compare against a bytes literal; the original compared a
            # bytes/str slice to a *list* of chars, which is always False.
            if firstChunk[:3] == b'\x1f\x8b\x08':
                # gzip: Apache mod_deflate will send gzip. Yurk!
                wbits = zlib.MAX_WBITS + 16
            elif chunkLen >= 2:
                # Indexing bytes yields int on Python 3 but str on Python 2
                b0 = firstChunk[0] if isinstance(firstChunk[0], int) else ord(firstChunk[0])
                b1 = firstChunk[1] if isinstance(firstChunk[1], int) else ord(firstChunk[1])
                if (b0 & 0xf) == 8 and (((b0 * 256 + b1)) % 31) == 0:
                    # zlib deflate (valid CMF/FLG header checksum)
                    wbits = min(((b0 & 0xf0) >> 4) + 8, zlib.MAX_WBITS)
        else:
            assert(False)
        self.unzip = zlib.decompressobj(wbits)
        return self.unzip

    def read(self, bytes=-1):
        """Return up to *bytes* decompressed bytes (-1 for everything)."""
        chunks = self.chunks
        bufSize = self.bufSize

        while bufSize < bytes or bytes == -1:
            # Read and decompress
            chunk = self.rfile.read(self.readChunkSize)

            if self.unzip is None:
                self._CreateUnzip(chunk)

            if chunk:
                inflatedChunk = self.unzip.decompress(chunk)
                bufSize += len(inflatedChunk)
                chunks.append(inflatedChunk)
            else:
                # Returns whatever we have
                break

        if bufSize <= bytes or bytes == -1:
            leftoverBytes = 0
            leftoverChunks = []
        else:
            leftoverBytes = bufSize - bytes
            # Adjust last chunk to hold only the left over bytes
            lastChunk = chunks.pop()
            chunks.append(lastChunk[:-leftoverBytes])
            leftoverChunks = [lastChunk[-leftoverBytes:]]

        self.chunks = leftoverChunks
        self.bufSize = leftoverBytes

        # Join with a bytes separator: chunks are bytes on Python 3 (and str,
        # which b"" also is, on Python 2); "".join would raise TypeError on 3.
        buf = b"".join(chunks)
        return buf
## SOAP stub adapter object
class SoapStubAdapter(SoapStubAdapterBase):
    ## Constructor
    #
    # The endpoint can be specified individually as either a host/port
    # combination, or with a URL (using a url= keyword).
    #
    # @param self self
    # @param host host
    # @param port port (pass negative port number for no SSL)
    # @param **** Deprecated. Please use version instead **** ns API namespace
    # @param path location of SOAP VMOMI service
    # @param url URL (overrides host, port, path if set)
    # @param sock unix domain socket path (overrides host, port, url if set)
    # @param poolSize size of HTTP connection pool
    # @param certKeyFile The path to the PEM-encoded SSL private key file.
    # @param certFile The path to the PEM-encoded SSL certificate file.
    # @param httpProxyHost The host name of the proxy server.
    # @param httpProxyPort The proxy server port.
    # @param sslProxyPath Path to use when tunneling through VC's reverse proxy.
    # @param thumbprint The SHA1 thumbprint of the server's SSL certificate.
    #   Some use a thumbprint of the form xx:xx:xx..:xx.  We ignore the ":"
    #   characters.  If set to None, any thumbprint is accepted.
    # @param cacertsFile CA certificates file in PEM format
    # @param version API version
    # @param connectionPoolTimeout Timeout in secs for idle connections in client pool. Use -1 to disable any timeout.
    # @param samlToken SAML Token that should be used in SOAP security header for login
    def __init__(self, host='localhost', port=443, ns=None, path='/sdk',
                 url=None, sock=None, poolSize=5,
                 certFile=None, certKeyFile=None,
                 httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
                 thumbprint=None, cacertsFile=None, version=None,
                 acceptCompressedResponses=True,
                 connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
                 samlToken=None):
        if ns:
            # 'ns' is deprecated; it is translated to an API version here.
            assert(version is None)
            version = versionMap[ns]
        elif not version:
            version = 'vim.version.version1'
        SoapStubAdapterBase.__init__(self, version=version)
        self.cookie = ""
        if sock:
            self.scheme = UnixSocketConnection
            # Store sock in the host member variable because that's where
            # the UnixSocketConnection ctor expects to find it -- see above
            self.host = sock
        elif url:
            scheme, self.host, urlpath = urlparse.urlparse(url)[:3]
            # Only use the URL path if it's sensible, otherwise use the path
            # keyword argument as passed in.
            if urlpath not in ('', '/'):
                path = urlpath
            # 'cond and a or b' idiom: HTTPConnection for http,
            # HTTPSConnectionWrapper for https.
            self.scheme = scheme == "http" and http_client.HTTPConnection \
                or scheme == "https" and HTTPSConnectionWrapper
        else:
            # A negative port number means "plain HTTP on abs(port)".
            port, self.scheme = port < 0 and (-port, http_client.HTTPConnection) \
                or (port, HTTPSConnectionWrapper)
            if host.find(':') != -1:  # is IPv6?
                # Bracket IPv6 literals so the ':port' suffix stays parseable.
                host = '[' + host + ']'
            self.host = '{0}:{1}'.format(host, port)
        self.path = path
        if thumbprint:
            # Normalize 'xx:xx:...' form to a bare lowercase hex digest.
            self.thumbprint = thumbprint.replace(":", "").lower()
            if len(self.thumbprint) != 40:
                raise Exception("Invalid SHA1 thumbprint -- {0}".format(thumbprint))
        else:
            self.thumbprint = None
        if sslProxyPath:
            self.scheme = SSLTunnelConnection(sslProxyPath)
        elif httpProxyHost:
            if self.scheme == HTTPSConnectionWrapper:
                # HTTPS through a proxy goes over a CONNECT tunnel.
                self.scheme = SSLTunnelConnection(self.host)
            else:
                # Plain HTTP proxying: request the absolute URL from the proxy.
                if url:
                    self.path = url
                else:
                    self.path = "http://{0}/{1}".format(self.host, path)
            # Swap the actual host with the proxy.
            self.host = "{0}:{1}".format(httpProxyHost, httpProxyPort)
        self.poolSize = poolSize
        self.pool = []  # list of (connection, lastAccessTime), newest first
        self.connectionPoolTimeout = connectionPoolTimeout
        self.lock = threading.Lock()  # guards self.pool
        self.schemeArgs = {}  # extra kwargs forwarded to the connection class
        if certKeyFile:
            self.schemeArgs['key_file'] = certKeyFile
        if certFile:
            self.schemeArgs['cert_file'] = certFile
        if cacertsFile:
            self.schemeArgs['ca_certs'] = cacertsFile
            self.schemeArgs['cert_reqs'] = ssl.CERT_REQUIRED
        self.samlToken = samlToken
        self.requestModifierList = []
        self._acceptCompressedResponses = acceptCompressedResponses
    # Context modifier used to modify the SOAP request.
    # @param func The func that takes in the serialized message and modifies the
    #   the request. The func is appended to the requestModifierList and then
    #   popped after the request is modified.
    @contextlib.contextmanager
    def requestModifier(self, func):
        self.requestModifierList.append(func)
        try:
            yield
        finally:
            # Always unregister, even if the request inside the 'with' raised.
            self.requestModifierList.pop()

    ## Invoke a managed method
    #
    # @param self self
    # @param mo the 'this'
    # @param info method info
    # @param args arguments
    # @param outerStub If not-None, this should be a reference to the wrapping
    #   stub adapter.  Any ManagedObject references returned from this method
    #   will have outerStub in their _stub field.  Note that this also changes
    #   the return type to a tuple containing the HTTP status and the
    #   deserialized object so that it's easier to distinguish an API error from
    #   a connection error.
    def InvokeMethod(self, mo, info, args, outerStub=None):
        if outerStub is None:
            outerStub = self
        headers = {'Cookie': self.cookie,
                   'SOAPAction': self.versionId,
                   'Content-Type': 'text/xml; charset={0}'.format(XML_ENCODING)}
        if self._acceptCompressedResponses:
            headers['Accept-Encoding'] = 'gzip, deflate'
        req = self.SerializeRequest(mo, info, args)
        # Give registered request modifiers a chance to rewrite the payload.
        for modifier in self.requestModifierList:
            req = modifier(req)
        conn = self.GetConnection()
        try:
            conn.request('POST', self.path, req, headers)
            resp = conn.getresponse()
        except (socket.error, http_client.HTTPException):
            # The server is probably sick, drop all of the cached connections.
            self.DropConnections()
            raise
        # NOTE (hartsocks): this cookie handling code should go away in a future
        # release. The string 'set-cookie' and 'Set-Cookie' but both are
        # acceptable, but the supporting library may have a bug making it
        # case sensitive when it shouldn't be. The term 'set-cookie' will occur
        # more frequently than 'Set-Cookie' based on practical testing.
        cookie = resp.getheader('set-cookie')
        if cookie is None:
            # try case-sensitive header for compatibility
            cookie = resp.getheader('Set-Cookie')
        status = resp.status
        if cookie:
            self.cookie = cookie
        # 200 is a normal result; 500 carries a SOAP fault body that must be
        # deserialized too.  Anything else is a transport-level failure.
        if status == 200 or status == 500:
            try:
                fd = resp
                encoding = resp.getheader('Content-Encoding', 'identity').lower()
                if encoding == 'gzip':
                    fd = GzipReader(resp, encoding=GzipReader.GZIP)
                elif encoding == 'deflate':
                    fd = GzipReader(resp, encoding=GzipReader.DEFLATE)
                deserializer = SoapResponseDeserializer(outerStub)
                obj = deserializer.Deserialize(fd, info.result)
            except Exception as exc:
                conn.close()
                # NOTE (hartsock): This feels out of place. As a rule the lexical
                # context that opens a connection should also close it. However,
                # in this code the connection is passed around and closed in other
                # contexts (ie: methods) that we are blind to here. Refactor this.
                # The server might be sick, drop all of the cached connections.
                self.DropConnections()
                raise exc
            else:
                # Drain the response so the connection can be safely reused.
                resp.read()
                self.ReturnConnection(conn)
            if outerStub != self:
                # Wrapped invocation: return (status, obj) -- see docstring.
                return (status, obj)
            if status == 200:
                return obj
            else:
                # status 500: 'obj' is the deserialized server fault.
                raise obj  # pylint: disable-msg=E0702
        else:
            conn.close()
            raise http_client.HTTPException("{0} {1}".format(resp.status, resp.reason))
    ## Clean up connection pool to throw away idle timed-out connections
    # SoapStubAdapter lock must be acquired before this method is called.
    def _CloseIdleConnections(self):
        if self.connectionPoolTimeout >= 0:
            currentTime = time.time()
            idleConnections = []
            # The pool keeps the most recently returned connection at the
            # front (see ReturnConnection), so once the first idle entry is
            # found everything after it is idle as well.
            for conn, lastAccessTime in self.pool:
                idleTime = currentTime - lastAccessTime
                if idleTime >= self.connectionPoolTimeout:
                    i = self.pool.index((conn, lastAccessTime))
                    idleConnections = self.pool[i:]
                    self.pool = self.pool[:i]
                    break
            # Close outside the scan so the pool is already consistent.
            for conn, _ in idleConnections:
                conn.close()

    ## Get a HTTP connection from the pool
    def GetConnection(self):
        self.lock.acquire()
        self._CloseIdleConnections()
        if self.pool:
            result, _ = self.pool.pop(0)
            self.lock.release()
        else:
            # No pooled connection available; release the lock first and open
            # a fresh connection outside of it.
            self.lock.release()
            result = self.scheme(self.host, **self.schemeArgs)
            # Always disable NAGLE algorithm
            #
            # Python httplib (2.6 and below) is splitting a http request into 2
            # packets (header and body). It first send the header, but will not
            # send the body until it receives the ack (for header) from server
            # [NAGLE at work]. The delayed ack time on ESX is around 40 - 100 ms
            # (depends on version) and can go up to 200 ms. This effectively slow
            # down each pyVmomi call by the same amount of time.
            #
            # Disable NAGLE on client will force both header and body packets to
            # get out immediately, and eliminated the delay
            #
            # This bug is fixed in python 2.7, however, only if the request
            # body is a string (which is true for now)
            if sys.version_info[:2] < (2, 7):
                self.DisableNagle(result)
            # Only freshly created connections need their certificate checked.
            _VerifyThumbprint(self.thumbprint, result)
        return result
## Drop all cached connections to the server.
def DropConnections(self):
self.lock.acquire()
oldConnections = self.pool
self.pool = []
self.lock.release()
for conn, _ in oldConnections:
conn.close()
## Return a HTTP connection to the pool
def ReturnConnection(self, conn):
self.lock.acquire()
self._CloseIdleConnections()
if len(self.pool) < self.poolSize:
self.pool.insert(0, (conn, time.time()))
self.lock.release()
else:
self.lock.release()
# NOTE (hartsock): this seems to violate good coding practice in that
# the lexical context that opens a connection should also be the
# same context responsible for closing it.
conn.close()
    ## Disable nagle on a http connections
    def DisableNagle(self, conn):
        # Override connections' connect function to force disable NAGLE
        if self.scheme != UnixSocketConnection and getattr(conn, "connect"):
            orgConnect = conn.connect

            def ConnectDisableNagle(*args, **kwargs):
                orgConnect(*args, **kwargs)
                sock = getattr(conn, "sock")
                if sock:
                    try:
                        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                    except Exception:
                        # Best-effort: failing to tweak TCP_NODELAY only costs
                        # latency, so the error is deliberately swallowed.
                        pass
            conn.connect = ConnectDisableNagle
HEADER_SECTION_END = '\r\n\r\n'


## Parse an HTTP response into its headers and body
def ParseHttpResponse(httpResponse):
    """Split a raw HTTP response string into (headerText, bodyText).

    The header text keeps its trailing blank line.  When no blank line
    terminates the header section, ('', '') is returned.
    """
    headerText, separator, bodyText = httpResponse.partition(HEADER_SECTION_END)
    if not separator:
        return ('', '')
    return (headerText + separator, bodyText)
## SOAP-over-stdio stub adapter object
class SoapCmdStubAdapter(SoapStubAdapterBase):
    """Invokes SOAP requests by piping them through an external command,
    presenting the request to the child process CGI-style."""

    ## Constructor
    #
    # @param self self
    # @param cmd command to execute
    # @param version API version
    def __init__(self, cmd, version='vim.version.version1'):
        SoapStubAdapterBase.__init__(self, version=version)
        self.cmd = cmd
        self.systemError = GetVmodlType('vmodl.fault.SystemError')

    ## Invoke a managed method
    #
    # @param self self
    # @param mo the 'this'
    # @param info method info
    # @param args arguments
    def InvokeMethod(self, mo, info, args):
        argv = self.cmd.split()
        req = self.SerializeRequest(mo, info, args)
        # Present the request to the child the way a CGI server would.
        env = dict(os.environ)
        env['REQUEST_METHOD'] = 'POST'
        env['CONTENT_LENGTH'] = str(len(req))
        env['HTTP_SOAPACTION'] = self.versionId[1:-1]  # strip surrounding quotes
        p = subprocess.Popen(argv,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=env)
        (outText, errText) = p.communicate(req)
        if p.returncode < 0:
            # Process died with a signal
            errText = "Process terminated with signal {0}\n{1}".format(-p.returncode, errText)
            raise self.systemError(msg=errText, reason=errText)
        try:
            (responseHeaders, responseBody) = ParseHttpResponse(outText)
            obj = SoapResponseDeserializer(self).Deserialize(responseBody, info.result)
        except Exception:
            # BUG FIX: the original format string ended in '{1}}' -- the lone
            # trailing brace makes str.format() raise
            # "ValueError: Single '}' encountered" instead of producing the
            # error message.  Also narrowed the bare 'except:' so
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            errText = "Failure parsing SOAP response ({0})\n{1}".format(outText, errText)
            raise self.systemError(msg=errText, reason=errText)
        if p.returncode == 0:
            return obj
        elif obj is None:
            raise self.systemError(msg=errText, reason=errText)
        else:
            # Non-zero exit with a deserialized fault: raise the fault itself.
            raise obj  # pylint: disable-msg=E0702
class SessionOrientedStub(StubAdapterBase):
    '''A session-oriented stub adapter that will relogin to the destination if a
    session-oriented exception is thrown.

    Here's an example.  First, we setup the communication substrate:

    >>> soapStub = SoapStubAdapter(host="192.168.1.2", ns="vim25/5.0")

    Create a SessionOrientedStub that uses the stub we just created for talking
    to the server:

    >>> from pyVim.connect import VimSessionOrientedStub
    >>> sessionStub = VimSessionOrientedStub(
    ...    soapStub,
    ...    VimSessionOrientedStub.makeUserLoginMethod("root", "vmware"))

    Perform some privileged operations without needing to explicitly login:

    >>> si = Vim.ServiceInstance("ServiceInstance", sessionStub)
    >>> si.content.sessionManager.sessionList
    >>> si.content.sessionManager.Logout()
    >>> si.content.sessionManager.sessionList
    '''
    # Two-state login machine; transitions are guarded by self.lock.
    STATE_UNAUTHENTICATED = 0
    STATE_AUTHENTICATED = 1

    # Subclasses override this with the server exception types that mean
    # "the session expired" (see InvokeMethod/InvokeAccessor).
    SESSION_EXCEPTIONS = tuple()

    def __init__(self, soapStub, loginMethod, retryDelay=0.1, retryCount=4):
        '''Construct a SessionOrientedStub.

        The stub starts off in the "unauthenticated" state, so it will call the
        loginMethod on the first invocation of a method.  If a communication error
        is encountered, the stub will wait for retryDelay seconds and then try to
        call the method again.  If the server throws an exception that is in the
        SESSION_EXCEPTIONS tuple, it will be caught and the stub will transition
        back into the "unauthenticated" state so that another login will be
        performed.

        @param soapStub The communication substrate.
        @param loginMethod A function that takes a single parameter, soapStub, and
            performs the necessary operations to authenticate with the server.
        @param retryDelay The amount of time to sleep before retrying after a
            communication error.
        @param retryCount The number of times to retry connecting to the server.
        '''
        assert callable(loginMethod)
        assert retryCount >= 0
        StubAdapterBase.__init__(self, version=soapStub.version)
        self.lock = threading.Lock()  # guards self.state transitions
        self.soapStub = soapStub
        self.state = self.STATE_UNAUTHENTICATED
        self.loginMethod = loginMethod
        self.retryDelay = retryDelay
        self.retryCount = retryCount
    def InvokeMethod(self, mo, info, args):
        # This retry logic is replicated in InvokeAccessor and the two copies need
        # to be in sync
        retriesLeft = self.retryCount
        while retriesLeft > 0:
            try:
                if self.state == self.STATE_UNAUTHENTICATED:
                    self._CallLoginMethod()
                # Invoke the method.  Passing 'self' as outerStub makes the
                # inner stub return a (status, obj) tuple instead of raising.
                status, obj = self.soapStub.InvokeMethod(mo, info, args, self)
            except (socket.error, http_client.HTTPException, ExpatError):
                # Transport-level failure: back off and retry.
                if self.retryDelay and retriesLeft:
                    time.sleep(self.retryDelay)
                retriesLeft -= 1
                continue
            if status == 200:
                # Normal return from the server, return the object to the caller.
                return obj
            # An exceptional return from the server
            if isinstance(obj, self.SESSION_EXCEPTIONS):
                # Our session might've timed out, change our state and retry.
                self._SetStateUnauthenticated()
                # NOTE(review): this path loops again WITHOUT decrementing
                # retriesLeft, so a server that keeps rejecting the session
                # could spin indefinitely -- confirm this is intended.
            else:
                # It's an exception from the method that was called, send it up.
                raise obj
        # Raise any socket/httplib errors caught above.
        raise SystemError()

    ## Retrieve a managed property
    #
    # @param self self
    # @param mo managed object
    # @param info property info
    def InvokeAccessor(self, mo, info):
        # This retry logic is replicated in InvokeMethod and the two copies need
        # to be in sync
        retriesLeft = self.retryCount
        while retriesLeft > 0:
            try:
                if self.state == self.STATE_UNAUTHENTICATED:
                    self._CallLoginMethod()
                # Invoke the method
                obj = StubAdapterBase.InvokeAccessor(self, mo, info)
            except (socket.error, http_client.HTTPException, ExpatError):
                if self.retryDelay and retriesLeft:
                    time.sleep(self.retryDelay)
                retriesLeft -= 1
                continue
            except Exception as e:
                if isinstance(e, self.SESSION_EXCEPTIONS):
                    # Our session might've timed out, change our state and retry.
                    self._SetStateUnauthenticated()
                else:
                    raise e
                # NOTE(review): when a session exception is swallowed above,
                # 'obj' was never assigned, so the 'return obj' below would
                # raise NameError instead of retrying -- verify against the
                # upstream implementation.
            return obj
        # Raise any socket/httplib errors caught above.
        raise SystemError()
## Handle the login method call
#
# This method calls the login method on the soap stub and changes the state
# to authenticated
def _CallLoginMethod(self):
try:
self.lock.acquire()
if self.state == self.STATE_UNAUTHENTICATED:
self.loginMethod(self.soapStub)
self.state = self.STATE_AUTHENTICATED
finally:
self.lock.release()
## Change the state to unauthenticated
def _SetStateUnauthenticated(self):
self.lock.acquire()
if self.state == self.STATE_AUTHENTICATED:
self.state = self.STATE_UNAUTHENTICATED
self.lock.release()
| apache-2.0 |
airbnb/caravel | superset/sql_parse.py | 1 | 7283 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import logging
import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import Keyword, Name
# Set-operation keywords whose statements produce result sets.
RESULT_OPERATIONS = {'UNION', 'INTERSECT', 'EXCEPT', 'SELECT'}
ON_KEYWORD = 'ON'  # NOTE(review): apparently unused in this module -- confirm
# Keywords after which the next identifier is (usually) a table name.
PRECEDES_TABLE_NAME = {
    'FROM', 'JOIN', 'DESCRIBE', 'WITH', 'LEFT JOIN', 'RIGHT JOIN',
}
# Prefix marking common-table-expression names so they are not mistaken
# for physical tables.
CTE_PREFIX = 'CTE__'


class ParsedQuery(object):
    """Wraps a SQL statement parsed with sqlparse and extracts metadata
    (referenced tables, LIMIT value, statement type)."""

    def __init__(self, sql_statement):
        self.sql = sql_statement
        self._table_names = set()  # physical tables referenced by the query
        self._alias_names = set()  # aliases/CTE names, subtracted from tables
        self._limit = None         # LIMIT value, if any
        logging.info('Parsing with sqlparse statement {}'.format(self.sql))
        self._parsed = sqlparse.parse(self.stripped())
        for statement in self._parsed:
            self.__extract_from_token(statement)
            # NOTE(review): only the last statement's LIMIT is kept.
            self._limit = self._extract_limit_from_query(statement)
        # Aliases shadow real table names; drop them from the result.
        self._table_names = self._table_names - self._alias_names
    @property
    def tables(self):
        # Physical table names referenced by the statement (aliases removed).
        return self._table_names

    @property
    def limit(self):
        # LIMIT value parsed from the statement, or None.
        return self._limit

    def is_select(self):
        return self._parsed[0].get_type() == 'SELECT'

    def is_explain(self):
        return self.stripped().upper().startswith('EXPLAIN')

    def is_readonly(self):
        """Pessimistic readonly, 100% sure statement won't mutate anything"""
        return self.is_select() or self.is_explain()

    def stripped(self):
        # Drop surrounding whitespace and trailing semicolons.
        return self.sql.strip(' \t\n;')

    def get_statements(self):
        """Returns a list of SQL statements as strings, stripped"""
        statements = []
        for statement in self._parsed:
            if statement:
                sql = str(statement).strip(' \n;\t')
                if sql:
                    statements.append(sql)
        return statements

    @staticmethod
    def __get_full_name(identifier):
        # 'schema.table' identifiers carry the dot as their second token.
        if len(identifier.tokens) > 1 and identifier.tokens[1].value == '.':
            return '{}.{}'.format(identifier.tokens[0].value,
                                  identifier.tokens[2].value)
        return identifier.get_real_name()

    @staticmethod
    def __is_identifier(token):
        return isinstance(token, (IdentifierList, Identifier))

    def __process_identifier(self, identifier):
        # exclude subselects
        if '(' not in str(identifier):
            table_name = self.__get_full_name(identifier)
            # CTE names are tracked separately and must not count as tables.
            if not table_name.startswith(CTE_PREFIX):
                self._table_names.add(self.__get_full_name(identifier))
            return
        # store aliases
        if hasattr(identifier, 'get_alias'):
            self._alias_names.add(identifier.get_alias())
        if hasattr(identifier, 'tokens'):
            # some aliases are not parsed properly
            if identifier.tokens[0].ttype == Name:
                self._alias_names.add(identifier.tokens[0].value)
        # Subselect: recurse to pick up the tables it references.
        self.__extract_from_token(identifier)
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite, boolean, table table_name will be dropped if true
:return: string, create table as query
"""
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql
    def __extract_from_token(self, token, depth=0):
        """Walk the sqlparse token tree collecting table names and aliases."""
        if not hasattr(token, 'tokens'):
            return
        table_name_preceding_token = False
        for item in token.tokens:
            logging.debug((' ' * depth) + str(item.ttype) + str(item.value))
            if item.is_group and not self.__is_identifier(item):
                # Recurse into groups (parenthesised expressions, subselects).
                self.__extract_from_token(item, depth=depth + 1)
            if (
                item.ttype in Keyword and (
                    item.normalized in PRECEDES_TABLE_NAME or
                    item.normalized.endswith(' JOIN')
                )):
                # The next identifier encountered should be a table name.
                table_name_preceding_token = True
                continue
            if item.ttype in Keyword:
                # Any other keyword resets the expectation.
                table_name_preceding_token = False
                continue
            if table_name_preceding_token:
                if isinstance(item, Identifier):
                    self.__process_identifier(item)
                elif isinstance(item, IdentifierList):
                    for token in item.get_identifiers():
                        self.__process_identifier(token)
            elif isinstance(item, IdentifierList):
                for token in item.tokens:
                    if not self.__is_identifier(token):
                        # NOTE(review): this recurses on 'item' (the whole
                        # list) rather than on 'token', re-scanning the list
                        # once per non-identifier child -- confirm whether
                        # 'token' was intended.
                        self.__extract_from_token(item, depth=depth + 1)

    def _get_limit_from_token(self, token):
        # LIMIT may be a bare integer or a group such as 'LIMIT x, y'.
        # Returns None for any other token shape.
        if token.ttype == sqlparse.tokens.Literal.Number.Integer:
            return int(token.value)
        elif token.is_group:
            return int(token.get_token_at_offset(1).value)

    def _extract_limit_from_query(self, statement):
        # Returns the LIMIT value, or None when the statement has no LIMIT.
        limit_token = None
        for pos, item in enumerate(statement.tokens):
            if item.ttype in Keyword and item.value.lower() == 'limit':
                # presumably tokens[pos + 1] is whitespace and the value
                # follows it -- TODO confirm against sqlparse's tokenization
                limit_token = statement.tokens[pos + 2]
                return self._get_limit_from_token(limit_token)

    def get_query_with_new_limit(self, new_limit):
        """Returns the query with the specified limit.

        NOTE(review): despite the original "does not change the underlying
        query" note, the branch below mutates the parsed token values, so
        ``self._parsed`` is altered as a side effect -- confirm callers do
        not rely on it afterwards.
        """
        if not self._limit:
            return self.sql + ' LIMIT ' + str(new_limit)
        limit_pos = None
        tokens = self._parsed[0].tokens
        # Add all items to before_str until there is a limit
        for pos, item in enumerate(tokens):
            if item.ttype in Keyword and item.value.lower() == 'limit':
                limit_pos = pos
                break
        limit = tokens[limit_pos + 2]
        if limit.ttype == sqlparse.tokens.Literal.Number.Integer:
            tokens[limit_pos + 2].value = new_limit
        elif limit.is_group:
            # 'LIMIT offset, count' form: keep the offset, replace the count.
            tokens[limit_pos + 2].value = (
                '{}, {}'.format(next(limit.get_identifiers()), new_limit)
            )
        str_res = ''
        for i in tokens:
            str_res += str(i.value)
        return str_res
| apache-2.0 |
gnowxilef/youtube-dl | youtube_dl/extractor/msn.py | 17 | 4643 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
unescapeHTML,
)
class MSNIE(InfoExtractor):
    # Matches msn.com watch-page URLs; 'display_id' is the slug and 'id' is
    # the short alphanumeric video id after the two-letter prefix.
    _VALID_URL = r'https?://(?:www\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/criminal-minds-shemar-moore-shares-a-touching-goodbye-message/vp-BBqQYNE',
        'md5': '8442f66c116cbab1ff7098f986983458',
        'info_dict': {
            'id': 'BBqQYNE',
            'display_id': 'criminal-minds-shemar-moore-shares-a-touching-goodbye-message',
            'ext': 'mp4',
            'title': 'Criminal Minds - Shemar Moore Shares A Touching Goodbye Message',
            'description': 'md5:e8e89b897b222eb33a6b5067a8f1bc25',
            'duration': 104,
            'uploader': 'CBS Entertainment',
            'uploader_id': 'IT0X5aoJ6bJgYerJXSDCgFmYPB1__54v',
        },
    }, {
        'url': 'http://www.msn.com/en-ae/news/offbeat/meet-the-nine-year-old-self-made-millionaire/ar-BBt6ZKf',
        'only_matching': True,
    }, {
        'url': 'http://www.msn.com/en-ae/video/watch/obama-a-lot-of-people-will-be-disappointed/vi-AAhxUMH',
        'only_matching': True,
    }, {
        # geo restricted
        'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/the-first-fart-makes-you-laugh-the-last-fart-makes-you-cry/vp-AAhzIBU',
        'only_matching': True,
    }, {
        # non-ASCII characters in the URL path
        'url': 'http://www.msn.com/en-ae/entertainment/bollywood/watch-how-salman-khan-reacted-when-asked-if-he-would-apologize-for-his-‘raped-woman’-comment/vi-AAhvzW6',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Extract video formats and subtitles from an msn.com watch page."""
        mobj = re.match(self._VALID_URL, url)
        video_id, display_id = mobj.group('id', 'display_id')
        webpage = self._download_webpage(url, display_id)
        # Video metadata is embedded as JSON in a data-metadata HTML attribute.
        video = self._parse_json(
            self._search_regex(
                r'data-metadata\s*=\s*(["\'])(?P<data>.+?)\1',
                webpage, 'video data', default='{}', group='data'),
            display_id, transform_source=unescapeHTML)
        if not video:
            # No metadata found -- surface the page's own error message.
            error = unescapeHTML(self._search_regex(
                r'data-error=(["\'])(?P<error>.+?)\1',
                webpage, 'error', group='error'))
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)
        title = video['title']
        formats = []
        for file_ in video.get('videoFiles', []):
            format_url = file_.get('url')
            if not format_url:
                continue
            ext = determine_ext(format_url)
            if ext == 'ism':
                # Smooth Streaming manifest.
                formats.extend(self._extract_ism_formats(
                    format_url + '/Manifest', display_id, 'mss', fatal=False))
            if 'm3u8' in format_url:
                # m3u8_native should not be used here until
                # https://github.com/rg3/youtube-dl/issues/9913 is fixed
                m3u8_formats = self._extract_m3u8_formats(
                    format_url, display_id, 'mp4',
                    m3u8_id='hls', fatal=False)
                formats.extend(m3u8_formats)
            else:
                # Plain progressive HTTP download.
                formats.append({
                    'url': format_url,
                    'ext': 'mp4',
                    'format_id': 'http',
                    'width': int_or_none(file_.get('width')),
                    'height': int_or_none(file_.get('height')),
                })
        self._sort_formats(formats)
        subtitles = {}
        for file_ in video.get('files', []):
            format_url = file_.get('url')
            format_code = file_.get('formatCode')
            if not format_url or not format_code:
                continue
            # Only files with format code 3100 are treated as subtitles here.
            if compat_str(format_code) == '3100':
                subtitles.setdefault(file_.get('culture', 'en'), []).append({
                    'ext': determine_ext(format_url, 'ttml'),
                    'url': format_url,
                })
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': video.get('headlineImage', {}).get('url'),
            'duration': int_or_none(video.get('durationSecs')),
            'uploader': video.get('sourceFriendly'),
            'uploader_id': video.get('providerId'),
            'creator': video.get('creator'),
            'subtitles': subtitles,
            'formats': formats,
        }
| unlicense |
rockneurotiko/django | tests/fixtures/tests.py | 113 | 35712 | from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import warnings
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import (
TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.encoding import force_text
from .models import Article, Spy, Tag, Visa
class TestCaseFixtureLoadingTests(TestCase):
    # Loaded for every test by Django's TestCase fixture machinery.
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Check that test case has installed 3 fixture objects"
        self.assertEqual(Article.objects.count(), 3)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ])


class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
    """
    Make sure that subclasses can remove fixtures from parent class (#21089).
    """
    # Overriding with an empty list must suppress the parent's fixtures.
    fixtures = []

    def testClassFixtures(self):
        "Check that there were no fixture objects installed"
        self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin(object):
    """Mixin providing a helper that runs ``dumpdata`` and compares its
    output (stdout or a file) against an expected serialization."""

    def _dumpdata_assert(self, args, output, format='json', filename=None,
                         natural_foreign_keys=False, natural_primary_keys=False,
                         use_base_manager=False, exclude_list=None, primary_keys=''):
        # BUG FIX: 'exclude_list' previously defaulted to a mutable [] shared
        # across all calls; use None as the sentinel instead.  Passing [] or
        # None behaves identically, so callers are unaffected.
        if exclude_list is None:
            exclude_list = []
        new_io = six.StringIO()
        if filename:
            filename = os.path.join(tempfile.gettempdir(), filename)
        management.call_command('dumpdata', *args, **{'format': format,
                                                      'stdout': new_io,
                                                      'stderr': new_io,
                                                      'output': filename,
                                                      'use_natural_foreign_keys': natural_foreign_keys,
                                                      'use_natural_primary_keys': natural_primary_keys,
                                                      'use_base_manager': use_base_manager,
                                                      'exclude': exclude_list,
                                                      'primary_keys': primary_keys})
        if filename:
            # Output went to a temp file; read it back and clean up.
            with open(filename, "r") as f:
                command_output = f.read()
            os.remove(filename)
        else:
            command_output = new_io.getvalue().strip()
        # Use format-aware comparisons so insignificant ordering/whitespace
        # differences do not cause false failures.
        if format == "json":
            self.assertJSONEqual(command_output, output)
        elif format == "xml":
            self.assertXMLEqual(command_output, output)
        else:
            self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
    """Exercise ``loaddata``/``dumpdata`` round-trips: multiple formats,
    compression, natural keys, excludes, and primary-key filtering."""

    def test_loading_and_dumping(self):
        """Load fixtures in every supported format and verify that
        ``dumpdata`` round-trips the resulting database state."""
        apps.clear_cache()
        Site.objects.all().delete()
        # Load fixture 1. Single JSON file, with two objects.
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Try just dumping the contents of fixtures.Category
        self._dumpdata_assert(['fixtures.Category'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]')
        # ...and just fixtures.Article
        self._dumpdata_assert(['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # ...and both
        self._dumpdata_assert(['fixtures.Category', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Specify a specific model twice
        self._dumpdata_assert(['fixtures.Article', 'fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Specify a dump that specifies Article both explicitly and implicitly
        self._dumpdata_assert(['fixtures.Article', 'fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Specify a dump that specifies Article both explicitly and implicitly,
        # but lists the app first (#22025).
        self._dumpdata_assert(['fixtures', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Same again, but specify in the reverse order
        self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Specify one model from one application, and an entire other application.
        self._dumpdata_assert(['fixtures.Category', 'sites'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]')
        # Load fixture 2. JSON file imported by default. Overwrites some existing objects
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Load fixture 3, XML format.
        management.call_command('loaddata', 'fixture3.xml', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: XML identified as leading cause of cancer>',
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker on TV is great!>',
        ])
        # Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
        management.call_command('loaddata', 'fixture6.json', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
            '<Tag: <Article: Copyright is fine the way it is> tagged "law">',
        ], ordered=False)
        # Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
        management.call_command('loaddata', 'fixture7.xml', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
            '<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
            '<Tag: <Article: Django conquers world!> tagged "django">',
            '<Tag: <Article: Django conquers world!> tagged "world domination">',
        ], ordered=False)
        # Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
        management.call_command('loaddata', 'fixture8.json', verbosity=0)
        self.assertQuerysetEqual(Visa.objects.all(), [
            '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
            '<Visa: Stephane Grappelli Can add user>',
            '<Visa: Prince >'
        ], ordered=False)
        # Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
        management.call_command('loaddata', 'fixture9.xml', verbosity=0)
        self.assertQuerysetEqual(Visa.objects.all(), [
            '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
            '<Visa: Stephane Grappelli Can add user, Can delete user>',
            '<Visa: Artist formerly known as "Prince" Can change user>'
        ], ordered=False)
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: XML identified as leading cause of cancer>',
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker on TV is great!>',
        ])
        # By default, you get raw keys on dumpdata
        self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]')
        # But you can get natural keys if you ask for them and they are available
        self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]', natural_foreign_keys=True)
        # You can also omit the primary keys for models that we can get later with natural keys.
        self._dumpdata_assert(['fixtures.person'], '[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as \\"Prince\\""}, "model": "fixtures.person"}]', natural_primary_keys=True)
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, "model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": "2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", "fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], ["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": "fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person": ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, {"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]', natural_foreign_keys=True)
        # Dump the current contents of the database as an XML fixture
        self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag"><field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="1" model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as "Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object></django-objects>""", format='xml', natural_foreign_keys=True)

    def test_dumpdata_with_excludes(self):
        """``--exclude`` removes whole apps or individual models."""
        # Load fixture1 which has a site, two articles, and a category
        Site.objects.all().delete()
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        # Excluding fixtures app should only leave sites
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
            exclude_list=['fixtures'])
        # Excluding fixtures.Article/Book should leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book'])
        # Excluding fixtures and fixtures.Article/Book should be a no-op
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book'])
        # Excluding sites and fixtures.Article/Book should only leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book', 'sites'])
        # Excluding a bogus app should throw an error
        with six.assertRaisesRegex(self, management.CommandError,
                                   "No installed app with label 'foo_app'."):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
        # Excluding a bogus model should throw an error
        with six.assertRaisesRegex(self, management.CommandError,
                                   "Unknown model in excludes: fixtures.FooModel"):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])

    @unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
    def test_load_fixture_with_special_characters(self):
        """Fixture filenames containing glob special characters still load."""
        management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])

    def test_dumpdata_with_filtering_manager(self):
        """``dumpdata`` uses the default manager unless --all is requested."""
        spy1 = Spy.objects.create(name='Paul')
        spy2 = Spy.objects.create(name='Alex', cover_blown=True)
        # The default Spy manager filters out blown covers.
        self.assertQuerysetEqual(Spy.objects.all(),
                                 ['<Spy: Paul>'])
        # Use the default manager
        self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk)
        # Dump using Django's base manager. Should return all objects,
        # even those normally filtered by the manager
        self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk), use_base_manager=True)

    def test_dumpdata_with_pks(self):
        """``--pks`` restricts the dump, and only works with one model."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
            primary_keys='2,3'
        )
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}]',
            primary_keys='2'
        )
        # --pks with a whole app is rejected.
        with six.assertRaisesRegex(self, management.CommandError,
                                   "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        # --pks with no model at all is rejected.
        with six.assertRaisesRegex(self, management.CommandError,
                                   "You can only use --pks option with one model"):
            self._dumpdata_assert(
                '',
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        # --pks with two models is rejected.
        with six.assertRaisesRegex(self, management.CommandError,
                                   "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures.Article', 'fixtures.category'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )

    def test_dumpdata_with_file_output(self):
        """``--output`` writes the dump to a file instead of stdout."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
                              filename='dumpdata.json')

    def test_dumpdata_progressbar(self):
        """
        Dumpdata shows a progress bar on the command line when --output is set,
        stdout is a tty, and verbosity > 0.
        """
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        new_io = six.StringIO()
        # Fake a tty so the progress bar code path is taken.
        new_io.isatty = lambda: True
        _, filename = tempfile.mkstemp()
        options = {
            'format': 'json',
            'stdout': new_io,
            'stderr': new_io,
            'output': filename,
        }
        management.call_command('dumpdata', 'fixtures', **options)
        self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
        # Test no progress bar when verbosity = 0
        options['verbosity'] = 0
        new_io = six.StringIO()
        new_io.isatty = lambda: True
        management.call_command('dumpdata', 'fixtures', **options)
        self.assertEqual(new_io.getvalue(), '')

    def test_compress_format_loading(self):
        """Compressed fixture loads when the format is given explicitly."""
        # Load fixture 4 (compressed), using format specification
        management.call_command('loaddata', 'fixture4.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
        ])

    def test_compressed_specified_loading(self):
        """Compressed fixture loads with both format and compression given."""
        # Load fixture 5 (compressed), using format *and* compression specification
        management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: WoW subscribers now outnumber readers>',
        ])

    def test_compressed_loading(self):
        """Compressed fixture loads with only the compression given."""
        # Load fixture 5 (compressed), only compression specification
        management.call_command('loaddata', 'fixture5.zip', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: WoW subscribers now outnumber readers>',
        ])

    def test_ambiguous_compressed_fixture(self):
        """An ambiguous fixture basename raises a CommandError."""
        # The name "fixture5" is ambiguous, so loading it will raise an error
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture5', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])

    def test_db_loading(self):
        """Fixtures load into the 'default' database implicitly."""
        # Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
        management.call_command('loaddata', 'db_fixture_1', verbosity=0)
        management.call_command('loaddata', 'db_fixture_2', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
            '<Article: Who needs to use compressed data?>',
        ])

    def test_loaddata_error_message(self):
        """
        Verifies that loading a fixture which contains an invalid object
        outputs an error message which contains the pk of the object
        that triggered the error.
        """
        # MySQL needs a little prodding to reject invalid data.
        # This won't affect other tests because the database connection
        # is closed at the end of each test.
        if connection.vendor == 'mysql':
            connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
        with self.assertRaises(IntegrityError) as cm:
            management.call_command('loaddata', 'invalid.json', verbosity=0)
        self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])

    @ignore_warnings(category=UserWarning, message="No fixture named")
    def test_loaddata_app_option(self):
        """
        Verifies that the --app option works.
        """
        # A fixture restricted to a different app loads nothing.
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
        self.assertQuerysetEqual(Article.objects.all(), [])
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
        ])

    def test_loaddata_verbosity_three(self):
        """verbosity=3 reports per-object progress on stdout."""
        output = six.StringIO()
        management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
        command_output = force_text(output.getvalue())
        self.assertIn(
            "\rProcessed 1 object(s).\rProcessed 2 object(s)."
            "\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
            command_output
        )

    def test_loading_using(self):
        """Fixtures load when the database is selected explicitly."""
        # Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
        management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
            '<Article: Who needs to use compressed data?>',
        ])

    @ignore_warnings(category=UserWarning, message="No fixture named")
    def test_unmatched_identifier_loading(self):
        """Fixtures whose database identifier doesn't match load nothing."""
        # Try to load db fixture 3. This won't load because the database identifier doesn't match
        management.call_command('loaddata', 'db_fixture_3', verbosity=0)
        management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
        self.assertQuerysetEqual(Article.objects.all(), [])

    def test_output_formats(self):
        """Natural-key dumps round-trip in both JSON and XML formats."""
        # Load back in fixture 1, we need the articles from it
        management.call_command('loaddata', 'fixture1', verbosity=0)
        # Try to load fixture 6 using format discovery
        management.call_command('loaddata', 'fixture6', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Time to reform copyright> tagged "copyright">',
            '<Tag: <Article: Time to reform copyright> tagged "law">'
        ], ordered=False)
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]', natural_foreign_keys=True)
        # Dump the current contents of the database as an XML fixture
        self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field><field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field><field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Prince</field></object></django-objects>""", format='xml', natural_foreign_keys=True)
class NonExistentFixtureTests(TestCase):
    """
    Custom class to limit fixture dirs.
    """
    available_apps = ['django.contrib.auth', 'django.contrib.contenttypes']

    def test_loaddata_not_existent_fixture_file(self):
        """A missing fixture is reported on stdout and raises one warning."""
        captured = six.StringIO()
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            # With verbosity=2, we get both stdout output and a warning
            management.call_command(
                'loaddata',
                'this_fixture_doesnt_exist',
                verbosity=2,
                stdout=captured,
            )
            self.assertIn("No fixture 'this_fixture_doesnt_exist' in",
                          force_text(captured.getvalue()))
            self.assertEqual(len(caught), 1)
            self.assertEqual(force_text(caught[0].message),
                             "No fixture named 'this_fixture_doesnt_exist' found.")
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
    """Fixture tests that must run outside a wrapped transaction, e.g.
    format discovery with forward references between fixture objects."""

    available_apps = [
        'fixtures',
        'django.contrib.contenttypes',
        'django.contrib.auth',
        'django.contrib.sites',
    ]

    @skipUnlessDBFeature('supports_forward_references')
    def test_format_discovery(self):
        """Fixtures load by basename with the serialization format
        auto-discovered; ambiguous basenames raise a CommandError."""
        # Load fixture 1 again, using format discovery
        management.call_command('loaddata', 'fixture1', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Try to load fixture 2 using format discovery; this will fail
        # because there are two fixture2's in the fixtures directory
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture2', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
        # Load fixture 4 (compressed), using format discovery
        management.call_command('loaddata', 'fixture4', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/neighbors/tests/test_kde.py | 17 | 5626 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
    """Naive reference evaluation of a kernel density estimate.

    Computes, for every row of ``Y``, the normalized sum of kernel
    contributions from every row of ``X`` with bandwidth ``h``.
    Raises ``ValueError`` for an unknown kernel name.
    """
    # Pairwise Euclidean distances, shape (len(Y), len(X)).
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]

    # Per-pair (unnormalized) kernel contributions.
    if kernel == 'gaussian':
        contrib = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        contrib = d < h
    elif kernel == 'epanechnikov':
        contrib = (1.0 - (d * d) / (h * h)) * (d < h)
    elif kernel == 'exponential':
        contrib = np.exp(-d / h)
    elif kernel == 'linear':
        contrib = (1 - d / h) * (d < h)
    elif kernel == 'cosine':
        contrib = np.cos(0.5 * np.pi * d / h) * (d < h)
    else:
        raise ValueError('kernel not recognized')

    return norm * contrib.sum(-1)
def test_kernel_density(n_samples=100, n_features=3):
    """Nose test generator comparing KernelDensity against the slow
    reference implementation over all kernels, bandwidths and tolerances.

    Fixes two defects in the previous version:
    * ``check_results`` closed over ``dens_true`` late-bindingly: nose runs
      the yielded callables only after all loops finish, so every check was
      comparing against the *last* computed reference density.  ``dens_true``
      is now frozen per-iteration via a default argument.
    * ``breadth_first`` was iterated but never used, so each configuration
      simply ran twice; it is now forwarded to ``KernelDensity``.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

            # dens_true=dens_true binds the reference density at definition
            # time, avoiding the late-binding closure bug described above.
            def check_results(kernel, bandwidth, atol, rtol, breadth_first,
                              dens_true=dens_true):
                kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                                    atol=atol, rtol=rtol,
                                    breadth_first=breadth_first)
                log_dens = kde.fit(X).score_samples(Y)
                assert_allclose(np.exp(log_dens), dens_true,
                                atol=atol, rtol=max(1E-7, rtol))
                assert_allclose(np.exp(kde.score(Y)),
                                np.prod(dens_true),
                                atol=atol, rtol=max(1E-7, rtol))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, bandwidth, atol, rtol,
                               breadth_first)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    """Sampling from a fitted KernelDensity: supported kernels draw points
    near the data, unsupported kernels raise NotImplementedError."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    bandwidth = 0.2

    for kernel in ['gaussian', 'tophat']:
        # draw a tophat sample
        estimator = KernelDensity(bandwidth, kernel=kernel).fit(X)
        samples = estimator.sample(100)
        assert_equal(X.shape, samples.shape)

        # check that samples are in the right range
        nn = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nn.kneighbors(X, return_distance=True)
        if kernel == 'tophat':
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)

    # check unsupported kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        estimator = KernelDensity(bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, estimator.sample, 100)

    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    estimator = KernelDensity(kernel="gaussian").fit(X)
    assert_equal(estimator.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
    """Smoke test: every algorithm/metric pairing either fits or raises."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)  # 2 features required for haversine dist.
    Y = rng.randn(10, 2)
    algorithms = ['auto', 'ball_tree', 'kd_tree']
    metrics = ['euclidean', 'minkowski', 'manhattan',
               'chebyshev', 'haversine']
    for algorithm in algorithms:
        for metric in metrics:
            if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
                # The kd-tree backend only supports a subset of metrics and
                # must refuse the rest at construction time.
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                estimator = KernelDensity(algorithm=algorithm, metric=metric)
                estimator.fit(X)
                scores = estimator.score_samples(Y)
                assert_equal(scores.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
    # Placeholder: scoring is indirectly exercised by test_kernel_density
    # above; this dedicated test was never written.
    pass
    #FIXME
    #np.random.seed(0)
    #X = np.random.random((n_samples, n_features))
    #Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
    """Invalid constructor arguments must raise ValueError."""
    bad_kwargs = [
        {'algorithm': 'blah'},
        {'bandwidth': 0},
        {'kernel': 'blah'},
        {'metric': 'blah'},
        {'algorithm': 'kd_tree', 'metric': 'blah'},
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)
def test_kde_pipeline_gridsearch():
    """KernelDensity must compose with Pipeline and be tunable via GridSearchCV."""
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    scaler = StandardScaler(with_mean=False, with_std=False)
    pipeline = make_pipeline(scaler, KernelDensity(kernel="gaussian"))
    grid = {'kerneldensity__bandwidth': [0.001, 0.01, 0.1, 1, 10]}
    search = GridSearchCV(pipeline, param_grid=grid, cv=5)
    search.fit(X)
    assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
if __name__ == '__main__':
    # Allow running this test module directly through the nose test runner.
    import nose
    nose.runmodule()
| bsd-3-clause |
paulcalabro/zato | code/zato-cli/src/zato/cli/__init__.py | 6 | 26469 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import json, shutil
from cStringIO import StringIO
from getpass import getpass, getuser
from socket import gethostname
# stdlib
import logging, os, sys, tempfile, time
from datetime import datetime
# Importing
from peak.util.imports import importString
# SQLAlchemy
import sqlalchemy
# Zato
from zato import common
from zato.cli import util as cli_util
from zato.common import odb, util, ZATO_INFO_FILE
from zato.common.util import get_engine_url, get_full_stack, get_session
################################################################################
# Help strings shared by the CLI option definitions below.
_opts_odb_type = 'Operational database type, must be one of {}'.format(odb.SUPPORTED_DB_TYPES) # noqa
_opts_odb_host = 'Operational database host'
_opts_odb_port = 'Operational database port'
_opts_odb_user = 'Operational database user'
_opts_odb_schema = 'Operational database schema'
_opts_odb_db_name = 'Operational database name'
_opts_broker_host = 'Broker host'
_opts_broker_port = 'Broker port'
_opts_kvdb_host = 'Key/value DB host'
_opts_kvdb_port = 'Key/value DB port'
# Default X.509 subject fields used by the 'ca create *' commands unless
# overridden on the command line.
ca_defaults = {
    'organization': 'My Company',
    'organizational_unit': 'My Unit', # When it's an optional argument
    'organizational-unit': 'My Unit', # When it's a required one
    'locality': 'My Town',
    'state_or_province': 'My State',
    'country': 'US'
}
# Default names used when generating sample CA material.
default_ca_name = 'Sample CA'
default_common_name = 'localhost'
# Options shared by every command that connects to the operational database (ODB).
common_odb_opts = [
    {'name':'odb_type', 'help':_opts_odb_type, 'choices':odb.SUPPORTED_DB_TYPES}, # noqa
    {'name':'--odb_host', 'help':_opts_odb_host},
    {'name':'--odb_port', 'help':_opts_odb_port},
    {'name':'--odb_user', 'help':_opts_odb_user},
    {'name':'--odb_db_name', 'help':_opts_odb_db_name},
    {'name':'--postgresql_schema', 'help':_opts_odb_schema + ' (PostgreSQL only)'},
    {'name':'--odb_password', 'help':'ODB database password'},
]
# Options shared by all 'ca create *' commands; defaults come from ca_defaults.
common_ca_create_opts = [
    {'name':'--organization', 'help':'Organization name (defaults to {organization})'.format(**ca_defaults)},
    {'name':'--locality', 'help':'Locality name (defaults to {locality})'.format(**ca_defaults)},
    {'name':'--state-or-province', 'help':'State or province name (defaults to {state_or_province})'.format(**ca_defaults)},
    {'name':'--country', 'help':'Country (defaults to {country})'.format(**ca_defaults)},
    {'name':'--common-name', 'help':'Common name (defaults to {default})'.format(default=default_common_name)},
]
# Options for commands that talk to the key/value database.
kvdb_opts = [
    {'name':'kvdb_host', 'help':_opts_kvdb_host},
    {'name':'kvdb_port', 'help':_opts_kvdb_port},
    {'name':'--kvdb_password', 'help':'Key/value database password'},
]
def get_tech_account_opts(help_suffix='to use for connecting to clusters'):
    """ Returns CLI option definitions for a technical account name/password
    pair, with the name option's help text completed by ``help_suffix``.
    """
    name_opt = {
        'name': 'tech_account_name',
        'help': 'Technical account name {}'.format(help_suffix),
    }
    password_opt = {
        'name': '--tech_account_password',
        'help': 'Technical account password',
    }
    return [name_opt, password_opt]
common_logging_conf_contents = """
loggers:
'':
level: INFO
handlers: [stdout, default]
zato:
level: INFO
handlers: [stdout, default]
qualname: zato
propagate: false
zato_access_log:
level: INFO
handlers: [http_access_log]
qualname: zato_access_log
propagate: false
zato_admin:
level: INFO
handlers: [admin]
qualname: zato_admin
propagate: false
zato_connector:
level: INFO
handlers: [connector]
qualname: zato_connector
propagate: false
zato_kvdb:
level: INFO
handlers: [kvdb]
qualname: zato_kvdb
propagate: false
zato_pubsub:
level: INFO
handlers: [pubsub]
qualname: zato_pubsub
propagate: false
zato_pubsub_overflown:
level: INFO
handlers: [pubsub_overflown]
qualname: zato_pubsub_overflown
propagate: false
zato_rbac:
level: INFO
handlers: [rbac]
qualname: zato_rbac
propagate: false
zato_scheduler:
level: INFO
handlers: [scheduler]
qualname: zato_scheduler
propagate: false
zato_singleton:
level: INFO
handlers: [singleton]
qualname: zato_singleton
propagate: false
handlers:
default:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: '{log_path}'
mode: 'a'
maxBytes: 20000000
backupCount: 10
stdout:
formatter: colour
class: logging.StreamHandler
http_access_log:
formatter: http_access_log
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/http_access.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
admin:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/admin.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
connector:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/connector.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
kvdb:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/kvdb.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
pubsub:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/pubsub.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
pubsub_overflown:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/pubsub-overflown.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
rbac:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/rbac.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
scheduler:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/scheduler.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
singleton:
formatter: default
class: logging.handlers.ConcurrentRotatingFileHandler
filename: './logs/singleton.log'
mode: 'a'
maxBytes: 20000000
backupCount: 10
formatters:
default:
format: '%(asctime)s - %(levelname)s - %(process)d:%(threadName)s - %(name)s:%(lineno)d - %(message)s'
http_access_log:
format: '%(remote_ip)s %(cid_resp_time)s "%(channel_name)s" [%(req_timestamp)s] "%(method)s %(path)s %(http_version)s" %(status_code)s %(response_size)s "-" "%(user_agent)s"'
colour:
format: '%(asctime)s - %(levelname)s - %(process)d:%(threadName)s - %(name)s:%(lineno)d - %(message)s'
(): zato.common.util.ColorFormatter
version: 1
"""
# ######################################################################################################################
def run_command(args):
    """ Maps args.command to its implementing class, imports that class
    and runs it with the parsed arguments.

    Raises KeyError if args.command is not one of the known commands,
    exactly as the previous dict-of-classes lookup did.
    """
    command_imports = {
        'ca_create_ca': 'zato.cli.ca_create_ca.Create',
        'ca_create_lb_agent': 'zato.cli.ca_create_lb_agent.Create',
        'ca_create_server': 'zato.cli.ca_create_server.Create',
        'ca_create_web_admin': 'zato.cli.ca_create_web_admin.Create',
        'check_config': 'zato.cli.check_config.CheckConfig',
        'component_version': 'zato.cli.component_version.ComponentVersion',
        'create_cluster': 'zato.cli.create_cluster.Create',
        'create_lb': 'zato.cli.create_lb.Create',
        'create_odb': 'zato.cli.create_odb.Create',
        'create_server': 'zato.cli.create_server.Create',
        'create_user': 'zato.cli.web_admin_auth.CreateUser',
        'create_web_admin': 'zato.cli.create_web_admin.Create',
        'delete_odb': 'zato.cli.delete_odb.Delete',
        'decrypt': 'zato.cli.crypto.Decrypt',
        'encrypt': 'zato.cli.crypto.Encrypt',
        'enmasse': 'zato.cli.enmasse.EnMasse',
        'from_config': 'zato.cli.FromConfig',
        'info': 'zato.cli.info.Info',
        'migrate': 'zato.cli.migrate.Migrate',
        'quickstart_create': 'zato.cli.quickstart.Create',
        'service_invoke': 'zato.cli.service.Invoke',
        'start': 'zato.cli.start.Start',
        'stop': 'zato.cli.stop.Stop',
        'update_crypto': 'zato.cli.crypto.UpdateCrypto',
        'update_password': 'zato.cli.web_admin_auth.UpdatePassword',
        'update_openid': 'zato.cli.web_admin_auth.UpdateOpenID',
    }
    # Import lazily - only the module implementing the requested command is
    # imported instead of all of them, which speeds up CLI start-up and keeps
    # an import error in one command from breaking every other command.
    command_class = importString(command_imports[args.command])
    command_class(args).run(args)
################################################################################
class ZatoCommand(object):
    """ A base class for all Zato CLI commands. Handles common things like parsing
    the arguments, checking whether a config file or command line switches should
    be used, asks for passwords etc.
    """
    # Class-level defaults that concrete commands override as needed:
    needs_empty_dir = False       # must the target directory be empty?
    file_needed = None            # a file whose presence the command requires
    needs_secrets_confirm = True  # ask for each secret twice?
    allow_empty_secrets = False   # may secrets be empty strings?
    add_config_file = True
    target_dir = None
    show_output = True
    opts = []                     # list of argparse-style option dicts
    class SYS_ERROR(object):
        """ All non-zero sys.exit return codes the commands may use.
        """
        ODB_EXISTS = 1
        FILE_MISSING = 2
        NOT_A_ZATO_COMPONENT = 3
        NO_ODB_FOUND = 4
        DIR_NOT_EMPTY = 5
        CLUSTER_NAME_ALREADY_EXISTS = 6
        SERVER_NAME_ALREADY_EXISTS = 7
        NO_SUCH_CLUSTER = 8
        COMPONENT_ALREADY_RUNNING = 9
        NO_PID_FOUND = 10
        NO_SUCH_WEB_ADMIN_USER = 11
        NO_INPUT = 12
        CONFLICTING_OPTIONS = 13
        NO_OPTIONS = 14
        INVALID_INPUT = 15
        EXCEPTION_CAUGHT = 16
        CANNOT_MIGRATE = 17
    class COMPONENTS(object):
        """ Codes and human-readable names of the component types the CLI manages.
        """
        class _ComponentName(object):
            def __init__(self, code, name):
                self.code = code
                self.name = name
        CA = _ComponentName('CA', 'Certificate authority')
        LOAD_BALANCER = _ComponentName('LOAD_BALANCER', 'Load balancer')
        SERVER = _ComponentName('SERVER', 'Server')
        WEB_ADMIN = _ComponentName('WEB_ADMIN', 'Web admin')
    def __init__(self, args):
        self.args = args
        self.original_dir = os.getcwd()
        # The env variable silences output, e.g. when driven by other tools
        self.show_output = False if 'ZATO_CLI_DONT_SHOW_OUTPUT' in os.environ else True
        self.verbose = args.verbose
        self.reset_logger(args)
        if args.store_config:
            self.store_config(args)
        self.engine = None
    def reset_logger(self, args, reload_=False):
        """ (Re-)creates this command's logger: console output always, plus a
        per-invocation log file if --store_log was given. With reload_=True the
        logging module itself is reinitialized first.
        """
        if reload_:
            logging.shutdown() # noqa
            reload(logging) # noqa
        self.logger = logging.getLogger(self.__class__.__name__) # noqa
        self.logger.setLevel(logging.DEBUG if self.verbose else logging.INFO) # noqa
        self.logger.handlers[:] = []
        console_handler = logging.StreamHandler(sys.stdout) # noqa
        console_formatter = logging.Formatter('%(message)s') # noqa
        console_handler.setFormatter(console_formatter)
        self.logger.addHandler(console_handler)
        if args.store_log:
            verbose_handler = logging.FileHandler('zato.{}.log'.format(util.fs_safe_now())) # noqa
            verbose_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # noqa
            verbose_handler.setFormatter(verbose_formatter)
            self.logger.addHandler(verbose_handler)
    def _get_secret(self, template, needs_confirm, allow_empty, secret_name='password'):
        """ Runs an infinite loop until a user enters the secret. User needs
        to confirm the secret if 'needs_confirm' is True. New line characters
        are always stripped before returning the secret, so that "\n" becomes
        "", "\nsecret\n" becomes "secret" and "\nsec\nret\n" becomes "sec\nret".
        """
        keep_running = True
        self.logger.info('')
        while keep_running:
            secret1 = getpass(template + ' (will not be echoed): ')
            if not needs_confirm:
                return secret1.strip('\n')
            secret2 = getpass('Enter the {} again (will not be echoed): '.format(secret_name))
            if secret1 != secret2:
                self.logger.info('{}s do not match'.format(secret_name.capitalize()))
            else:
                if not secret1 and not allow_empty:
                    self.logger.info('No {} entered'.format(secret_name))
                else:
                    return secret1.strip('\n')
    def _get_now(self, time_=None):
        """ Returns the given (or current UTC) time formatted so it is safe
        to embed in file names.
        """
        if not time_:
            time_ = time.gmtime() # noqa
        return time.strftime('%Y-%m-%d_%H-%M-%S', time_) # noqa
    def _get_user_host(self):
        """ Returns a 'user@host' string identifying who ran the command where. """
        return getuser() + '@' + gethostname()
    def store_initial_info(self, target_dir, component):
        """ Writes the component's info file (version, creator, timestamp,
        component type) into target_dir.
        """
        info = {'version': common.version, # noqa
                'created_user_host': self._get_user_host(),
                'created_ts': datetime.utcnow().isoformat(), # noqa
                'component': component
                }
        open(os.path.join(target_dir, ZATO_INFO_FILE), 'wb').write(json.dumps(info))
    def store_config(self, args):
        """ Stores the config options in a config file for a later use.
        """
        now = util.fs_safe_now() # noqa
        file_name = 'zato.{}.config'.format(now)
        file_args = StringIO()
        for arg, value in args._get_kwargs():
            if value:
                file_args.write('{}={}\n'.format(arg, value))
        body = '# {} - {}\n{}'.format(now, self._get_user_host(), file_args.getvalue())
        open(file_name, 'w').write(body)
        file_args.close()
        self.logger.debug('Options saved in file {file_name}'.format(
            file_name=os.path.abspath(file_name)))
    def _get_engine(self, args):
        """ Creates an SQLAlchemy engine for the ODB described by args. """
        # PostgreSQL lets us tag the connection with an application name
        connect_args = {'application_name':util.get_component_name('enmasse')} if args.odb_type == 'postgresql' else {}
        return sqlalchemy.create_engine(get_engine_url(args), connect_args=connect_args)
    def _get_session(self, engine):
        """ Returns a new ODB session bound to the given engine. """
        return get_session(engine)
    def _check_passwords(self, args, check_password):
        """ Get the password from a user for each argument that needs a password.
        """
        for opt_name, opt_help in check_password:
            # Normalize '--odb-password' style names to attribute names
            opt_name = opt_name.replace('--', '').replace('-', '_')
            password_arg = getattr(args, opt_name, None)
            # It is OK if password is an empty string and empty secrets are allowed
            if not password_arg:
                if isinstance(password_arg, str) and self.allow_empty_secrets:
                    continue
                password = self._get_secret(opt_help, self.needs_secrets_confirm, self.allow_empty_secrets, opt_name)
                setattr(args, opt_name, password)
        return args
    def _get_arg(self, args, name, default):
        """ Returns args.<name> if it is set to a truthy value, else the default. """
        value = getattr(args, name, None)
        return value if value else default
    def run(self, args, offer_save_opts=True, work_args=None):
        """ Parses the command line or the args passed in and figures out
        whether the user wishes to use a config file or command line switches.
        """
        try:
            # Do we need to have a clean directory to work in?
            if self.needs_empty_dir:
                work_dir = os.path.abspath(args.path)
                for elem in os.listdir(work_dir):
                    if elem.startswith('zato') and elem.endswith('config'):
                        # This is a zato.{}.config file. The had been written there
                        # before we got to this point and it's OK to skip it.
                        continue
                    else:
                        msg = ('Directory {} is not empty, please re-run the command ' + # noqa
                              'in an empty directory').format(work_dir) # noqa
                        self.logger.info(msg)
                        sys.exit(self.SYS_ERROR.DIR_NOT_EMPTY) # noqa
            # Do we need the directory to contain any specific files?
            if self.file_needed:
                full_path = os.path.join(args.path, self.file_needed)
                if not os.path.exists(full_path):
                    msg = 'Could not find file {}'.format(full_path)
                    self.logger.info(msg)
                    sys.exit(self.SYS_ERROR.FILE_MISSING) # noqa
            # Collect options that represent secrets so they can be prompted for
            check_password = []
            for opt_dict in self.opts:
                name = opt_dict['name']
                if 'password' in name or 'secret' in name:
                    # Don't required password on SQLite
                    if 'odb' in name and args.odb_type == 'sqlite':
                        continue
                    check_password.append((name, opt_dict['help']))
            self.before_execute(args)
            if check_password and self.is_password_required():
                args = self._check_passwords(args, check_password)
            # GH #328 - zato create web_admin treats boolean admin_created as an exit code
            # https://github.com/zatosource/zato/issues/328
            return_code = self.execute(args)
            if isinstance(return_code, (int, long)):
                sys.exit(return_code)
            else:
                sys.exit(0)
        except Exception:
            self.reset_logger(self.args)
            self.logger.error(get_full_stack())
            sys.exit(self.SYS_ERROR.EXCEPTION_CAUGHT)
    def is_password_required(self):
        """ Subclasses may return False to skip password prompting altogether. """
        return True
    def before_execute(self, args):
        """ A hooks that lets commands customize their input before they are actually executed.
        """
        # Update odb_type if it's MySQL so that users don't have to think about the particular client implementation.
        if getattr(args, 'odb_type', None) == 'mysql':
            args.odb_type = 'mysql+pymysql'
    def _copy_lb_server_crypto(self, repo_dir, args, middle_part):
        """ Copies the four PEM files (public/private key, cert, CA certs)
        given in args into repo_dir under 'zato-<middle_part>-<name>.pem' names.
        """
        for name in('pub-key', 'priv-key', 'cert', 'ca-certs'):
            arg_name = '{}_path'.format(name.replace('-', '_'))
            full_path = os.path.join(repo_dir, 'zato-{}-{}.pem'.format(middle_part, name))
            shutil.copyfile(os.path.abspath(getattr(args, arg_name)), full_path)
    def copy_lb_crypto(self, repo_dir, args):
        """ Copies load-balancer agent crypto material into repo_dir. """
        self._copy_lb_server_crypto(repo_dir, args, 'lba')
    def copy_server_crypto(self, repo_dir, args):
        """ Copies server crypto material into repo_dir. """
        self._copy_lb_server_crypto(repo_dir, args, 'server')
    def copy_web_admin_crypto(self, repo_dir, args):
        """ Copies web-admin crypto material into repo_dir using its own
        'web-admin-<name>.pem' naming scheme.
        """
        for attr, name in (('pub_key_path', 'pub-key'), ('priv_key_path', 'priv-key'), ('cert_path', 'cert'), ('ca_certs_path', 'ca-certs')):
            file_name = os.path.join(repo_dir, 'web-admin-{}.pem'.format(name))
            shutil.copyfile(os.path.abspath(getattr(args, attr)), file_name)
    def get_crypto_manager_from_server_config(self, config, repo_dir):
        """ Delegates to cli_util - returns a crypto manager for the server config. """
        return cli_util.get_crypto_manager_from_server_config(config, repo_dir)
    def get_odb_session_from_server_config(self, config, cm):
        """ Delegates to cli_util - returns an ODB session for the server config. """
        return cli_util.get_odb_session_from_server_config(config, cm)
    def get_server_client_auth(self, config, repo_dir):
        """ Returns credentials to authenticate with against Zato's own /zato/admin/invoke channel.
        """
        return cli_util.get_server_client_auth(config, repo_dir)
class FromConfig(ZatoCommand):
    """ Executes commands from a command config file.
    """
    def execute(self, args):
        """ Runs the command with arguments read from a config file.

        Each non-comment, non-blank line must be in 'name=value' form;
        values may themselves contain '=' characters.
        """
        # 'with' guarantees the file handle is closed even if parsing fails
        # (the previous implementation leaked it).
        with open(args.path) as f:
            for line in f:
                # Skip comments and blank lines instead of crashing on them
                if line.lstrip().startswith('#') or not line.strip():
                    continue
                arg, value = line.split('=', 1)
                arg = arg.strip()
                value = value.strip()
                setattr(args, arg, value)
        run_command(args)
class CACreateCommand(ZatoCommand):
    """ A base class for all commands that create new crypto material.
    """
    # Marker file whose presence identifies a directory as a CA directory
    file_needed = '.zato-ca-dir'
    def __init__(self, args):
        super(CACreateCommand, self).__init__(args)
        self.target_dir = os.path.abspath(args.path)
    def _on_file_missing(self):
        """ Returns the message shown when the CA marker file is absent. """
        msg = "{} doesn't seem to be a CA directory, the '{}' file is missing."
        return msg.format(self.target_dir, self.file_needed)
    def _execute(self, args, extension, show_output=True):
        """ Generates a CSR, a private/public key pair and a signed certificate
        by driving the 'openssl' command line tool with a config rendered from
        the CA's template. Returns the dict of paths/parameters used so direct
        callers can inspect what was produced.
        """
        now = self._get_now()
        openssl_template = open(os.path.join(self.target_dir, 'ca-material/openssl-template.conf')).read()
        # The OU may arrive under either spelling depending on whether it was
        # an optional or a required command line argument.
        ou_attrs = ('organizational_unit', 'organizational-unit')
        template_args = {}
        for name in('organization', 'locality', 'state_or_province', 'country'):
            value = self._get_arg(args, name, ca_defaults[name])
            template_args[name.replace('-', '_')] = value
        for name in ou_attrs:
            has_name = self._get_arg(args, name, None)
            if has_name:
                value = self._get_arg(args, name, ca_defaults[name])
                template_args[name.replace('-', '_')] = value
                break
        else:
            # No OU given - let a subclass compute one, or fall back to defaults
            if hasattr(self, 'get_organizational_unit'):
                template_args['organizational_unit'] = self.get_organizational_unit(args)
            else:
                template_args['organizational_unit'] = ca_defaults['organizational_unit']
        template_args['common_name'] = self._get_arg(args, 'common_name', default_common_name)
        template_args['target_dir'] = self.target_dir
        # Rendered openssl config goes into a temporary file passed via -config
        f = tempfile.NamedTemporaryFile() # noqa
        f.write(openssl_template.format(**template_args))
        f.flush()
        file_args = {
            'now':now,
            'target_dir':self.target_dir
        }
        for arg in('cluster_name', 'server_name'):
            if hasattr(args, arg):
                file_args[arg] = getattr(args, arg)
        file_args['file_prefix'] = self.get_file_prefix(file_args)
        csr_name = '{target_dir}/out-csr/{file_prefix}-csr-{now}.pem'.format(**file_args)
        priv_key_name = '{target_dir}/out-priv/{file_prefix}-priv-{now}.pem'.format(**file_args)
        pub_key_name = '{target_dir}/out-pub/{file_prefix}-pub-{now}.pem'.format(**file_args)
        cert_name = '{target_dir}/out-cert/{file_prefix}-cert-{now}.pem'.format(**file_args)
        format_args = {
            'config': f.name,
            'extension': extension,
            'csr_name': csr_name,
            'priv_key_name': priv_key_name,
            'pub_key_name': pub_key_name,
            'cert_name': cert_name,
            'target_dir': self.target_dir
        }
        # Create the CSR and keys ..
        cmd = """openssl req -batch -new -nodes -extensions {extension} \
          -out {csr_name} \
          -keyout {priv_key_name} \
          -pubkey \
          -newkey rsa:4096 -config {config} \
          >/dev/null 2>&1""".format(**format_args)
        os.system(cmd)
        # .. note that we were using "-pubkey" flag above so we now have to extract
        # the public key from the CSR.
        split_line = '-----END PUBLIC KEY-----'
        csr_pub = open(csr_name).read()
        csr_pub = csr_pub.split(split_line)
        pub = csr_pub[0] + split_line
        csr = csr_pub[1].lstrip()
        open(csr_name, 'w').write(csr)
        open(pub_key_name, 'w').write(pub)
        # Generate the certificate
        cmd = """openssl ca -batch -passin file:{target_dir}/ca-material/ca-password -config {config} \
          -out {cert_name} \
          -extensions {extension} \
          -in {csr_name} \
          >/dev/null 2>&1""".format(**format_args)
        os.system(cmd)
        f.close()
        # Now delete the default certificate stored in './', we don't really
        # need it because we have its copy in './out-cert' anyway.
        last_serial = open(os.path.join(self.target_dir, 'ca-material/ca-serial.old')).read().strip()
        os.remove(os.path.join(self.target_dir, last_serial + '.pem'))
        msg = """Crypto material generated and saved in:
  - private key: {priv_key_name}
  - public key: {pub_key_name}
  - certificate {cert_name}
  - CSR: {csr_name}""".format(**format_args)
        if show_output:
            if self.verbose:
                self.logger.debug(msg)
            else:
                self.logger.info('OK')
        # Make sure permissions are tight (GH #440)
        os.chmod(priv_key_name, 0640)
        # In case someone needs to invoke us directly and wants to find out
        # what the format_args were.
        return format_args
class ManageCommand(ZatoCommand):
    """ A base class for commands that manage an already existing Zato component,
    dispatching to a per-component handler based on the component's info file.
    """
    add_config_file = False
    def _get_dispatch(self):
        """ Maps component codes to the bound methods that handle them. """
        return {
            self.COMPONENTS.LOAD_BALANCER.code: self._on_lb,
            self.COMPONENTS.SERVER.code: self._on_server,
            self.COMPONENTS.WEB_ADMIN.code: self._on_web_admin,
        }
    # Files whose presence marks a directory as a Zato component
    command_files = set([ZATO_INFO_FILE])
    def _on_lb(self, *ignored_args, **ignored_kwargs):
        """ Per-component handler - concrete commands must override. """
        raise NotImplementedError('Should be implemented by subclasses')
    _on_web_admin = _on_server = _on_lb
    def execute(self, args):
        """ Validates that args.path is a Zato component directory, reads its
        info file and invokes the handler matching the component type.
        """
        self.component_dir = os.path.abspath(args.path)
        self.config_dir = os.path.join(self.component_dir, 'config')
        listing = set(os.listdir(self.component_dir))
        # Do we have any files we're looking for?
        found = self.command_files & listing
        if not found:
            msg = """Directory {} doesn't seem to belong to a Zato component. Expected one of the following files to exist {}""".format(
                self.component_dir, sorted(self.command_files))
            self.logger.info(msg)
            sys.exit(self.SYS_ERROR.NOT_A_ZATO_COMPONENT) # noqa
        found = list(found)[0]
        # 'with' makes sure the info file's handle is closed
        # (json.load(open(...)) used to leak it).
        with open(os.path.join(self.component_dir, found)) as f:
            json_data = json.load(f)
        os.chdir(self.component_dir)
        return self._get_dispatch()[json_data['component']](args)
| gpl-3.0 |
moijes12/oh-mainline | vendor/packages/python-social-auth/social/backends/professionali.py | 73 | 1907 | # -*- coding: utf-8 -*-
"""
Professionaly OAuth 2.0 support.
This contribution adds support for professionaly.ru OAuth 2.0.
Username is retrieved from the identity returned by server.
"""
from time import time
from social.utils import parse_qs
from social.backends.oauth import BaseOAuth2
class ProfessionaliOAuth2(BaseOAuth2):
    """ OAuth 2.0 authentication backend for professionali.ru. """
    name = 'professionali'
    # The provider identifies users by 'user_id' in its responses
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'https://api.professionali.ru/oauth/authorize.html'
    ACCESS_TOKEN_URL = 'https://api.professionali.ru/oauth/getToken.json'
    ACCESS_TOKEN_METHOD = 'POST'
    # Extra response fields copied into the stored extra_data
    EXTRA_DATA = [
        ('avatar_big', 'avatar_big'),
        ('link', 'link')
    ]
    def get_user_details(self, response):
        """ Builds the user-details dict from the provider's identity response;
        the username is derived from last and first name.
        """
        first_name, last_name = map(response.get, ('firstname', 'lastname'))
        email = ''
        if self.setting('FAKE_EMAIL'):
            # The API exposes no email; optionally fabricate a unique one
            # from the current timestamp.
            email = '{0}@professionali.ru'.format(time())
        return {
            'username': '{0}_{1}'.format(last_name, first_name),
            'first_name': first_name,
            'last_name': last_name,
            'email': email
        }
    def user_data(self, access_token, response, *args, **kwargs):
        """ Loads the user's profile fields from the API; returns None on any
        request/parsing failure.
        """
        url = 'https://api.professionali.ru/v6/users/get.json'
        fields = list(set(['firstname', 'lastname', 'avatar_big', 'link'] +
                      self.setting('EXTRA_DATA', [])))
        params = {
            'fields': ','.join(fields),
            'access_token': access_token,
            'ids[]': response['user_id']
        }
        try:
            return self.get_json(url, params)[0]
        except (TypeError, KeyError, IOError, ValueError, IndexError):
            return None
    def get_json(self, url, *args, **kwargs):
        # SECURITY: verify=False disables TLS certificate validation and
        # allows man-in-the-middle attacks - review whether this is still
        # required for this provider.
        return self.request(url, verify=False, *args, **kwargs).json()
    def get_querystring(self, url, *args, **kwargs):
        # SECURITY: verify=False disables TLS certificate validation - see
        # the note in get_json above.
        return parse_qs(self.request(url, verify=False, *args, **kwargs).text)
| agpl-3.0 |
KelvinLu/ping-at-me | ping_at_me/pingapp/users.py | 1 | 1481 | from django.contrib.auth import authenticate, login, logout
from pingapp import models, ping
def user_login(request, username, password):
    """ Authenticates the given credentials and, on success, attaches the
    user to the request's session. Returns True on success, False otherwise.
    """
    user = authenticate(username=username, password=password)
    if user is None:
        return False
    login(request, user)
    return True
def user_logout(request):
    """ Ends the session of whichever user the request belongs to. """
    logout(request)
def register(username, email, password):
    """ Creates a new application user with a hashed password, registers the
    matching ping-side record and returns the new user.
    """
    user = models.AppUser(username=username, email=email)
    user.set_password(password)
    user.save()
    ping.create_user(user)
    return user
def remove_friend(user, user_id):
    """ Removes the user identified by ``user_id`` from ``user``'s friends.

    Returns True on success, False when no such friend exists.
    """
    try:
        friend = user.friends.get(pk=user_id)
    except models.AppUser.DoesNotExist:
        return False
    user.friends.remove(friend)
    user.save()
    return True
def make_friend_request(user, user_id):
    """ Files a friend request from ``user`` to the user with ``user_id``.

    Returns False when the target does not exist, is ``user`` themselves,
    or is already a friend; True once the request has been recorded.
    """
    try:
        user_to_request = models.AppUser.objects.get(pk=user_id)
    except models.AppUser.DoesNotExist:
        return False
    # Bug fix: compare with == (Django model equality: same concrete class and
    # same pk). The previous 'is' identity check never matched because
    # objects.get() returns a fresh instance, so users could friend-request
    # themselves.
    if user_to_request == user:
        return False
    if user_to_request in user.friends.all():
        return False
    user_to_request.requests.add(user)
    user_to_request.save()
    return True
def respond_friend_request(user, user_id, action):
    """ Accepts or denies a pending friend request from the user with
    ``user_id``. Returns False for an unknown user or action, True after
    the request has been resolved and removed from both sides.
    """
    try:
        requester = models.AppUser.objects.get(pk=user_id)
    except models.AppUser.DoesNotExist:
        return False
    if action not in ('accept', 'deny'):
        return False
    if action == 'accept':
        user.friends.add(requester)
    # Either way the pending request is cleared on both ends.
    user.requests.remove(requester)
    requester.requested.remove(user)
    user.save()
    requester.save()
    return True
rfleschenberg/djangocms-cascade | tests/test_image.py | 2 | 5295 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from bs4 import BeautifulSoup
from django.core.files import File as DjangoFile
from django.http import QueryDict
from filer.models.foldermodels import Folder
from filer.models.imagemodels import Image
from cms.api import add_plugin
from cms.utils.plugins import build_plugin_tree
from cmsplugin_cascade.models import SharableCascadeElement
from cmsplugin_cascade.bootstrap3 import settings
from cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,
BootstrapColumnPlugin)
from cmsplugin_cascade.bootstrap3.image import BootstrapImagePlugin
from .test_base import CascadeTestCase
from .utils import get_request_context
# Breakpoint codes (e.g. 'xs', 'sm', 'md', 'lg') taken from the configured
# Bootstrap 3 breakpoints in the cascade settings.
BS3_BREAKPOINT_KEYS = list(tp[0] for tp in settings.CMSPLUGIN_CASCADE['bootstrap3']['breakpoints'])
class ImagePluginTest(CascadeTestCase):
    """ Integration tests for BootstrapImagePlugin rendered inside a
    container / row / column plugin hierarchy.
    """
    maxDiff = None
    def upload_demo_image(self):
        """ Uploads tests/demo_image.png into a filer folder and returns the
        created filer Image instance.
        """
        demo_image = os.path.abspath(os.path.join(os.path.dirname(__file__), 'demo_image.png'))
        folder, dummy = Folder.objects.get_or_create(name='Samples', parent=None)
        file_obj = DjangoFile(open(demo_image, 'rb'), name='demo_image.png')
        image = Image.objects.create(owner=self.user, original_filename='Demo Image',
                                     file=file_obj, folder=folder)
        return image
    def test_plugin_context(self):
        """ Builds a container > row > column > image plugin tree, saves an
        uploaded image through the plugin's form and checks the rendered HTML
        for both fixed-width and responsive variants.
        """
        # create container
        container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',
            glossary={'breakpoints': BS3_BREAKPOINT_KEYS})
        container_plugin = container_model.get_plugin_class_instance(self.admin_site)
        self.assertIsInstance(container_plugin, BootstrapContainerPlugin)
        # add one row
        row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model,
                               glossary={})
        row_plugin = row_model.get_plugin_class_instance()
        self.assertIsInstance(row_plugin, BootstrapRowPlugin)
        # add one column
        column_model = add_plugin(self.placeholder, BootstrapColumnPlugin, 'en', target=row_model,
                                  glossary={'xs-column-width': 'col-xs-12', 'sm-column-width': 'col-sm-6',
                                            'md-column-width': 'col-md-4', 'lg-column-width': 'col-lg-3'})
        column_plugin = column_model.get_plugin_class_instance()
        self.assertIsInstance(column_plugin, BootstrapColumnPlugin)
        # add an image
        image_model = add_plugin(self.placeholder, BootstrapImagePlugin, 'en', target=column_model)
        self.assertIsInstance(image_model, SharableCascadeElement)
        image_plugin = image_model.get_plugin_class_instance(self.admin_site)
        self.assertIsInstance(image_plugin, BootstrapImagePlugin)
        image_plugin.cms_plugin_instance = image_model.cmsplugin_ptr
        # upload an image and change the form
        ModelForm = image_plugin.get_form(self.request, image_model)
        image = self.upload_demo_image()
        post_data = QueryDict('', mutable=True)
        post_data.update({'image_file': image.pk, 'link_type': 'none', 'image-width-fixed': '300px'})
        image_model._image_model = image
        form = ModelForm(post_data, None, instance=image_model)
        self.assertTrue(form.is_valid())
        image_plugin.save_model(self.request, image_model, form, False)
        # render the plugins
        plugin_list = [container_model, row_model, column_model, image_model]
        build_plugin_tree(plugin_list)
        context = get_request_context(self.request)
        html = container_model.render_plugin(context)
        soup = BeautifulSoup(html)
        # fixed-width variant: 300px wide, demo image is 3:1, hence 100px high
        self.assertEqual(soup.img['height'], '100')
        self.assertEqual(soup.img['width'], '300')
        self.assertTrue('demo_image.png__300x100_q85_subsampling-2' in str(soup.img))
        # use a responsive image
        post_data.setlist('image-shapes', ['img-responsive'])
        form = ModelForm(post_data, None, instance=image_model)
        self.assertTrue(form.is_valid())
        image_plugin.save_model(self.request, image_model, form, False)
        html = container_model.render_plugin(context)
        soup = BeautifulSoup(html)
        self.assertTrue('img-responsive' in soup.img['class'])
        # the responsive variant must advertise one size per breakpoint ..
        sizes = [s.strip() for s in soup.img['sizes'].split(',')]
        self.assertTrue('(max-width: 768px) 720px' in sizes)
        self.assertTrue('(min-width: 768px) and (max-width: 992px) 345px' in sizes)
        self.assertTrue('(min-width: 992px) and (max-width: 1200px) 293px' in sizes)
        self.assertTrue('(min-width: 1200px) 262px' in sizes)
        # .. and a matching srcset entry for each of those widths
        srcsets = [s.strip() for s in soup.img['srcset'].split(',')]
        self.assertEqual(len([s for s in srcsets if s.endswith('demo_image.png__293x98_q85_subsampling-2.jpg 293w')]), 1)
        self.assertEqual(len([s for s in srcsets if s.endswith('demo_image.png__720x240_q85_subsampling-2.jpg 720w')]), 1)
        self.assertEqual(len([s for s in srcsets if s.endswith('demo_image.png__345x115_q85_subsampling-2.jpg 345w')]), 1)
        self.assertEqual(len([s for s in srcsets if s.endswith('demo_image.png__262x87_q85_subsampling-2.jpg 262w')]), 1)
        self.assertTrue(soup.img['src'].endswith('demo_image.png__720x240_q85_subsampling-2.jpg'))
| mit |
dkillick/iris | lib/iris/tests/unit/analysis/geometry/test__extract_relevant_cube_slice.py | 17 | 3899 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for :func:`iris.analysis.geometry._extract_relevant_cube_slice`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris.tests.stock as stock
import shapely.geometry
from iris.analysis.geometry import _extract_relevant_cube_slice
class Test(tests.IrisTest):
    """Tests for `_extract_relevant_cube_slice` over assorted geometries."""

    @staticmethod
    def _bounded_cube():
        # Fresh lat/lon test cube with guessed bounds on both dimension
        # coordinates, as required by the geometry extraction code.
        cube = stock.lat_lon_cube()
        cube.dim_coords[0].guess_bounds()
        cube.dim_coords[1].guess_bounds()
        return cube

    def _check(self, geometry, slicer, bounds):
        # Run the extraction and compare against the expected sub-cube, its
        # x/y coordinates and the index extents tuple (presumably the
        # min/max row/column indices — confirm against the implementation).
        cube = self._bounded_cube()
        actual = _extract_relevant_cube_slice(cube, geometry)
        subcube = slicer(cube)
        target = (subcube,
                  subcube.coords(axis='x')[0],
                  subcube.coords(axis='y')[0],
                  bounds)
        self.assertEqual(target, actual)

    def test_polygon_smaller_than_cube(self):
        geometry = shapely.geometry.box(-0.4, -0.4, 0.4, 0.4)
        self._check(geometry, lambda cube: cube[1, 1], (1, 1, 1, 1))

    def test_polygon_larger_than_cube(self):
        geometry = shapely.geometry.box(-0.6, -0.6, 0.6, 0.6)
        self._check(geometry, lambda cube: cube[:, :3], (0, 0, 2, 2))

    def test_polygon_on_cube_boundary(self):
        geometry = shapely.geometry.box(-0.5, -0.5, 0.5, 0.5)
        self._check(geometry, lambda cube: cube[1, 1], (1, 1, 1, 1))

    def test_rotated_polygon_on_cube_boundary(self):
        geometry = shapely.geometry.Polygon(((0., -.5), (-.5, 0.), (0., .5),
                                             (.5, 0.)))
        self._check(geometry, lambda cube: cube[1, 1], (1, 1, 1, 1))

    def test_rotated_polygon_larger_than_cube_boundary(self):
        geometry = shapely.geometry.Polygon(((0., -.6), (-.6, 0.), (0., .6),
                                             (.6, 0.)))
        self._check(geometry, lambda cube: cube[:, :3], (0, 0, 2, 2))
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
person142/scipy | scipy/special/_mptestutils.py | 8 | 14477 | import os
import sys
import time
import numpy as np
from numpy.testing import assert_
import pytest
from scipy.special._testutils import assert_func_equal
# mpmath is an optional dependency; names below that use it are only
# exercised by tests that already require mpmath, so a failed import is
# deliberately ignored here.
try:
    import mpmath  # type: ignore[import]
except ImportError:
    pass
# ------------------------------------------------------------------------------
# Machinery for systematic tests with mpmath
# ------------------------------------------------------------------------------
class Arg(object):
    """Generate a set of numbers on the real axis, concentrating on
    'interesting' regions and covering all orders of magnitude.
    """

    def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True):
        # [a, b] is the interval to sample; inclusive_* control whether the
        # endpoints themselves may appear in the returned values.
        if a > b:
            raise ValueError("a should be less than or equal to b")
        # Replace infinite endpoints with +/- half the largest finite float
        # so downstream endpoint arithmetic cannot overflow.
        if a == -np.inf:
            a = -0.5*np.finfo(float).max
        if b == np.inf:
            b = 0.5*np.finfo(float).max
        self.a, self.b = a, b
        self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b

    def _positive_values(self, a, b, n):
        # Return ~n sorted points in [a, b] with 0 <= a < b, mixing linear
        # coverage of the small range [*, 10] with logarithmic coverage of
        # the orders of magnitude beyond.
        if a < 0:
            raise ValueError("a should be positive")

        # Try to put half of the points into a linspace between a and
        # 10 the other half in a logspace.
        if n % 2 == 0:
            nlogpts = n//2
            nlinpts = nlogpts
        else:
            nlogpts = n//2
            nlinpts = nlogpts + 1

        if a >= 10:
            # Outside of linspace range; just return a logspace.
            pts = np.logspace(np.log10(a), np.log10(b), n)
        elif a > 0 and b < 10:
            # Outside of logspace range; just return a linspace
            pts = np.linspace(a, b, n)
        elif a > 0:
            # Linspace between a and 10 and a logspace between 10 and
            # b.
            linpts = np.linspace(a, 10, nlinpts, endpoint=False)
            logpts = np.logspace(1, np.log10(b), nlogpts)
            pts = np.hstack((linpts, logpts))
        elif a == 0 and b <= 10:
            # Linspace between 0 and b and a logspace between 0 and
            # the smallest positive point of the linspace
            linpts = np.linspace(0, b, nlinpts)
            if linpts.size > 1:
                right = np.log10(linpts[1])
            else:
                right = -30
            logpts = np.logspace(-30, right, nlogpts, endpoint=False)
            pts = np.hstack((logpts, linpts))
        else:
            # Linspace between 0 and 10, logspace between 0 and the
            # smallest positive point of the linspace, and a logspace
            # between 10 and b.
            if nlogpts % 2 == 0:
                nlogpts1 = nlogpts//2
                nlogpts2 = nlogpts1
            else:
                nlogpts1 = nlogpts//2
                nlogpts2 = nlogpts1 + 1
            linpts = np.linspace(0, 10, nlinpts, endpoint=False)
            if linpts.size > 1:
                right = np.log10(linpts[1])
            else:
                right = -30
            logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False)
            logpts2 = np.logspace(1, np.log10(b), nlogpts2)
            pts = np.hstack((logpts1, linpts, logpts2))

        return np.sort(pts)

    def values(self, n):
        """Return an array containing n numbers."""
        a, b = self.a, self.b
        if a == b:
            return np.zeros(n)

        # Exclusive endpoints are handled by generating one extra point per
        # excluded end and trimming it off at the bottom of this method.
        if not self.inclusive_a:
            n += 1
        if not self.inclusive_b:
            n += 1

        # Split the budget between the negative and positive half-axes.
        if n % 2 == 0:
            n1 = n//2
            n2 = n1
        else:
            n1 = n//2
            n2 = n1 + 1

        if a >= 0:
            pospts = self._positive_values(a, b, n)
            negpts = []
        elif b <= 0:
            pospts = []
            negpts = -self._positive_values(-b, -a, n)
        else:
            pospts = self._positive_values(0, b, n1)
            negpts = -self._positive_values(0, -a, n2 + 1)
            # Don't want to get zero twice
            negpts = negpts[1:]
        pts = np.hstack((negpts[::-1], pospts))

        if not self.inclusive_a:
            pts = pts[1:]
        if not self.inclusive_b:
            pts = pts[:-1]
        return pts
class FixedArg(object):
    """Argument spec that always yields one fixed, caller-supplied set of
    values, regardless of the requested count."""

    def __init__(self, values):
        self._values = np.asarray(values)

    def values(self, n):
        # `n` is accepted only for interface compatibility with Arg.
        return self._values
class ComplexArg(object):
    """Generate complex test points forming a rectangular grid in the plane,
    delegating each axis to a real-valued `Arg` spec."""

    def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)):
        self.real = Arg(a.real, b.real)
        self.imag = Arg(a.imag, b.imag)

    def values(self, n):
        side = int(np.floor(np.sqrt(n)))
        re_pts = self.real.values(side)
        im_pts = self.imag.values(side + 1)
        # Outer sum re + 1j*im, flattened to a 1-D array of grid points.
        grid = re_pts[:, None] + 1j*im_pts[None, :]
        return grid.ravel()
class IntArg(object):
    """Generate integer test points in [a, b), always merging in the small
    integers around zero so they are never missed."""

    def __init__(self, a=-1000, b=1000):
        self.a = a
        self.b = b

    def values(self, n):
        # Sample the real axis, truncate to integers, then union with
        # -5..4 and clip to the requested half-open range.
        sampled = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
        small = np.arange(-5, 5)
        merged = np.unique(np.r_[sampled, small])
        return merged[(merged >= self.a) & (merged < self.b)]
def get_args(argspec, n):
    """Produce a 2-D array of test arguments, one row per test point.

    If *argspec* is already an ndarray it is returned as a copy; otherwise
    it is a sequence of Arg-like specs whose value sets are combined into a
    full cartesian product of roughly n points.
    """
    if isinstance(argspec, np.ndarray):
        return argspec.copy()

    nargs = len(argspec)
    # Complex specs get a larger share (weight 1.5 vs 1.0) of the point
    # budget, split geometrically so the product is roughly n points.
    weights = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0
                          for spec in argspec])
    counts = (n**(weights/sum(weights))).astype(int) + 1
    per_arg = [spec.values(m) for spec, m in zip(argspec, counts)]
    combined = np.array(np.broadcast_arrays(*np.ix_(*per_arg)))
    return combined.reshape(nargs, -1).T
class MpmathData(object):
    """Systematic comparison of a scipy special function against an mpmath
    reference implementation over a grid of generated test arguments.

    Parameters mirror `assert_func_equal`: tolerances, how to treat
    inf/nan, and an optional `param_filter` restricting the tested points.
    `dps`/`prec` control the mpmath working precision during the check.
    """

    def __init__(self, scipy_func, mpmath_func, arg_spec, name=None,
                 dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True,
                 nan_ok=True, param_filter=None):
        # mpmath tests are really slow (see gh-6989). Use a small number of
        # points by default, increase back to 5000 (old default) if XSLOW is
        # set
        if n is None:
            try:
                is_xslow = int(os.environ.get('SCIPY_XSLOW', '0'))
            except ValueError:
                is_xslow = False

            n = 5000 if is_xslow else 500

        self.scipy_func = scipy_func
        self.mpmath_func = mpmath_func
        self.arg_spec = arg_spec
        self.dps = dps
        self.prec = prec
        self.n = n
        self.rtol = rtol
        self.atol = atol
        # Fix: this attribute used to be assigned twice in the original
        # code; a single assignment suffices.
        self.ignore_inf_sign = ignore_inf_sign
        self.nan_ok = nan_ok
        if isinstance(self.arg_spec, np.ndarray):
            self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
        else:
            self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec])
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        # Prefer a meaningful name: the scipy function's, else mpmath's.
        if not name or name == '<lambda>':
            name = getattr(scipy_func, '__name__', None)
        if not name or name == '<lambda>':
            name = getattr(mpmath_func, '__name__', None)
        self.name = name
        self.param_filter = param_filter

    def check(self):
        """Run the comparison; raises AssertionError on mismatch."""
        np.random.seed(1234)

        # Generate values for the arguments
        argarr = get_args(self.arg_spec, self.n)

        # Check, restoring the global mpmath precision afterwards.
        old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
        try:
            if self.dps is not None:
                dps_list = [self.dps]
            else:
                dps_list = [20]
            if self.prec is not None:
                mpmath.mp.prec = self.prec

            # Proper casting of mpmath input and output types. Using
            # native mpmath types as inputs gives improved precision
            # in some cases.
            if np.issubdtype(argarr.dtype, np.complexfloating):
                pytype = mpc2complex

                def mptype(x):
                    return mpmath.mpc(complex(x))
            else:
                def mptype(x):
                    return mpmath.mpf(float(x))

                def pytype(x):
                    # A result with a non-negligible imaginary part cannot
                    # be compared against a real scipy value; report nan.
                    if abs(x.imag) > 1e-16*(1 + abs(x.real)):
                        return np.nan
                    else:
                        return mpf2float(x.real)

            # Try out different dps until one (or none) works
            for j, dps in enumerate(dps_list):
                mpmath.mp.dps = dps

                try:
                    assert_func_equal(self.scipy_func,
                                      lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
                                      argarr,
                                      vectorized=False,
                                      rtol=self.rtol, atol=self.atol,
                                      ignore_inf_sign=self.ignore_inf_sign,
                                      distinguish_nan_and_inf=self.distinguish_nan_and_inf,
                                      nan_ok=self.nan_ok,
                                      param_filter=self.param_filter)
                    break
                except AssertionError:
                    if j >= len(dps_list)-1:
                        # reraise the Exception
                        tp, value, tb = sys.exc_info()
                        if value.__traceback__ is not tb:
                            raise value.with_traceback(tb)
                        raise value
        finally:
            mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec

    def __repr__(self):
        if self.is_complex:
            return "<MpmathData: %s (complex)>" % (self.name,)
        else:
            return "<MpmathData: %s>" % (self.name,)
def assert_mpmath_equal(*args, **kwargs):
    """Build an `MpmathData` comparison from the given arguments and run it."""
    MpmathData(*args, **kwargs).check()
def nonfunctional_tooslow(func):
    """Mark *func* as skipped: known non-functional and too slow to run."""
    marker = pytest.mark.skip(
        reason=" Test not yet functional (too slow), needs more work.")
    return marker(func)
# ------------------------------------------------------------------------------
# Tools for dealing with mpmath quirks
# ------------------------------------------------------------------------------
def mpf2float(x):
    """Convert an mpf to the nearest Python float.

    A plain float(x) call is not correctly rounded at high working
    precision, e.g. with mp.workdps(50):
    float(mpf("0.99999999999999999")) = 0.9999999999999999
    Formatting to 17 significant digits first avoids that.
    """
    text = mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)
    return float(text)
def mpc2complex(x):
    """Convert an mpmath complex number to the nearest Python complex."""
    real_part = mpf2float(x.real)
    imag_part = mpf2float(x.imag)
    return complex(real_part, imag_part)
def trace_args(func):
    """Decorator that echoes each call's arguments and result to stderr.

    Useful for debugging which test point an mpmath reference function is
    being evaluated at when it hangs or raises.
    """
    def tofloat(x):
        return complex(x) if isinstance(x, mpmath.mpc) else float(x)

    def traced(*a, **kw):
        sys.stderr.write("%r: " % (tuple(map(tofloat, a)),))
        sys.stderr.flush()
        try:
            result = func(*a, **kw)
            sys.stderr.write("-> %r" % result)
        finally:
            # Terminate the line even if func raised.
            sys.stderr.write("\n")
            sys.stderr.flush()
        return result
    return traced
# SIGALRM-based timeouts require a POSIX platform with signal.setitimer
# (unavailable on e.g. Windows); `time_limited` falls back to a settrace
# implementation when POSIX is False.
try:
    import posix
    import signal
    POSIX = ('setitimer' in dir(signal))
except ImportError:
    POSIX = False
class TimeoutError(Exception):
    """Raised internally by `time_limited` when the deadline expires.

    NOTE(review): this shadows the builtin TimeoutError (Python 3.3+);
    kept as-is since callers may catch this module-level name.
    """
    pass
def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
    """
    Decorator for setting a timeout for pure-Python functions.

    If the function does not return within `timeout` seconds, the
    value `return_val` is returned instead.

    On POSIX this uses SIGALRM by default. On non-POSIX, settrace is
    used. Do not use this with threads: the SIGALRM implementation
    does probably not work well. The settrace implementation only
    traces the current thread.

    The settrace implementation slows down execution speed. Slowdown
    by a factor around 10 is probably typical.
    """
    if POSIX and use_sigalrm:
        # SIGALRM implementation: arm a one-shot interval timer and turn
        # the delivered signal into a TimeoutError.
        def sigalrm_handler(signum, frame):
            raise TimeoutError()

        def deco(func):
            def wrap(*a, **kw):
                old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
                signal.setitimer(signal.ITIMER_REAL, timeout)
                try:
                    return func(*a, **kw)
                except TimeoutError:
                    return return_val
                finally:
                    # Always disarm the timer and restore the previous
                    # handler, whether or not the call timed out.
                    signal.setitimer(signal.ITIMER_REAL, 0)
                    signal.signal(signal.SIGALRM, old_handler)
            return wrap
    else:
        # Portable fallback: install a trace function that checks elapsed
        # wall-clock time on every traced event and aborts the call by
        # raising TimeoutError.
        def deco(func):
            def wrap(*a, **kw):
                start_time = time.time()

                def trace(frame, event, arg):
                    if time.time() - start_time > timeout:
                        raise TimeoutError()
                    return trace
                sys.settrace(trace)
                try:
                    return func(*a, **kw)
                except TimeoutError:
                    sys.settrace(None)
                    return return_val
                finally:
                    sys.settrace(None)
            return wrap
    return deco
def exception_to_nan(func):
    """Decorate *func* so that any raised exception yields nan instead."""
    def guarded(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception:
            result = np.nan
        return result
    return guarded
def inf_to_nan(func):
    """Decorate *func* so that non-finite results are replaced by nan."""
    def guarded(*args, **kwargs):
        value = func(*args, **kwargs)
        return value if np.isfinite(value) else np.nan
    return guarded
def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
    """
    Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
    can be done to higher precision than double.
    """
    try:
        len(res)
    except TypeError:
        # `res` may be a generator; materialize it so it can be indexed.
        res = list(res)

    n = len(std)
    if len(res) != n:
        raise AssertionError("Lengths of inputs not equal.")

    # Collect indices of entries outside atol + rtol*|std|.
    failures = []
    for k in range(n):
        try:
            assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k]))
        except AssertionError:
            failures.append(k)

    # Format failing entries with enough digits to show the mismatch.
    ndigits = int(abs(np.log10(rtol)))
    msg = [""]
    msg.append("Bad results ({} out of {}) for the following points:"
               .format(len(failures), n))
    for k in failures:
        resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
        stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
        if std[k] == 0:
            rdiff = "inf"
        else:
            rdiff = mpmath.fabs((res[k] - std[k])/std[k])
            rdiff = mpmath.nstr(rdiff, 3)
        msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
    if failures:
        assert_(False, "\n".join(msg))
| bsd-3-clause |
jhawkesworth/ansible | lib/ansible/plugins/lookup/aws_ssm.py | 14 | 10482 | # (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
# Ansible boilerplate: make all classes new-style under Python 2.
__metaclass__ = type
DOCUMENTATION = '''
lookup: aws_ssm
author:
- Bill Wang <ozbillwang(at)gmail.com>
- Marat Bakeev <hawara(at)gmail.com>
- Michael De La Rue <siblemitcom.mddlr@spamgourmet.com>
version_added: 2.5
requirements:
- boto3
- botocore
short_description: Get the value for a SSM parameter or all parameters under a path.
description:
- Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
The first argument you pass the lookup can either be a parameter name or a hierarchy of
parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
5 layers may be specified.
- If looking up an explicitly listed parameter by name which does not exist then the lookup will
return a None value which will be interpreted by Jinja2 as an empty string. You can use the
```default``` filter to give a default value in this case but must set the second parameter to
true (see examples below)
- When looking up a path for parameters under it a dictionary will be returned for each path.
If there is no parameter under that path then the return will be successful but the
dictionary will be empty.
- If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm
will generate an error, normally crashing the current ansible task. This is normally the right
thing since ignoring a value that IAM isn't giving access to could cause bigger problems and
wrong behaviour or loss of data. If you want to continue in this case then you will have to set
up two ansible tasks, one which sets a variable and ignores failures one which uses the value
of that variable with a default. See the examples below.
options:
decrypt:
description: A boolean to indicate whether to decrypt the parameter.
default: true
type: boolean
bypath:
description: A boolean to indicate whether the parameter is provided as a hierarchy.
default: false
type: boolean
recursive:
description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
default: false
type: boolean
shortnames:
description: Indicates whether to return the name only without path if using a parameter hierarchy.
default: false
type: boolean
'''
EXAMPLES = '''
# lookup sample:
- name: lookup ssm parameter store in the current region
debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
- name: lookup ssm parameter store in nominated region
debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
- name: lookup ssm parameter store without decrypted
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
- name: lookup ssm parameter store in nominated aws profile
debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
- name: lookup ssm parameter store using explicit aws credentials
debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}"
- name: lookup ssm parameter store with all options.
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
- name: lookup a key which doesn't exist, returns ""
debug: msg="{{ lookup('aws_ssm', 'NoKey') }}"
- name: lookup a key which doesn't exist, returning a default ('root')
debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}"
- name: lookup a key which doesn't exist failing to store it in a fact
set_fact:
temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
ignore_errors: true
- name: show fact default to "access failed" if we don't have access
debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}"
- name: return a dictionary of ssm parameters from a hierarchy path
debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
- name: Iterate over a parameter hierarchy
debug: msg='key contains {{item.Name}} with value {{item.Value}} '
loop: '{{ query("aws_ssm", "/TEST/test-list", region="ap-southeast-2", bypath=true) }}'
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, boto3_tag_list_to_ansible_dict
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
# boto3/botocore may be missing at import time; availability is signalled
# via HAS_BOTO3 (imported from ansible.module_utils.ec2 above), so the
# ImportError is deliberately swallowed here.
try:
    from botocore.exceptions import ClientError
    import botocore
    import boto3
except ImportError:
    pass  # will be captured by imported HAS_BOTO3

# Global display handle used for verbose lookup logging.
display = Display()
def _boto3_conn(region, credentials):
    """Open a boto3 SSM client for *region*.

    `credentials` may carry a 'boto_profile' entry (removed here) plus
    explicit keys. If the explicit credentials are incomplete, fall back to
    the profile alone; raise AnsibleError when no usable credentials exist.
    """
    boto_profile = credentials.pop('boto_profile', None)

    try:
        session = boto3.session.Session(profile_name=boto_profile)
        return session.client('ssm', region, **credentials)
    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
        if not boto_profile:
            raise AnsibleError("Insufficient credentials found.")
        # Retry with the profile only, dropping the explicit credentials.
        try:
            session = boto3.session.Session(profile_name=boto_profile)
            return session.client('ssm', region)
        # FIXME: we should probably do better passing on of the error information
        except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
            raise AnsibleError("Insufficient credentials found.")
class LookupModule(LookupBase):
    """Ansible lookup plugin resolving AWS SSM parameters by name or path."""

    def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
            aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
            bypath=False, shortnames=False, recursive=False, decrypt=True):
        '''
        :arg terms: a list of lookups to run.
            e.g. ['parameter_name', 'parameter_name_too' ]
        :kwarg variables: ansible variables active at the time of the lookup
        :kwarg aws_secret_key: identity of the AWS key to use
        :kwarg aws_access_key: AWS secret key (matching identity)
        :kwarg aws_security_token: AWS session key if using STS
        :kwarg decrypt: Set to True to get decrypted parameters
        :kwarg region: AWS region in which to do the lookup
        :kwarg bypath: Set to True to do a lookup of variables under a path
        :kwarg recursive: Set to True to recurse below the path (requires bypath=True)
        :returns: A list of parameter values or a list of dictionaries if bypath=True.
        '''
        if not HAS_BOTO3:
            raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.')

        ret = []
        response = {}
        ssm_dict = {}

        # aws_profile takes precedence over boto_profile; either may be None.
        credentials = {}
        if aws_profile:
            credentials['boto_profile'] = aws_profile
        else:
            credentials['boto_profile'] = boto_profile
        credentials['aws_secret_access_key'] = aws_secret_key
        credentials['aws_access_key_id'] = aws_access_key
        credentials['aws_session_token'] = aws_security_token

        client = _boto3_conn(region, credentials)

        ssm_dict['WithDecryption'] = decrypt

        # Lookup by path
        if bypath:
            ssm_dict['Recursive'] = recursive
            for term in terms:
                ssm_dict["Path"] = term
                display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
                try:
                    response = client.get_parameters_by_path(**ssm_dict)
                except ClientError as e:
                    raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
                paramlist = list()
                paramlist.extend(response['Parameters'])

                # Manual pagination, since boto doesn't support it yet for get_parameters_by_path
                while 'NextToken' in response:
                    response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict)
                    paramlist.extend(response['Parameters'])

                # shorten parameter names. yes, this will return duplicate names with different values.
                if shortnames:
                    for x in paramlist:
                        x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]

                display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
                if len(paramlist):
                    ret.append(boto3_tag_list_to_ansible_dict(paramlist,
                                                              tag_name_key_name="Name",
                                                              tag_value_key_name="Value"))
                else:
                    # Empty dict per path keeps the result list aligned with terms.
                    ret.append({})

        # Lookup by parameter name - always returns a list with one or no entry.
        else:
            display.vvv("AWS_ssm name lookup term: %s" % terms)
            ssm_dict["Names"] = terms
            try:
                response = client.get_parameters(**ssm_dict)
            except ClientError as e:
                raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
            params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name",
                                                    tag_value_key_name="Value")
            for i in terms:
                if i in params:
                    ret.append(params[i])
                elif i in response['InvalidParameters']:
                    # Missing parameter: None renders as "" in Jinja2.
                    ret.append(None)
                else:
                    raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response)))
            return ret

        # Reached only via the bypath branch above.
        display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
        return ret
| gpl-3.0 |
GuoshunWu/googlemock | scripts/generator/cpp/gmock_class_test.py | 395 | 11356 | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
  """Helper class that adds indentation-insensitive assert methods."""

  def StripLeadingWhitespace(self, lines):
    """Strip leading whitespace in each line in 'lines'."""
    stripped = [line.lstrip() for line in lines.split('\n')]
    return '\n'.join(stripped)

  def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
    """Specialized assert that ignores the indent level."""
    actual = self.StripLeadingWhitespace(lines)
    self.assertEqual(expected_lines, actual)
class GenerateMethodsTest(TestCase):
  """Tests that C++ class declarations produce the expected MOCK_METHOD* lines."""

  def GenerateMethodSource(self, cpp_source):
    """Convert C++ source to Google Mock output source lines."""
    method_source_lines = []
    # <test> is a pseudo-filename, it is not read or written.
    builder = ast.BuilderFromSource(cpp_source, '<test>')
    ast_list = list(builder.Generate())
    gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
    return '\n'.join(method_source_lines)

  def testSimpleMethod(self):
    source = """
class Foo {
public:
virtual int Bar();
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testSimpleConstructorsAndDestructor(self):
    source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
    # The constructors and destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testVirtualDestructor(self):
    source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
    # The destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testExplicitlyDefaultedConstructorsAndDestructor(self):
    source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
    # The constructors and destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testExplicitlyDeletedConstructorsAndDestructor(self):
    source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
    # The constructors and destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testSimpleOverrideMethod(self):
    source = """
class Foo {
public:
int Bar() override;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testSimpleConstMethod(self):
    source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
        self.GenerateMethodSource(source))

  def testExplicitVoid(self):
    source = """
class Foo {
public:
virtual int Bar(void);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint(void));',
        self.GenerateMethodSource(source))

  def testStrangeNewlineInParameter(self):
    # A parameter declaration split across lines must still be parsed.
    source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nvoid(int a));',
        self.GenerateMethodSource(source))

  def testDefaultParameters(self):
    # Default values are dropped, along with the parameter names.
    source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nvoid(int, char));',
        self.GenerateMethodSource(source))

  def testMultipleDefaultParameters(self):
    source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nvoid(int, char));',
        self.GenerateMethodSource(source))

  def testRemovesCommentsWhenDefaultsArePresent(self):
    source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nvoid(int, char));',
        self.GenerateMethodSource(source))

  def testDoubleSlashCommentsInParameterListAreRemoved(self):
    source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
        self.GenerateMethodSource(source))

  def testCStyleCommentsInParameterListAreNotRemoved(self):
    # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
    # comments. Also note that C style comments after the last parameter
    # are still elided.
    source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
        self.GenerateMethodSource(source))

  def testArgsOfTemplateTypes(self):
    source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\n'
        'int(const vector<int>& v, map<int, string>* output));',
        self.GenerateMethodSource(source))

  def testReturnTypeWithOneTemplateArg(self):
    source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
        self.GenerateMethodSource(source))

  def testReturnTypeWithManyTemplateArgs(self):
    source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
    # Comparing the comment text is brittle - we'll think of something
    # better in case this gets annoying, but for now let's keep it simple.
    self.assertEqualIgnoreLeadingWhitespace(
        '// The following line won\'t really compile, as the return\n'
        '// type has multiple template arguments. To fix it, use a\n'
        '// typedef for the return type.\n'
        'MOCK_METHOD0(Bar,\nmap<int, string>());',
        self.GenerateMethodSource(source))

  def testSimpleMethodInTemplatedClass(self):
    source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0_T(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testPointerArgWithoutNames(self):
    source = """
class Foo {
virtual int Bar(C*);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nint(C*));',
        self.GenerateMethodSource(source))

  def testReferenceArgWithoutNames(self):
    source = """
class Foo {
virtual int Bar(C&);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nint(C&));',
        self.GenerateMethodSource(source))

  def testArrayArgWithoutNames(self):
    source = """
class Foo {
virtual int Bar(C[]);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nint(C[]));',
        self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
  """Tests that whole C++ translation units produce the expected mock classes."""

  def GenerateMocks(self, cpp_source):
    """Convert C++ source to complete Google Mock output source."""
    # <test> is a pseudo-filename, it is not read or written.
    filename = '<test>'
    builder = ast.BuilderFromSource(cpp_source, filename)
    ast_list = list(builder.Generate())
    lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
    return '\n'.join(lines)

  def testNamespaces(self):
    source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {

class Test {
public:
virtual void Foo();
};

}  // namespace Baz
}  // namespace Foo
"""
    expected = """\
namespace Foo {
namespace Baz {

class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};

}  // namespace Baz
}  // namespace Foo
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testClassWithStorageSpecifierMacro(self):
    source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplatedForwardDeclaration(self):
    source = """
template <class T> class Forward;  // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplatedClass(self):
    source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
    expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplateInATemplateTypedef(self):
    source = """
class Test {
public:
typedef std::vector<std::list<int>> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplateInATemplateTypedefWithComma(self):
    source = """
class Test {
public:
typedef std::function<void(
const vector<std::list<int>>&, int> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| bsd-3-clause |
ChristfriedBalizou/jeamsql | adapters/tabulate/benchmark.py | 1 | 3028 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from timeit import timeit
import tabulate
import asciitable
import prettytable
import texttable
import sys
import codecs
# Code executed once by timeit before each measurement: builds a 10x10
# sample table and defines one formatter function per benchmarked library.
setup_code = r"""
from csv import writer
try: # Python 2
    from StringIO import StringIO
except: # Python 3
    from io import StringIO
import tabulate
import asciitable
import prettytable
import texttable
import platform
if platform.platform().startswith("Windows") \
   and \
   platform.python_version_tuple() < ('3','6','0'):
    import win_unicode_console
    win_unicode_console.enable()
table=[["some text"]+list(range(i,i+9)) for i in range(10)]
def csv_table(table):
    buf = StringIO()
    writer(buf).writerows(table)
    return buf.getvalue()
def join_table(table):
    return "\n".join(("\t".join(map(str,row)) for row in table))
def run_prettytable(table):
    pp = prettytable.PrettyTable()
    for row in table:
        pp.add_row(row)
    return str(pp)
def run_asciitable(table):
    buf = StringIO()
    asciitable.write(table, output=buf, Writer=asciitable.FixedWidth)
    return buf.getvalue()
def run_texttable(table):
    pp = texttable.Texttable()
    pp.set_cols_align(["l"] + ["r"]*9)
    pp.add_rows(table)
    return pp.draw()
def run_tabletext(table):
    # NOTE(review): 'tabletext' is never imported and this helper is not
    # referenced by the methods list below -- appears to be dead code.
    return tabletext.to_text(table)
def run_tabulate(table, widechars=False):
    tabulate.WIDE_CHARS_MODE = tabulate.wcwidth is not None and widechars
    return tabulate.tabulate(table)
"""
# (description, code-string) pairs passed to timeit; descriptions embed
# the library versions so the report identifies what was measured.
methods = [(u"join with tabs and newlines", "join_table(table)"),
           (u"csv to StringIO", "csv_table(table)"),
           (u"asciitable (%s)" % asciitable.__version__, "run_asciitable(table)"),
           (u"tabulate (%s)" % tabulate.__version__, "run_tabulate(table)"),
           (u"tabulate (%s, WIDE_CHARS_MODE)" % tabulate.__version__, "run_tabulate(table, widechars=True)"),
           (u"PrettyTable (%s)" % prettytable.__version__, "run_prettytable(table)"),
           (u"texttable (%s)" % texttable.__version__, "run_texttable(table)"),
           ]
# Drop the WIDE_CHARS_MODE entry when the wcwidth package is unavailable.
if tabulate.wcwidth is None:
    del(methods[4])
def benchmark(n):
    """Time every table formatter over *n* runs and print a ranked table.

    *n* is the number of timeit iterations; reported times are mean
    per-call times in microseconds, plus the time relative to the
    fastest formatter.  With ``--onlyself`` on the command line only the
    tabulate-based formatters are measured.
    """
    global methods
    if '--onlyself' in sys.argv[1:]:
        methods = [m for m in methods if m[0].startswith("tabulate")]
    # (The previous no-op ``else: methods = methods`` branch was removed.)
    results = [(desc, timeit(code, setup_code, number=n) / n * 1e6)
               for desc, code in methods]
    mintime = min(t for _desc, t in results)
    results = [(desc, t, t / mintime) for desc, t in
               sorted(results, key=lambda x: x[1])]
    table = tabulate.tabulate(results,
                              [u"Table formatter", u"time, μs", u"rel. time"],
                              u"rst", floatfmt=".1f")
    import platform
    if platform.platform().startswith("Windows"):
        print(table)
    else:
        print(codecs.encode(table, "utf-8"))
if __name__ == "__main__":
    # An optional first CLI argument overrides the default iteration count.
    n = int(sys.argv[1]) if sys.argv[1:] else 10000
    benchmark(n)
| mit |
phalax4/CarnotKE | jyhton/ast/spark.py | 8 | 26973 | # Copyright (c) 1998-2002 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = 'SPARK-0.7 (pre-alpha-5)'
import re
import sys
import string
def _namelist(instance):
    """Return the attribute names of *instance*'s class and all its bases.

    Walks the class hierarchy breadth-first (classlist grows while it is
    being iterated, which is valid for a list) and reports each name
    once, in first-encountered order.
    """
    namelist, namedict, classlist = [], {}, [instance.__class__]
    for c in classlist:
        for b in c.__bases__:
            classlist.append(b)
        for name in c.__dict__.keys():
            # Membership test instead of the Python-2-only dict.has_key().
            if name not in namedict:
                namelist.append(name)
                namedict[name] = 1
    return namelist
class GenericScanner:
    """Regex-driven scanner.

    Subclasses define ``t_<name>`` action methods whose docstrings are
    the token regular expressions; reflect() combines them into a single
    alternation with ``t_default`` as the catch-all last branch.
    """

    def __init__(self, flags=0):
        pattern = self.reflect()
        self.re = re.compile(pattern, re.VERBOSE | flags)
        # Map 0-based regex group numbers back to their action methods.
        self.index2func = {}
        for name, number in self.re.groupindex.items():
            self.index2func[number - 1] = getattr(self, 't_' + name)

    def makeRE(self, name):
        """Build a named group '(?P<x>...)' from method t_x's docstring."""
        doc = getattr(self, name).__doc__
        rv = '(?P<%s>%s)' % (name[2:], doc)
        return rv

    def reflect(self):
        """Combine every t_* rule into one pattern; t_default goes last."""
        rv = []
        for name in _namelist(self):
            if name[:2] == 't_' and name != 't_default':
                rv.append(self.makeRE(name))
        rv.append(self.makeRE('t_default'))
        # str.join replaces the long-deprecated string.join(rv, '|').
        return '|'.join(rv)

    def error(self, s, pos):
        # Single-argument print() behaves identically under Python 2.
        print("Lexical error at position %s" % pos)
        raise SystemExit

    def tokenize(self, s):
        """Scan *s*, dispatching each match to its t_* action method."""
        pos = 0
        n = len(s)
        while pos < n:
            m = self.re.match(s, pos)
            if m is None:
                self.error(s, pos)
            groups = m.groups()
            for i in range(len(groups)):
                # Truthiness (not just "is not None") preserved from the
                # original: empty matches are deliberately skipped.
                if groups[i] and i in self.index2func:
                    self.index2func[i](groups[i])
            pos = m.end()

    def t_default(self, s):
        r'( . | \n )+'
        print("Specification error: unmatched input")
        raise SystemExit
#
# Extracted from GenericParser and made global so that [un]picking works.
#
class _State:
    """One state of the Earley automaton.

    Holds the kernel *items*, plus the terminals (``T``) and completed
    rules (``complete``) discovered while the state is elaborated.
    """

    def __init__(self, stateno, items):
        self.stateno = stateno
        self.items = items
        self.T = []
        self.complete = []
class GenericParser:
#
# An Earley parser, as per J. Earley, "An Efficient Context-Free
# Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
# "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
# Carnegie-Mellon University, August 1968. New formulation of
# the parser according to J. Aycock, "Practical Earley Parsing
# and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
# 2001, and J. Aycock and R. N. Horspool, "Practical Earley
# Parsing", unpublished paper, 2001.
#
def __init__(self, start):
    """Collect the p_* grammar rules and augment the grammar with *start*."""
    self.rules = {}
    self.rule2func = {}
    self.rule2name = {}
    self.collectRules()
    self.augment(start)
    # Set whenever rules change; derived tables are rebuilt lazily.
    self.ruleschanged = 1

# Internal marker symbols: prefix for nullable-symbol variants, the
# augmented start symbol, and the beginning-of-file pseudo-token.
_NULLABLE = '\e_'
_START = 'START'
_BOF = '|-'
#
# When pickling, take the time to generate the full state machine;
# some information is then extraneous, too. Unfortunately we
# can't save the rule2func map.
#
def __getstate__(self):
    """Pickle support: fully generate the state machine, then drop the
    attributes that cannot (bound methods) or need not be pickled."""
    if self.ruleschanged:
        #
        # XXX - duplicated from parse()
        #
        self.computeNull()
        self.newrules = {}
        self.new2old = {}
        self.makeNewRules()
        self.ruleschanged = 0
        self.edges, self.cores = {}, {}
        self.states = { 0: self.makeState0() }
        self.makeState(0, self._BOF)
    #
    # XXX - should find a better way to do this..
    #
    # Keep expanding until every recorded edge's target state exists.
    changes = 1
    while changes:
        changes = 0
        for k, v in self.edges.items():
            if v is None:
                state, sym = k
                if self.states.has_key(state):
                    self.goto(state, sym)
                    changes = 1
    rv = self.__dict__.copy()
    # The per-state item lists are only needed while building states.
    for s in self.states.values():
        del s.items
    del rv['rule2func']
    del rv['nullable']
    del rv['cores']
    return rv
def __setstate__(self, D):
    """Unpickle support: rebuild the unpicklable rule tables and switch
    makeSet to the fast variant (the state machine is complete)."""
    self.rules = {}
    self.rule2func = {}
    self.rule2name = {}
    self.collectRules()
    start = D['rules'][self._START][0][1][1] # Blech.
    self.augment(start)
    D['rule2func'] = self.rule2func
    D['makeSet'] = self.makeSet_fast
    self.__dict__ = D
#
# A hook for GenericASTBuilder and GenericASTMatcher. Mess
# thee not with this; nor shall thee toucheth the _preprocess
# argument to addRule.
#
# Identity by default; subclasses may rewrite (rule, action) pairs.
def preprocess(self, rule, func): return rule, func
def addRule(self, doc, func, _preprocess=1):
    """Register the grammar rule(s) in *doc* ('lhs ::= rhs ...').

    One docstring may contain several rules; each gets *func* as its
    semantic action (possibly rewritten by preprocess()).
    """
    fn = func
    rules = string.split(doc)
    # Each '::=' token marks a rule; the LHS is the token before it.
    index = []
    for i in range(len(rules)):
        if rules[i] == '::=':
            index.append(i-1)
    index.append(len(rules))
    for i in range(len(index)-1):
        lhs = rules[index[i]]
        rhs = rules[index[i]+2:index[i+1]]
        rule = (lhs, tuple(rhs))
        if _preprocess:
            rule, fn = self.preprocess(rule, func)
        if self.rules.has_key(lhs):
            self.rules[lhs].append(rule)
        else:
            self.rules[lhs] = [ rule ]
        self.rule2func[rule] = fn
        # Rule name is the method name with the 'p_' prefix stripped.
        self.rule2name[rule] = func.__name__[2:]
    self.ruleschanged = 1
def collectRules(self):
    """Harvest grammar rules from the docstrings of all p_* methods."""
    for name in _namelist(self):
        if name[:2] == 'p_':
            func = getattr(self, name)
            doc = func.__doc__
            self.addRule(doc, func)
def augment(self, start):
    """Add the internal augmented rule START ::= |- <start>; its action
    returns the attribute of the user start symbol."""
    rule = '%s ::= %s %s' % (self._START, self._BOF, start)
    self.addRule(rule, lambda args: args[1], 0)
def computeNull(self):
    """Compute self.nullable: which nonterminals can derive epsilon."""
    self.nullable = {}
    tbd = []
    for rulelist in self.rules.values():
        lhs = rulelist[0][0]
        self.nullable[lhs] = 0
        for rule in rulelist:
            rhs = rule[1]
            if len(rhs) == 0:
                # Empty RHS: trivially nullable.
                self.nullable[lhs] = 1
                continue
            #
            # We only need to consider rules which
            # consist entirely of nonterminal symbols.
            # This should be a savings on typical
            # grammars.
            #
            for sym in rhs:
                if not self.rules.has_key(sym):
                    break
            else:
                tbd.append(rule)
    # Iterate to a fixed point over the all-nonterminal candidate rules.
    changes = 1
    while changes:
        changes = 0
        for lhs, rhs in tbd:
            if self.nullable[lhs]:
                continue
            for sym in rhs:
                if not self.nullable[sym]:
                    break
            else:
                self.nullable[lhs] = 1
                changes = 1
def makeState0(self):
    """Create Earley state 0 from the (rewritten) augmented start rules."""
    s0 = _State(0, [])
    for rule in self.newrules[self._START]:
        s0.items.append((rule, 0))
    return s0

def finalState(self, tokens):
    """Return the accepting state number for the given token stream."""
    #
    # Yuck.
    #
    # Special case: empty input with a nullable start symbol.
    if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
        return 1
    start = self.rules[self._START][0][1][1]
    return self.goto(1, start)
def makeNewRules(self):
    """Rewrite the grammar into G_e (per Aycock &amp; Horspool): for every
    rule, add variants whose leading nullable symbols carry the
    _NULLABLE prefix, recording the original rule in self.new2old."""
    worklist = []
    for rulelist in self.rules.values():
        for rule in rulelist:
            worklist.append((rule, 0, 1, rule))
    for rule, i, candidate, oldrule in worklist:
        lhs, rhs = rule
        n = len(rhs)
        while i < n:
            sym = rhs[i]
            # A terminal or non-nullable symbol ends this candidate.
            if not self.rules.has_key(sym) or \
               not self.nullable[sym]:
                candidate = 0
                i = i + 1
                continue
            newrhs = list(rhs)
            newrhs[i] = self._NULLABLE+sym
            newrule = (lhs, tuple(newrhs))
            worklist.append((newrule, i+1,
                             candidate, oldrule))
            candidate = 0
            i = i + 1
        else:
            # Fully-nullable RHS: the LHS itself gets the marker.
            if candidate:
                lhs = self._NULLABLE+lhs
                rule = (lhs, rhs)
            if self.newrules.has_key(lhs):
                self.newrules[lhs].append(rule)
            else:
                self.newrules[lhs] = [ rule ]
            self.new2old[rule] = oldrule

def typestring(self, token):
    """Override to return a token's type string; returning None disables
    the token-type fast path in makeSet()."""
    return None

def error(self, token):
    """Report a syntax error and abort; subclasses may override."""
    print "Syntax error at or near `%s' token" % token
    raise SystemExit
def parse(self, tokens):
    """Parse a token list; return the attribute computed for the
    augmented start rule (normally the parse tree)."""
    sets = [ [(1,0), (2,0)] ]
    self.links = {}
    if self.ruleschanged:
        # Lazily (re)build G_e and the first automaton states.
        self.computeNull()
        self.newrules = {}
        self.new2old = {}
        self.makeNewRules()
        self.ruleschanged = 0
        self.edges, self.cores = {}, {}
        self.states = { 0: self.makeState0() }
        self.makeState(0, self._BOF)
    for i in xrange(len(tokens)):
        sets.append([])
        # An empty set means no parse is possible past this point.
        if sets[i] == []:
            break
        self.makeSet(tokens[i], sets, i)
    else:
        sets.append([])
        self.makeSet(None, sets, len(tokens))
    #_dump(tokens, sets, self.states)
    finalitem = (self.finalState(tokens), 0)
    if finalitem not in sets[-2]:
        if len(tokens) > 0:
            self.error(tokens[i-1])
        else:
            self.error(None)
    return self.buildTree(self._START, finalitem,
                          tokens, len(sets)-2)

def isnullable(self, sym):
    """True if *sym* is a _NULLABLE-marked symbol of G_e."""
    #
    # For symbols in G_e only. If we weren't supporting 1.5,
    # could just use sym.startswith().
    #
    return self._NULLABLE == sym[0:len(self._NULLABLE)]

def skip(self, (lhs, rhs), pos=0):
    """Advance *pos* past any nullable symbols in *rhs*."""
    n = len(rhs)
    while pos < n:
        if not self.isnullable(rhs[pos]):
            break
        pos = pos + 1
    return pos
def makeState(self, state, sym):
    """Build (or find, via self.cores) the state reached from *state* on
    *sym*, together with its epsilon-nonkernel companion state."""
    assert sym is not None
    #
    # Compute \epsilon-kernel state's core and see if
    # it exists already.
    #
    kitems = []
    for rule, pos in self.states[state].items:
        lhs, rhs = rule
        if rhs[pos:pos+1] == (sym,):
            kitems.append((rule, self.skip(rule, pos+1)))
    core = kitems
    core.sort()
    tcore = tuple(core)
    if self.cores.has_key(tcore):
        return self.cores[tcore]
    #
    # Nope, doesn't exist. Compute it and the associated
    # \epsilon-nonkernel state together; we'll need it right away.
    #
    k = self.cores[tcore] = len(self.states)
    K, NK = _State(k, kitems), _State(k+1, [])
    self.states[k] = K
    predicted = {}
    edges = self.edges
    rules = self.newrules
    for X in K, NK:
        worklist = X.items
        for item in worklist:
            rule, pos = item
            lhs, rhs = rule
            if pos == len(rhs):
                X.complete.append(rule)
                continue
            nextSym = rhs[pos]
            key = (X.stateno, nextSym)
            if not rules.has_key(nextSym):
                # Terminal: record a pending edge and remember it in T.
                if not edges.has_key(key):
                    edges[key] = None
                    X.T.append(nextSym)
            else:
                # Nonterminal: predict its rules into the nonkernel state.
                edges[key] = None
                if not predicted.has_key(nextSym):
                    predicted[nextSym] = 1
                    for prule in rules[nextSym]:
                        ppos = self.skip(prule)
                        new = (prule, ppos)
                        NK.items.append(new)
        #
        # Problem: we know K needs generating, but we
        # don't yet know about NK. Can't commit anything
        # regarding NK to self.edges until we're sure. Should
        # we delay committing on both K and NK to avoid this
        # hacky code? This creates other problems..
        #
        if X is K:
            edges = {}
    if NK.items == []:
        return k
    #
    # Check for \epsilon-nonkernel's core. Unfortunately we
    # need to know the entire set of predicted nonterminals
    # to do this without accidentally duplicating states.
    #
    core = predicted.keys()
    core.sort()
    tcore = tuple(core)
    if self.cores.has_key(tcore):
        self.edges[(k, None)] = self.cores[tcore]
        return k
    nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
    self.edges.update(edges)
    self.states[nk] = NK
    return k
def goto(self, state, sym):
    """Return the state reached from *state* on *sym*, generating it on
    demand; None if there is no transition."""
    key = (state, sym)
    if not self.edges.has_key(key):
        #
        # No transitions from state on sym.
        #
        return None
    rv = self.edges[key]
    if rv is None:
        #
        # Target state isn't generated yet. Remedy this.
        #
        rv = self.makeState(state, sym)
        self.edges[key] = rv
    return rv

def gotoT(self, state, t):
    """Transition on a token *type* t (typestring fast path)."""
    return [self.goto(state, t)]

def gotoST(self, state, st):
    """Transition on the specific token *st*: try each terminal of the
    state and follow those that compare equal."""
    rv = []
    for t in self.states[state].T:
        if st == t:
            rv.append(self.goto(state, t))
    return rv

def add(self, set, item, i=None, predecessor=None, causal=None):
    """Add *item* to an Earley set, recording (predecessor, causal)
    back-links in self.links for buildTree() to replay later."""
    if predecessor is None:
        if item not in set:
            set.append(item)
    else:
        key = (item, i)
        if item not in set:
            self.links[key] = []
            set.append(item)
        self.links[key].append((predecessor, causal))
def makeSet(self, token, sets, i):
    """One Earley step: scan *token* into sets[i+1] and run the
    completer over sets[i]."""
    cur, next = sets[i], sets[i+1]
    # Dispatch on the token's type when typestring() provides one.
    ttype = token is not None and self.typestring(token) or None
    if ttype is not None:
        fn, arg = self.gotoT, ttype
    else:
        fn, arg = self.gotoST, token
    for item in cur:
        ptr = (item, i)
        state, parent = item
        add = fn(state, arg)
        for k in add:
            if k is not None:
                self.add(next, (k, parent), i+1, ptr)
                # Also follow the epsilon (None) edge, if any.
                nk = self.goto(k, None)
                if nk is not None:
                    self.add(next, (nk, i+1))
        if parent == i:
            continue
        # Completer: propagate each completed rule back to its parents.
        for rule in self.states[state].complete:
            lhs, rhs = rule
            for pitem in sets[parent]:
                pstate, pparent = pitem
                k = self.goto(pstate, lhs)
                if k is not None:
                    why = (item, i, rule)
                    pptr = (pitem, parent)
                    self.add(cur, (k, pparent),
                             i, pptr, why)
                    nk = self.goto(k, None)
                    if nk is not None:
                        self.add(cur, (nk, i))
def makeSet_fast(self, token, sets, i):
    """Speed-specialised copy of makeSet(); installed by __setstate__."""
    #
    # Call *only* when the entire state machine has been built!
    # It relies on self.edges being filled in completely, and
    # then duplicates and inlines code to boost speed at the
    # cost of extreme ugliness.
    #
    cur, next = sets[i], sets[i+1]
    ttype = token is not None and self.typestring(token) or None
    for item in cur:
        ptr = (item, i)
        state, parent = item
        if ttype is not None:
            k = self.edges.get((state, ttype), None)
            if k is not None:
                #self.add(next, (k, parent), i+1, ptr)
                #INLINED --v
                new = (k, parent)
                key = (new, i+1)
                if new not in next:
                    self.links[key] = []
                    next.append(new)
                self.links[key].append((ptr, None))
                #INLINED --^
                #nk = self.goto(k, None)
                nk = self.edges.get((k, None), None)
                if nk is not None:
                    #self.add(next, (nk, i+1))
                    #INLINED --v
                    new = (nk, i+1)
                    if new not in next:
                        next.append(new)
                    #INLINED --^
        else:
            add = self.gotoST(state, token)
            for k in add:
                if k is not None:
                    self.add(next, (k, parent), i+1, ptr)
                    #nk = self.goto(k, None)
                    nk = self.edges.get((k, None), None)
                    if nk is not None:
                        self.add(next, (nk, i+1))
        if parent == i:
            continue
        for rule in self.states[state].complete:
            lhs, rhs = rule
            for pitem in sets[parent]:
                pstate, pparent = pitem
                #k = self.goto(pstate, lhs)
                k = self.edges.get((pstate, lhs), None)
                if k is not None:
                    why = (item, i, rule)
                    pptr = (pitem, parent)
                    #self.add(cur, (k, pparent),
                    #         i, pptr, why)
                    #INLINED --v
                    new = (k, pparent)
                    key = (new, i)
                    if new not in cur:
                        self.links[key] = []
                        cur.append(new)
                    self.links[key].append((pptr, why))
                    #INLINED --^
                    #nk = self.goto(k, None)
                    nk = self.edges.get((k, None), None)
                    if nk is not None:
                        #self.add(cur, (nk, i))
                        #INLINED --v
                        new = (nk, i)
                        if new not in cur:
                            cur.append(new)
                        #INLINED --^
def predecessor(self, key, causal):
    """Follow the back-link of *key* whose cause matches *causal*."""
    for p, c in self.links[key]:
        if c == causal:
            return p
    assert 0

def causal(self, key):
    """Pick the completion cause for *key*; delegates to ambiguity()
    when more than one derivation reached the item."""
    links = self.links[key]
    if len(links) == 1:
        return links[0][1]
    choices = []
    rule2cause = {}
    for p, c in links:
        rule = c[2]
        choices.append(rule)
        rule2cause[rule] = c
    return rule2cause[self.ambiguity(choices)]

def deriveEpsilon(self, nt):
    """Recursively build the attribute for a nullable nonterminal."""
    if len(self.newrules[nt]) > 1:
        rule = self.ambiguity(self.newrules[nt])
    else:
        rule = self.newrules[nt][0]
    #print rule
    rhs = rule[1]
    attr = [None] * len(rhs)
    # Evaluate right-to-left, mirroring buildTree().
    for i in range(len(rhs)-1, -1, -1):
        attr[i] = self.deriveEpsilon(rhs[i])
    return self.rule2func[self.new2old[rule]](attr)
def buildTree(self, nt, item, tokens, k):
    """Replay the back-links right-to-left to run the semantic actions
    for nonterminal *nt* whose derivation ends at position *k*."""
    state, parent = item
    choices = []
    for rule in self.states[state].complete:
        if rule[0] == nt:
            choices.append(rule)
    rule = choices[0]
    if len(choices) > 1:
        rule = self.ambiguity(choices)
    #print rule
    rhs = rule[1]
    attr = [None] * len(rhs)
    for i in range(len(rhs)-1, -1, -1):
        sym = rhs[i]
        if not self.newrules.has_key(sym):
            # Terminal: consume one token (the |- marker consumes none).
            if sym != self._BOF:
                attr[i] = tokens[k-1]
                key = (item, k)
                item, k = self.predecessor(key, None)
        #elif self.isnullable(sym):
        elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
            attr[i] = self.deriveEpsilon(sym)
        else:
            key = (item, k)
            why = self.causal(key)
            attr[i] = self.buildTree(sym, why[0],
                                     tokens, why[1])
            item, k = self.predecessor(key, why)
    return self.rule2func[self.new2old[rule]](attr)
def ambiguity(self, rules):
    """Choose among competing *rules* via resolve(), presenting them
    sorted by (RHS length, rule name)."""
    #
    # XXX - problem here and in collectRules() if the same rule
    # appears in >1 method. Also undefined results if rules
    # causing the ambiguity appear in the same method.
    #
    sortlist = []
    name2index = {}
    for i in range(len(rules)):
        lhs, rhs = rule = rules[i]
        name = self.rule2name[self.new2old[rule]]
        sortlist.append((len(rhs), name))
        name2index[name] = i
    sortlist.sort()
    list = map(lambda (a,b): b, sortlist)
    return rules[name2index[self.resolve(list)]]

def resolve(self, list):
    """Default ambiguity resolution: favour the shortest RHS."""
    #
    # Resolve ambiguity in favor of the shortest RHS.
    # Since we walk the tree from the top down, this
    # should effectively resolve in favor of a "shift".
    #
    return list[0]
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
    """GenericParser that builds a syntax tree automatically: every
    reduction creates a node of the user-supplied AST class (which must
    support slice assignment and __len__)."""

    def __init__(self, AST, start):
        GenericParser.__init__(self, start)
        self.AST = AST

    def preprocess(self, rule, func):
        # Replace each rule's action with one that builds an AST node.
        # Default arguments in the lambdas bind lhs/self at creation time.
        rebind = lambda lhs, self=self: \
                    lambda args, lhs=lhs, self=self: \
                        self.buildASTNode(args, lhs)
        lhs, rhs = rule
        return rule, rebind(lhs)

    def buildASTNode(self, args, lhs):
        """Wrap non-AST children as terminals and build the parent node."""
        children = []
        for arg in args:
            if isinstance(arg, self.AST):
                children.append(arg)
            else:
                children.append(self.terminal(arg))
        return self.nonterminal(lhs, children)

    def terminal(self, token): return token

    def nonterminal(self, type, args):
        rv = self.AST(type)
        # Attach children via slice assignment (the AST class contract).
        rv[:len(args)] = args
        return rv
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
class GenericASTTraversalPruningException:
    """Raised by GenericASTTraversal.prune() to stop descending a subtree."""
    pass
class GenericASTTraversal:
    """Visitor over an AST (per Design Patterns).

    Each node is dispatched to ``n_<type>()`` when such a method exists,
    otherwise to default().  preorder() additionally invokes an optional
    ``n_<type>_exit()`` hook after the children, and a handler may call
    prune() to skip a node's subtree.  Node types come from typestring().
    """

    def __init__(self, ast):
        self.ast = ast

    def typestring(self, node):
        return node.type

    def prune(self):
        raise GenericASTTraversalPruningException

    def preorder(self, node=None):
        node = self.ast if node is None else node
        try:
            hook = 'n_' + self.typestring(node)
            visit = getattr(self, hook) if hasattr(self, hook) else self.default
            visit(node)
        except GenericASTTraversalPruningException:
            # prune() was called: skip this node's children and exit hook.
            return
        for child in node:
            self.preorder(child)
        exit_hook = hook + '_exit'
        if hasattr(self, exit_hook):
            getattr(self, exit_hook)(node)

    def postorder(self, node=None):
        node = self.ast if node is None else node
        for child in node:
            self.postorder(child)
        hook = 'n_' + self.typestring(node)
        visit = getattr(self, hook) if hasattr(self, hook) else self.default
        visit(node)

    def default(self, node):
        """Fallback handler for nodes with no n_<type> method; a no-op."""
        pass
#
# GenericASTMatcher. AST nodes must have "__getitem__" and "__cmp__"
# implemented.
#
# XXX - makes assumptions about how GenericParser walks the parse tree.
#
class GenericASTMatcher(GenericParser):
    """GenericParser variant that matches patterns against an AST by
    linearising the tree into a token stream and parsing it.  AST nodes
    must implement __getitem__ and __cmp__."""

    def __init__(self, start, ast):
        GenericParser.__init__(self, start)
        self.ast = ast

    def preprocess(self, rule, func):
        # Reverse each RHS (the tree is linearised back-to-front) and
        # route the action through foundMatch().
        rebind = lambda func, self=self: \
                    lambda args, func=func, self=self: \
                        self.foundMatch(args, func)
        lhs, rhs = rule
        rhslist = list(rhs)
        rhslist.reverse()
        return (lhs, tuple(rhslist)), rebind(func)

    def foundMatch(self, args, func):
        func(args[-1])
        return args[-1]

    def match_r(self, node):
        """Linearise *node*: node itself, then '(' children ')' if any."""
        self.input.insert(0, node)
        children = 0
        for child in node:
            if children == 0:
                self.input.insert(0, '(')
            children = children + 1
            self.match_r(child)
        if children > 0:
            self.input.insert(0, ')')

    def match(self, ast=None):
        if ast is None:
            ast = self.ast
        self.input = []
        self.match_r(ast)
        self.parse(self.input)

    def resolve(self, list):
        #
        # Resolve ambiguity in favor of the longest RHS.
        #
        return list[-1]
def _dump(tokens, sets, states):
    """Debug helper: print every Earley set, its items, and the dotted
    rules of each item's state, interleaved with the consumed tokens."""
    for i in range(len(sets)):
        print 'set', i
        for item in sets[i]:
            print '\t', item
            for (lhs, rhs), pos in states[item[0]].items:
                print '\t\t', lhs, '::=',
                print string.join(rhs[:pos]),
                print '.',
                print string.join(rhs[pos:])
        if i < len(tokens):
            print
            print 'token', str(tokens[i])
            print
| apache-2.0 |
jbloom/mutpath | src/plot.py | 1 | 10257 | """Module for performing plotting for ``mutpath`` package.
This module uses ``pylab`` and ``matplotlib`` to make plots. These plots will
fail if ``pylab`` and ``matplotlib`` are not available for importation. Before
running any function in this module, you can run the *PylabAvailable*
function to determine if ``pylab`` and ``matplotlib`` are available. Otherwise,
calling any other function will raise an Exception if thise modules are
not available. The ``pdf`` backend is used for ``matplotlib`` / ``pylab``. This means
that plots must be created as PDF files.
Functions are:
`PylabAvailable`
`CumulativeFractionPlot`
'DatesPlot`
`Base10Formatter`
`SplitLabel`
Written by Jesse Bloom.
"""
import os
import sys
import math
# global variable _pylabavailable indicates if pylab/matplotlib present
try:
import matplotlib
matplotlib.use('pdf')
import pylab
_pylabavailable = True
except ImportError:
_pylabavailable = False
def PylabAvailable():
    """Returns True if pylab/matplotlib available, False otherwise.

    You should call this function to test for the availability of the
    pylab/matplotlib plotting modules before using other functions in
    this module.
    """
    # _pylabavailable is set once by the try/except import at module load.
    return _pylabavailable
def DatesPlot(mutdates, plotfile, interval):
    """Plots dates of mutations.

    Uses pylab / matplotlib to plot the dates and credible intervals
    for mutations. Will raise an error *PylabAvailable() == False*.
    The plot is a PDF.

    * *mutdates* is a list of the mutations, in the form of the tuples
      *(median, mininterval, maxinterval, mut, fractoca, weight)*. Mutations
      are plotted in the order they are listed. In these tuples:

      * *median* : posterior median date
      * *minterval* : minimum of credible interval
      * *maxinterval* : maximum of credible interval
      * *mut* : string giving name of mutation
      * *fractoca* : probability mutation is on path from common ancestor
        to starting sequence
      * *weight* : fraction of paths containing mutation.

    * *plotfile* is a string giving the name of the PDF file we create.

    * *interval* is the range of the credible interval. For example, 0.9
      means a 90% credible interval.
    """
    ext = os.path.splitext(plotfile)[1].lower()
    if ext != '.pdf':
        raise ValueError("Extension must be .pdf, but found %s" % ext)
    if not PylabAvailable():
        raise ValueError("pylab / matplotlib not available.")
    if not mutdates:
        raise ValueError("no mutation dates to plot")
    # Partition mutations: fractoca > 0.5 -> path to the common ancestor
    # (red series), otherwise path from the common ancestor (blue series).
    tocalabels = []
    tocamedians = []
    tocaerrlow = []
    tocaerrhigh = []
    tocays = []
    fromcalabels = []
    fromcamedians = []
    fromcaerrlow = []
    fromcaerrhigh = []
    fromcays = []
    y = 0
    for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates:
        label = "%s" % (mut)
        # Asymmetric error bars measured from the posterior median.
        errlow = median - mininterval
        errhigh = maxinterval - median
        if fractoca > 0.5:
            tocays.append(y)
            tocalabels.append(label)
            tocamedians.append(median)
            tocaerrlow.append(errlow)
            tocaerrhigh.append(errhigh)
        else:
            fromcays.append(y)
            fromcalabels.append(label)
            fromcamedians.append(median)
            fromcaerrlow.append(errlow)
            fromcaerrhigh.append(errhigh)
        y += 1
    (lmargin, rmargin, bmargin, tmargin) = (0.11, 0.05, 0.08, 0.01)
    matplotlib.rc('font', size=10)
    matplotlib.rc('xtick', labelsize=10)
    matplotlib.rc('ytick', labelsize=10)
    matplotlib.rc('legend', numpoints=1)
    matplotlib.rc('legend', fontsize=10)
    fig = pylab.figure(figsize=(6, 6))
    ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - tmargin - bmargin])
    tocabar = fromcabar = None
    if tocalabels:
        tocabar = pylab.errorbar(tocamedians, tocays, xerr=[tocaerrlow, tocaerrhigh], fmt='sr')
    if fromcalabels:
        fromcabar = pylab.errorbar(fromcamedians, fromcays, xerr=[fromcaerrlow, fromcaerrhigh], fmt='sb')
    ny = len(mutdates)
    pylab.gca().set_ylim((-1, ny))
    pylab.gca().yaxis.set_major_locator(matplotlib.ticker.FixedLocator([y for y in range(ny)]))
    # NOTE(review): tick labels are toca-first then fromca, but the y
    # positions interleave the two series in input order -- labels may be
    # misassigned when the series interleave.  Confirm intended behavior.
    pylab.gca().yaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(tocalabels + fromcalabels))
    pylab.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
    pylab.xlabel("Date (posterior median and Bayesian %.2f%s credible interval)" % (interval * 100, '%'))
    if tocabar and fromcabar:
        pylab.legend([tocabar[0], fromcabar[0]], ['path to common ancestor', 'path from common ancestor'], loc='lower right')
    elif tocabar:
        pylab.legend([tocabar[0]], ['path to common ancestor'], loc='lower right')
    elif fromcabar:
        pylab.legend([fromcabar[0]], ['path from common ancestor'], loc='lower right')
    pylab.savefig(plotfile)
def CumulativeFractionPlot(datalist, plotfile, title, xlabel):
    """Creates a cumulative fraction plot.

    Takes a list of numeric data. Plots a cumulative fraction
    plot giving the fraction of the data points that are <=
    the indicated value.

    *datalist* is a list of numbers giving the data for which we
    are computing the cumulative fraction plot. Raises an
    exception if this is an empty list.  NOTE: the list is sorted
    in place as a side effect.

    *plotfile* is the name of the output plot file created by this method
    (such as 'plot.pdf'). The extension must be '.pdf'.

    *title* is a string placed above the plot as a title. Uses LaTex
    formatting.

    *xlabel* is the label given to the X-axis. Uses LaTex formatting.

    This function uses pylab / matplotlib. It will raise an Exception if
    these modules cannot be imported (if PylabAvailable() is False).
    """
    if len(datalist) < 1:
        raise ValueError("datalist is empty")
    if not _pylabavailable:
        raise ImportError("Could not find pylab or matplotlib")
    if os.path.splitext(plotfile)[1] != '.pdf':
        raise ValueError("plotfile must end in .pdf: %s" % plotfile)
    datalist.sort() # sort from smallest to largest
    (xmin, xmax) = (datalist[0], datalist[-1])
    n = len(datalist)
    # Each point contributes 1/n; the running sum is the cumulative fraction.
    cumfracs = []
    cf = 0.0
    for x in datalist:
        cf += 1. / n
        cumfracs.append(cf)
    assert len(datalist) == len(cumfracs)
    assert abs(1.0 - cf) < 1e-7
    matplotlib.rc('text', usetex=True)
    matplotlib.rc('font', size=12)
    fig = pylab.figure(figsize=(6, 4))
    (lmargin, rmargin, bmargin, tmargin) = (0.1, 0.01, 0.15, 0.1)
    ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 -\
        bmargin - tmargin])
    pylab.plot(datalist, cumfracs, 'r-')
    pylab.gca().set_ylim([0, 1])
    pylab.gca().set_xlim([xmin, xmax])
    pylab.ylabel('cumulative fraction')
    pylab.xlabel(xlabel)
    pylab.title(title)
    if plotfile:
        pylab.savefig(plotfile)
    # Clear and close the figure to free matplotlib resources.
    pylab.clf()
    pylab.close()
def Base10Formatter(number, exp_cutoff, exp_decimal_digits, decimal_digits):
    """Formats a non-negative number for LaTex math mode.

    The number is rendered in scientific notation ('x \\times 10^{e}')
    whenever the magnitude of its base-10 exponent is at least
    *exp_cutoff*; otherwise it is rendered as a plain decimal.
    *exp_decimal_digits* and *decimal_digits* give the number of digits
    shown after the decimal point in the two respective cases.  Zero is
    returned as '0'; a negative *number* raises a ValueError.

    >>> Base10Formatter(103, 3, 1, 1)
    '103.0'

    >>> Base10Formatter(2892.3, 3, 1, 1)
    '2.9 \\\\times 10^{3}'

    >>> Base10Formatter(0.012, 2, 1, 1)
    '1.2 \\\\times 10^{-2}'

    >>> Base10Formatter(0.0, 3, 1, 1)
    '0'
    """
    if number < 0:
        raise ValueError('number must be >= 0')
    if number == 0:
        return '0'
    # floor of the base-10 logarithm is the scientific-notation exponent.
    exponent = int(math.floor(math.log10(number)))
    if abs(exponent) < exp_cutoff:
        # Plain decimal form; '%.*f' takes the precision from the args.
        return '%.*f' % (decimal_digits, number)
    mantissa = number / (10.0 ** exponent)
    return '%.*f \\times 10^{%d}' % (exp_decimal_digits, mantissa, exponent)
def SplitLabel(label, splitlen, splitchar):
    """Splits *label* at whitespace near its middle if it is too long.

    Labels of length <= *splitlen* are returned unchanged.  Longer
    labels are split into two pieces joined by *splitchar*, breaking at
    the whitespace character closest to the middle; when a left and a
    right candidate are equally close, the left one wins.  A long label
    containing no whitespace is returned unchanged.

    >>> SplitLabel('WT virus 1', 10, '\\n')
    'WT virus 1'

    >>> SplitLabel('WT plasmid 1', 10, '\\n')
    'WT\\nplasmid 1'

    >>> SplitLabel('mutated WT plasmid 1', 10, '\\n')
    'mutated WT\\nplasmid 1'
    """
    if len(label) <= splitlen:
        return label
    mid = len(label) // 2
    # Largest offset keeping both probe positions inside the label.
    max_offset = min(mid, len(label) - mid - 1)
    for offset in range(max_offset + 1):
        # Probe the left candidate before the right one at each distance.
        for cut in (mid - offset, mid + offset):
            if label[cut].isspace():
                return label[:cut] + splitchar + label[cut + 1:]
    return label  # no whitespace found to split at
if __name__ == '__main__':
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
| gpl-3.0 |
todaychi/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/mcache.py | 34 | 7053 | #!/usr/bin/env python
import logging
import memcache
from saml2 import time_util
from saml2.cache import ToOld, CacheError
# The assumption is that any subject may consist of data
# gathered from several different sources, all with their own
# timeout time.
logger = logging.getLogger(__name__)
def _key(prefix, name):
    """Build the memcache key '<prefix>_<name>' for a subject/entity pair."""
    return "%s_%s" % (prefix, name)
class Cache(object):
def __init__(self, servers, debug=0):
    """Connect to the given list of memcached *servers*."""
    self._cache = memcache.Client(servers, debug)
def delete(self, subject_id):
    """Remove everything cached for *subject_id*: the per-entity records,
    the subject record itself, and its entry in the global subject list.

    Raises CacheError when any memcache delete/set operation fails.
    """
    entities = self.entities(subject_id)
    if entities:
        for entity_id in entities:
            if not self._cache.delete(_key(subject_id, entity_id)):
                raise CacheError("Delete failed")
    if not self._cache.delete(subject_id):
        raise CacheError("Delete failed")
    # Also drop the subject from the shared "subjects" index, if present.
    subjects = self._cache.get("subjects")
    if subjects and subject_id in subjects:
        subjects.remove(subject_id)
        if not self._cache.set("subjects", subjects):
            raise CacheError("Set operation failed")
def get_identity(self, subject_id, entities=None):
""" Get all the identity information that has been received and
are still valid about the subject.
:param subject_id: The identifier of the subject
:param entities: The identifiers of the entities whoes assertions are
interesting. If the list is empty all entities are interesting.
:return: A 2-tuple consisting of the identity information (a
dictionary of attributes and values) and the list of entities
whoes information has timed out.
"""
if not entities:
entities = self.entities(subject_id)
if not entities:
return {}, []
res = {}
oldees = []
for (entity_id, item) in self._cache.get_multi(entities,
subject_id+'_').items():
try:
info = self.get_info(item)
except ToOld:
oldees.append(entity_id)
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
return res, oldees
def get_info(self, item, check_not_on_or_after=True):
""" Get session information about a subject gotten from a
specified IdP/AA.
:param item: Information stored
:return: The session information as a dictionary
"""
try:
(timestamp, info) = item
except ValueError:
raise ToOld()
if check_not_on_or_after and not time_util.not_on_or_after(timestamp):
raise ToOld()
return info or None
def get(self, subject_id, entity_id, check_not_on_or_after=True):
res = self._cache.get(_key(subject_id, entity_id))
if not res:
return {}
else:
return self.get_info(res)
def set(self, subject_id, entity_id, info, timestamp=0):
""" Stores session information in the cache. Assumes that the subject_id
is unique within the context of the Service Provider.
:param subject_id: The subject identifier
:param entity_id: The identifier of the entity_id/receiver of an
assertion
:param info: The session info, the assertion is part of this
:param timestamp: A time after which the assertion is not valid.
"""
entities = self._cache.get(subject_id)
if not entities:
entities = []
subjects = self._cache.get("subjects")
if not subjects:
subjects = []
if subject_id not in subjects:
subjects.append(subject_id)
if not self._cache.set("subjects", subjects):
raise CacheError("set failed")
if entity_id not in entities:
entities.append(entity_id)
if not self._cache.set(subject_id, entities):
raise CacheError("set failed")
# Should use memcache's expire
if not self._cache.set(_key(subject_id, entity_id), (timestamp, info)):
raise CacheError("set failed")
def reset(self, subject_id, entity_id):
""" Scrap the assertions received from a IdP or an AA about a special
subject.
:param subject_id: The subjects identifier
:param entity_id: The identifier of the entity_id of the assertion
:return:
"""
if not self._cache.set(_key(subject_id, entity_id), {}, 0):
raise CacheError("reset failed")
def entities(self, subject_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param subject_id: The identifier of the subject
:return: A possibly empty list of entity identifiers
"""
res = self._cache.get(subject_id)
if not res:
raise KeyError("No such subject")
else:
return res
def receivers(self, subject_id):
""" Another name for entities() just to make it more logic in the IdP
scenario """
return self.entities(subject_id)
def active(self, subject_id, entity_id):
""" Returns the status of assertions from a specific entity_id.
:param subject_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False depending on if the assertion is still
valid or not.
"""
try:
(timestamp, info) = self._cache.get(_key(subject_id, entity_id))
except ValueError:
return False
except TypeError:
return False
# if not info:
# return False
try:
return time_util.not_on_or_after(timestamp)
except ToOld:
return False
def subjects(self):
""" Return identifiers for all the subjects that are in the cache.
:return: list of subject identifiers
"""
return self._cache.get("subjects")
def update(self, subject_id, entity_id, ava):
res = self._cache.get(_key(subject_id, entity_id))
if res is None:
raise KeyError("No such subject")
else:
info = self.get_info(res)
if info:
info.update(ava)
self.set(subject_id, entity_id, info, res[0])
def valid_to(self, subject_id, entity_id, newtime):
try:
(timestamp, info) = self._cache.get(_key(subject_id, entity_id))
except ValueError:
return False
except TypeError:
info = {}
if not self._cache.set(_key(subject_id, entity_id), (newtime, info)):
raise CacheError("valid_to failed")
| apache-2.0 |
samthor/intellij-community | python/lib/Lib/site-packages/django/contrib/admin/options.py | 69 | 56318 | from django import forms, template
from django.forms.formsets import all_valid
from django.forms.models import modelform_factory, modelformset_factory, inlineformset_factory
from django.forms.models import BaseInlineFormSet
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin import widgets
from django.contrib.admin import helpers
from django.contrib.admin.util import unquote, flatten_fieldsets, get_deleted_objects, model_format_dict
from django.contrib import messages
from django.views.decorators.csrf import csrf_protect
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.paginator import Paginator
from django.db import models, transaction, router
from django.db.models.related import RelatedObject
from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist
from django.db.models.sql.constants import LOOKUP_SEP, QUERY_TERMS
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.decorators import method_decorator
from django.utils.datastructures import SortedDict
from django.utils.functional import update_wrapper
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.functional import curry
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.encoding import force_unicode
# Direction flags for ModelAdmin.radio_fields.
HORIZONTAL, VERTICAL = 1, 2


def get_ul_class(x):
    """Return the <ul> CSS class for a radio_fields direction flag.

    HORIZONTAL additionally gets the "inline" class so the radio list
    renders on a single line.  (Was a lambda using the fragile
    ``and/or`` ternary hack; a plain def with a real conditional
    expression is clearer and PEP 8 compliant.)
    """
    return 'radiolist%s' % (' inline' if x == HORIZONTAL else '')
# NOTE(review): judging by the name, raised when changelist query-string
# lookup parameters cannot be applied -- confirm against the changelist
# views, which are outside this file chunk.
class IncorrectLookupParameters(Exception):
    pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.  Maps model field classes
# to the kwargs passed to their formfield() method (see
# BaseModelAdmin.formfield_for_dbfield).
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
}

# Method-decorator form of csrf_protect, applied to ModelAdmin view methods.
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(object):
    """Functionality common to both ModelAdmin and InlineAdmin."""

    __metaclass__ = forms.MediaDefiningClass

    # Declarative options, overridden on subclasses.
    raw_id_fields = ()
    fields = None
    exclude = None
    fieldsets = None
    form = forms.ModelForm
    filter_vertical = ()
    filter_horizontal = ()
    radio_fields = {}
    prepopulated_fields = {}
    formfield_overrides = {}
    readonly_fields = ()
    ordering = None

    def __init__(self):
        # Layer the per-class formfield_overrides on top of the global
        # defaults; per-class entries win.
        overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
        overrides.update(self.formfield_overrides)
        self.formfield_overrides = overrides

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance.

        If kwargs are given, they're passed to the form Field's constructor.
        """
        request = kwargs.pop("request", None)

        # If the field specifies choices, we don't need to look for special
        # admin widgets - we just need to use a select widget of some kind.
        if db_field.choices:
            return self.formfield_for_choice_field(db_field, request, **kwargs)

        # ForeignKey or ManyToManyFields
        if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
            # Combine the field kwargs with any options for formfield_overrides.
            # Make sure the passed in **kwargs override anything in
            # formfield_overrides because **kwargs is more specific, and should
            # always win.
            if db_field.__class__ in self.formfield_overrides:
                kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)

            # Get the correct formfield.
            if isinstance(db_field, models.ForeignKey):
                formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
            elif isinstance(db_field, models.ManyToManyField):
                formfield = self.formfield_for_manytomany(db_field, request, **kwargs)

            # For non-raw_id fields, wrap the widget with a wrapper that adds
            # extra HTML -- the "add other" interface -- to the end of the
            # rendered output. formfield can be None if it came from a
            # OneToOneField with parent_link=True or a M2M intermediary.
            if formfield and db_field.name not in self.raw_id_fields:
                related_modeladmin = self.admin_site._registry.get(
                    db_field.rel.to)
                # Only show the green "+" if the user may add related objects.
                can_add_related = bool(related_modeladmin and
                                       related_modeladmin.has_add_permission(request))
                formfield.widget = widgets.RelatedFieldWidgetWrapper(
                    formfield.widget, db_field.rel, self.admin_site,
                    can_add_related=can_add_related)

            return formfield

        # If we've got overrides for the formfield defined, use 'em. **kwargs
        # passed to formfield_for_dbfield override the defaults.  Walking the
        # MRO means a subclassed model field picks up its parent's override.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                kwargs = dict(self.formfield_overrides[klass], **kwargs)
                return db_field.formfield(**kwargs)

        # For any other type of field, just call its formfield() method.
        return db_field.formfield(**kwargs)

    def formfield_for_choice_field(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a database Field that has declared choices.
        """
        # If the field is named as a radio_field, use a RadioSelect
        if db_field.name in self.radio_fields:
            # Avoid stomping on custom widget/choices arguments.
            if 'widget' not in kwargs:
                kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                    'class': get_ul_class(self.radio_fields[db_field.name]),
                })
            if 'choices' not in kwargs:
                kwargs['choices'] = db_field.get_choices(
                    include_blank = db_field.blank,
                    blank_choice=[('', _('None'))]
                )
        return db_field.formfield(**kwargs)

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a ForeignKey.
        """
        db = kwargs.get('using')
        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel, using=db)
        elif db_field.name in self.radio_fields:
            kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                'class': get_ul_class(self.radio_fields[db_field.name]),
            })
            kwargs['empty_label'] = db_field.blank and _('None') or None

        return db_field.formfield(**kwargs)

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a ManyToManyField.
        """
        # If it uses an intermediary model that isn't auto created, don't show
        # a field in admin.
        if not db_field.rel.through._meta.auto_created:
            return None
        db = kwargs.get('using')

        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, using=db)
            kwargs['help_text'] = ''
        elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
            kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical))

        return db_field.formfield(**kwargs)

    def _declared_fieldsets(self):
        # Normalize: explicit fieldsets win, otherwise wrap `fields` in a
        # single unnamed fieldset; None means "derive from the form".
        if self.fieldsets:
            return self.fieldsets
        elif self.fields:
            return [(None, {'fields': self.fields})]
        return None
    declared_fieldsets = property(_declared_fieldsets)

    def get_readonly_fields(self, request, obj=None):
        # Hook: subclasses may vary read-only fields per request/object.
        return self.readonly_fields

    def queryset(self, request):
        """
        Returns a QuerySet of all model instances that can be edited by the
        admin site. This is used by changelist_view.
        """
        qs = self.model._default_manager.get_query_set()
        # TODO: this should be handled by some parameter to the ChangeList.
        ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)
        if ordering:
            qs = qs.order_by(*ordering)
        return qs

    def lookup_allowed(self, lookup):
        # Decide whether a query-string filter lookup is permitted, i.e.
        # corresponds to list_filter/date_hierarchy (prevents arbitrary
        # filtering via crafted URLs).
        parts = lookup.split(LOOKUP_SEP)

        # Last term in lookup is a query term (__exact, __startswith etc)
        # This term can be ignored.
        if len(parts) > 1 and parts[-1] in QUERY_TERMS:
            parts.pop()

        # Special case -- foo__id__exact and foo__id queries are implied
        # if foo has been specifically included in the lookup list; so
        # drop __id if it is the last part. However, first we need to find
        # the pk attribute name.
        model = self.model
        pk_attr_name = None
        for part in parts[:-1]:
            field, _, _, _ = model._meta.get_field_by_name(part)
            if hasattr(field, 'rel'):
                model = field.rel.to
                pk_attr_name = model._meta.pk.name
            elif isinstance(field, RelatedObject):
                model = field.model
                pk_attr_name = model._meta.pk.name
            else:
                pk_attr_name = None
        if pk_attr_name and len(parts) > 1 and parts[-1] == pk_attr_name:
            parts.pop()

        try:
            self.model._meta.get_field_by_name(parts[0])
        except FieldDoesNotExist:
            # Lookups on non-existants fields are ok, since they're ignored
            # later.
            return True
        else:
            clean_lookup = LOOKUP_SEP.join(parts)
            # list_filter and date_hierarchy are defined on ModelAdmin.
            return clean_lookup in self.list_filter or clean_lookup == self.date_hierarchy
class ModelAdmin(BaseModelAdmin):
    "Encapsulates all admin options and functionality for a given model."

    # Changelist presentation options.
    list_display = ('__str__',)
    list_display_links = ()
    list_filter = ()
    list_select_related = False
    list_per_page = 100
    list_editable = ()
    search_fields = ()
    date_hierarchy = None
    save_as = False
    save_on_top = False
    paginator = Paginator
    # NOTE(review): class-level mutable default; __init__ only reads it to
    # build per-instance inline_instances, so sharing appears intentional.
    inlines = []

    # Custom templates (designed to be over-ridden in subclasses)
    add_form_template = None
    change_form_template = None
    change_list_template = None
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    object_history_template = None

    # Actions
    actions = []
    action_form = helpers.ActionForm
    actions_on_top = True
    actions_on_bottom = False
    actions_selection_counter = True
def __init__(self, model, admin_site):
    # Bind this admin to one model and one AdminSite, and instantiate
    # the declared inline classes once per ModelAdmin instance.
    self.model = model
    self.opts = model._meta
    self.admin_site = admin_site
    self.inline_instances = []
    for inline_class in self.inlines:
        inline_instance = inline_class(self.model, self.admin_site)
        self.inline_instances.append(inline_instance)
    # Prepend the action checkbox column unless actions are disabled.
    if 'action_checkbox' not in self.list_display and self.actions is not None:
        self.list_display = ['action_checkbox'] + list(self.list_display)
    # Default the change-link column to the first real display column.
    if not self.list_display_links:
        for name in self.list_display:
            if name != 'action_checkbox':
                self.list_display_links = [name]
                break
    super(ModelAdmin, self).__init__()
def get_urls(self):
    """Return the urlpatterns for this model's admin views.

    Order matters: the catch-all change view pattern must stay last so
    it does not shadow add/history/delete.
    """
    from django.conf.urls.defaults import patterns, url

    def wrap(view):
        # Route every view through AdminSite.admin_view (auth checks etc.).
        def wrapper(*args, **kwargs):
            return self.admin_site.admin_view(view)(*args, **kwargs)
        return update_wrapper(wrapper, view)

    info = self.model._meta.app_label, self.model._meta.module_name

    urlpatterns = patterns('',
        url(r'^$',
            wrap(self.changelist_view),
            name='%s_%s_changelist' % info),
        url(r'^add/$',
            wrap(self.add_view),
            name='%s_%s_add' % info),
        url(r'^(.+)/history/$',
            wrap(self.history_view),
            name='%s_%s_history' % info),
        url(r'^(.+)/delete/$',
            wrap(self.delete_view),
            name='%s_%s_delete' % info),
        url(r'^(.+)/$',
            wrap(self.change_view),
            name='%s_%s_change' % info),
    )
    return urlpatterns
def urls(self):
    # Property wrapper so `admin.urls` can be included directly in URLconfs.
    return self.get_urls()
urls = property(urls)
def _media(self):
    """Assemble the JS assets needed by this admin's change pages,
    depending on which features (actions, prepopulated fields,
    ordering) are enabled."""
    from django.conf import settings

    js = ['js/core.js', 'js/admin/RelatedObjectLookups.js',
          'js/jquery.min.js', 'js/jquery.init.js']
    if self.actions is not None:
        js.extend(['js/actions.min.js'])
    if self.prepopulated_fields:
        js.append('js/urlify.js')
        js.append('js/prepopulate.min.js')
    if self.opts.get_ordered_objects():
        js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js' , 'js/admin/ordering.js'])
    # All paths are prefixed with the configured admin media root.
    return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def has_add_permission(self, request):
    "Returns True if the given request has permission to add an object."
    opts = self.opts
    codename = opts.get_add_permission()
    return request.user.has_perm('%s.%s' % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
    """
    Returns True if the given request has permission to change the given
    Django model instance.

    If `obj` is None, this should return True if the given request has
    permission to change *any* object of the given type.
    """
    opts = self.opts
    codename = opts.get_change_permission()
    return request.user.has_perm('%s.%s' % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
    """
    Returns True if the given request has permission to delete the given
    Django model instance.

    If `obj` is None, this should return True if the given request has
    permission to delete *any* object of the given type.
    """
    opts = self.opts
    codename = opts.get_delete_permission()
    return request.user.has_perm('%s.%s' % (opts.app_label, codename))
def get_model_perms(self, request):
    """
    Returns a dict of all perms for this model. This dict has the keys
    ``add``, ``change``, and ``delete`` mapping to the True/False for each
    of those actions.
    """
    return {
        'add': self.has_add_permission(request),
        'change': self.has_change_permission(request),
        'delete': self.has_delete_permission(request),
    }
def get_fieldsets(self, request, obj=None):
    "Hook for specifying fieldsets for the add form."
    if self.declared_fieldsets:
        return self.declared_fieldsets
    # No declared fieldsets: derive one unnamed fieldset from the form's
    # fields plus the read-only fields.
    form = self.get_form(request, obj)
    fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj))
    return [(None, {'fields': fields})]
def get_form(self, request, obj=None, **kwargs):
    """
    Returns a Form class for use in the admin add view. This is used by
    add_view and change_view.
    """
    if self.declared_fieldsets:
        fields = flatten_fieldsets(self.declared_fieldsets)
    else:
        fields = None
    if self.exclude is None:
        exclude = []
    else:
        exclude = list(self.exclude)
    exclude.extend(kwargs.get("exclude", []))
    # Read-only fields must be excluded from the editable form.
    exclude.extend(self.get_readonly_fields(request, obj))
    # if exclude is an empty list we pass None to be consistent with the
    # default on modelform_factory
    exclude = exclude or None
    defaults = {
        "form": self.form,
        "fields": fields,
        "exclude": exclude,
        "formfield_callback": curry(self.formfield_for_dbfield, request=request),
    }
    # Caller-supplied kwargs win over the computed defaults.
    defaults.update(kwargs)
    return modelform_factory(self.model, **defaults)
def get_changelist(self, request, **kwargs):
    """
    Returns the ChangeList class for use on the changelist page.
    """
    # Local import; the views module is imported lazily here rather than
    # at module load -- presumably to avoid circular imports, confirm.
    from django.contrib.admin.views.main import ChangeList
    return ChangeList
def get_object(self, request, object_id):
    """
    Returns an instance matching the primary key provided. ``None`` is
    returned if no match is found (or the object_id failed validation
    against the primary key field).
    """
    queryset = self.queryset(request)
    model = queryset.model
    try:
        # to_python may raise ValidationError for a malformed id.
        object_id = model._meta.pk.to_python(object_id)
        return queryset.get(pk=object_id)
    except (model.DoesNotExist, ValidationError):
        return None
def get_changelist_form(self, request, **kwargs):
    """
    Returns a Form class for use in the Formset on the changelist page.
    """
    defaults = {
        "formfield_callback": curry(self.formfield_for_dbfield, request=request),
    }
    # Caller-supplied kwargs win over the computed defaults.
    defaults.update(kwargs)
    return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
    """
    Returns a FormSet class for use on the changelist page if list_editable
    is used.
    """
    defaults = {
        "formfield_callback": curry(self.formfield_for_dbfield, request=request),
    }
    defaults.update(kwargs)
    # extra=0: the changelist edits existing rows only, no blank forms.
    return modelformset_factory(self.model,
        self.get_changelist_form(request), extra=0,
        fields=self.list_editable, **defaults)
def get_formsets(self, request, obj=None):
    # Yield one formset class per configured inline, in declaration order.
    for inline in self.inline_instances:
        yield inline.get_formset(request, obj)
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
    # Hook so subclasses can swap the paginator class via `self.paginator`.
    return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object):
    """
    Log that an object has been successfully added.

    The default implementation creates an admin LogEntry object.
    """
    from django.contrib.admin.models import LogEntry, ADDITION
    LogEntry.objects.log_action(
        user_id         = request.user.pk,
        content_type_id = ContentType.objects.get_for_model(object).pk,
        object_id       = object.pk,
        object_repr     = force_unicode(object),
        action_flag     = ADDITION
    )
def log_change(self, request, object, message):
    """
    Log that an object has been successfully changed.

    The default implementation creates an admin LogEntry object.
    """
    from django.contrib.admin.models import LogEntry, CHANGE
    LogEntry.objects.log_action(
        user_id         = request.user.pk,
        content_type_id = ContentType.objects.get_for_model(object).pk,
        object_id       = object.pk,
        object_repr     = force_unicode(object),
        action_flag     = CHANGE,
        change_message  = message
    )
def log_deletion(self, request, object, object_repr):
    """
    Log that an object will be deleted. Note that this method is called
    before the deletion.

    The default implementation creates an admin LogEntry object.
    """
    from django.contrib.admin.models import LogEntry, DELETION
    LogEntry.objects.log_action(
        user_id         = request.user.id,
        # content type taken from self.model since the object is going away
        content_type_id = ContentType.objects.get_for_model(self.model).pk,
        object_id       = object.pk,
        object_repr     = object_repr,
        action_flag     = DELETION
    )
def action_checkbox(self, obj):
    """
    A list_display column containing a checkbox widget.
    """
    return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_unicode(obj.pk))
# Column header is the select-all toggle; allow_tags lets the raw HTML through.
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_actions(self, request):
    """
    Return a dictionary mapping the names of all actions for this
    ModelAdmin to a tuple of (callable, name, description) for each action.
    """
    # If self.actions is explicitly set to None that means that we don't
    # want *any* actions enabled on this page.
    if self.actions is None:
        return []
    actions = []
    # Gather actions from the admin site first
    for (name, func) in self.admin_site.actions:
        description = getattr(func, 'short_description', name.replace('_', ' '))
        actions.append((func, name, description))
    # Then gather them from the model admin and all parent classes,
    # starting with self and working back up.
    for klass in self.__class__.mro()[::-1]:
        class_actions = getattr(klass, 'actions', [])
        # Avoid trying to iterate over None
        if not class_actions:
            continue
        actions.extend([self.get_action(action) for action in class_actions])
    # get_action might have returned None, so filter any of those out.
    actions = filter(None, actions)
    # Convert the actions into a SortedDict keyed by name
    # and sorted by description.  Later entries win on name collisions.
    actions.sort(key=lambda k: k[2].lower())
    actions = SortedDict([
        (name, (func, name, desc))
        for func, name, desc in actions
    ])
    return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
    """
    Return a list of choices for use in a form object. Each choice is a
    tuple (name, description).
    """
    # Start from a copy so the shared default list is never mutated.
    choices = list(default_choices)
    for func, name, description in self.get_actions(request).itervalues():
        choices.append((name, description % model_format_dict(self.opts)))
    return choices
def get_action(self, action):
    """
    Return a given action from a parameter, which can either be a callable,
    or the name of a method on the ModelAdmin. Return is a tuple of
    (callable, name, description).  Returns None if the name resolves to
    nothing on the admin site.
    """
    # If the action is a callable, just use it.
    if callable(action):
        func = action
        action = action.__name__

    # Next, look for a method. Grab it off self.__class__ to get an unbound
    # method instead of a bound one; this ensures that the calling
    # conventions are the same for functions and methods.
    elif hasattr(self.__class__, action):
        func = getattr(self.__class__, action)

    # Finally, look for a named method on the admin site
    else:
        try:
            func = self.admin_site.get_action(action)
        except KeyError:
            return None

    if hasattr(func, 'short_description'):
        description = func.short_description
    else:
        description = capfirst(action.replace('_', ' '))
    return func, action, description
def construct_change_message(self, request, form, formsets):
    """
    Construct a change message from a changed object.

    Collects the changed form fields plus every add/change/delete that
    happened in the inline formsets, joined into one sentence string.
    """
    change_message = []
    if form.changed_data:
        change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))

    if formsets:
        for formset in formsets:
            for added_object in formset.new_objects:
                change_message.append(_('Added %(name)s "%(object)s".')
                                      % {'name': force_unicode(added_object._meta.verbose_name),
                                         'object': force_unicode(added_object)})
            for changed_object, changed_fields in formset.changed_objects:
                change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
                                      % {'list': get_text_list(changed_fields, _('and')),
                                         'name': force_unicode(changed_object._meta.verbose_name),
                                         'object': force_unicode(changed_object)})
            for deleted_object in formset.deleted_objects:
                change_message.append(_('Deleted %(name)s "%(object)s".')
                                      % {'name': force_unicode(deleted_object._meta.verbose_name),
                                         'object': force_unicode(deleted_object)})
    change_message = ' '.join(change_message)
    return change_message or _('No fields changed.')
def message_user(self, request, message):
    """
    Send a message to the user. The default implementation
    posts a message using the django.contrib.messages backend.
    """
    messages.info(request, message)
def save_form(self, request, form, change):
    """
    Given a ModelForm return an unsaved instance. ``change`` is True if
    the object is being changed, and False if it's being added.
    """
    # commit=False: the instance is persisted later by save_model().
    return form.save(commit=False)
def save_model(self, request, obj, form, change):
    """
    Given a model instance save it to the database.

    Hook point: subclasses may inspect request/form/change before saving.
    """
    obj.save()
def delete_model(self, request, obj):
    """
    Given a model instance delete it from the database.

    Hook point for subclasses.  (Fixes the misspelled ``requet``
    parameter name; the admin views pass this argument positionally,
    so the rename is backward-compatible and matches every sibling
    hook's signature.)
    """
    obj.delete()
def save_formset(self, request, form, formset, change):
    """
    Given an inline formset save it to the database.

    Hook point: subclasses may adjust the formset before saving.
    """
    formset.save()
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
    """Render the add/change form template with the standard admin
    context (permissions, opts, save options)."""
    opts = self.model._meta
    app_label = opts.app_label
    ordered_objects = opts.get_ordered_objects()
    context.update({
        'add': add,
        'change': change,
        'has_add_permission': self.has_add_permission(request),
        'has_change_permission': self.has_change_permission(request, obj),
        'has_delete_permission': self.has_delete_permission(request, obj),
        'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
        'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
        'ordered_objects': ordered_objects,
        'form_url': mark_safe(form_url),
        'opts': opts,
        'content_type_id': ContentType.objects.get_for_model(self.model).id,
        'save_as': self.save_as,
        'save_on_top': self.save_on_top,
        'root_path': self.admin_site.root_path,
    })
    # Template resolution: explicit add/change template if configured,
    # else fall back from model- to app- to global default template.
    if add and self.add_form_template is not None:
        form_template = self.add_form_template
    else:
        form_template = self.change_form_template
    context_instance = template.RequestContext(request, current_app=self.admin_site.name)
    return render_to_response(form_template or [
        "admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
        "admin/%s/change_form.html" % app_label,
        "admin/change_form.html"
    ], context, context_instance=context_instance)
def response_add(self, request, obj, post_url_continue='../%s/'):
    """
    Determines the HttpResponse for the add_view stage.
    """
    opts = obj._meta
    pk_value = obj._get_pk_val()

    msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
    # Here, we distinguish between different save types by checking for
    # the presence of keys in request.POST.
    if "_continue" in request.POST:
        self.message_user(request, msg + ' ' + _("You may edit it again below."))
        if "_popup" in request.POST:
            post_url_continue += "?_popup=1"
        return HttpResponseRedirect(post_url_continue % pk_value)

    if "_popup" in request.POST:
        # escape() calls force_unicode.
        return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>' %
                            (escape(pk_value), escape(obj)))
    elif "_addanother" in request.POST:
        self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
        return HttpResponseRedirect(request.path)
    else:
        self.message_user(request, msg)

        # Figure out where to redirect. If the user has change permission,
        # redirect to the change-list page for this object. Otherwise,
        # redirect to the admin index.
        if self.has_change_permission(request, None):
            post_url = '../'
        else:
            post_url = '../../../'
        return HttpResponseRedirect(post_url)
def response_change(self, request, obj):
    """
    Determines the HttpResponse for the change_view stage.
    """
    opts = obj._meta
    pk_value = obj._get_pk_val()

    msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
    # Redirect target depends on which save button was pressed.
    if "_continue" in request.POST:
        self.message_user(request, msg + ' ' + _("You may edit it again below."))
        if "_popup" in request.REQUEST:
            return HttpResponseRedirect(request.path + "?_popup=1")
        else:
            return HttpResponseRedirect(request.path)
    elif "_saveasnew" in request.POST:
        msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(opts.verbose_name), 'obj': obj}
        self.message_user(request, msg)
        return HttpResponseRedirect("../%s/" % pk_value)
    elif "_addanother" in request.POST:
        self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
        return HttpResponseRedirect("../add/")
    else:
        self.message_user(request, msg)
        return HttpResponseRedirect("../")
def response_action(self, request, queryset):
    """
    Handle an admin action. This is called if a request is POSTed to the
    changelist; it returns an HttpResponse if the action was handled, and
    None otherwise.
    """
    # There can be multiple action forms on the page (at the top
    # and bottom of the change list, for example). Get the action
    # whose button was pushed.
    try:
        action_index = int(request.POST.get('index', 0))
    except ValueError:
        action_index = 0

    # Construct the action form.
    data = request.POST.copy()
    data.pop(helpers.ACTION_CHECKBOX_NAME, None)
    data.pop("index", None)

    # Use the action whose button was pushed
    try:
        data.update({'action': data.getlist('action')[action_index]})
    except IndexError:
        # If we didn't get an action from the chosen form that's invalid
        # POST data, so by deleting action it'll fail the validation check
        # below. So no need to do anything here
        pass

    action_form = self.action_form(data, auto_id=None)
    action_form.fields['action'].choices = self.get_action_choices(request)

    # If the form's valid we can handle the action.
    if action_form.is_valid():
        action = action_form.cleaned_data['action']
        select_across = action_form.cleaned_data['select_across']
        func, name, description = self.get_actions(request)[action]

        # Get the list of selected PKs. If nothing's selected, we can't
        # perform an action on it, so bail. Except we want to perform
        # the action explicitly on all objects.
        selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
        if not selected and not select_across:
            # Reminder that something needs to be selected or nothing will happen
            msg = _("Items must be selected in order to perform "
                    "actions on them. No items have been changed.")
            self.message_user(request, msg)
            return None

        if not select_across:
            # Perform the action only on the selected objects
            queryset = queryset.filter(pk__in=selected)

        response = func(self, request, queryset)

        # Actions may return an HttpResponse, which will be used as the
        # response from the POST. If not, we'll be a good little HTTP
        # citizen and redirect back to the changelist page.
        if isinstance(response, HttpResponse):
            return response
        else:
            return HttpResponseRedirect(request.get_full_path())
    else:
        msg = _("No action selected.")
        self.message_user(request, msg)
        return None
@csrf_protect_m
@transaction.commit_on_success
def add_view(self, request, form_url='', extra_context=None):
    """The 'add' admin view for this model.

    GET renders an empty form (seeded from query-string initial data);
    POST validates the main form plus all inline formsets and, only when
    everything validates, saves the object, its m2m data and the inlines
    inside one transaction (commit_on_success decorator).
    """
    model = self.model
    opts = model._meta
    if not self.has_add_permission(request):
        raise PermissionDenied
    ModelForm = self.get_form(request)
    formsets = []
    if request.method == 'POST':
        form = ModelForm(request.POST, request.FILES)
        if form.is_valid():
            # save_form returns an unsaved instance (commit=False style).
            new_object = self.save_form(request, form, change=False)
            form_validated = True
        else:
            form_validated = False
            # Bind inlines to a blank instance so they can still render
            # with their own validation errors.
            new_object = self.model()
        prefixes = {}
        for FormSet, inline in zip(self.get_formsets(request), self.inline_instances):
            prefix = FormSet.get_default_prefix()
            # Disambiguate the prefix when the same inline appears twice.
            prefixes[prefix] = prefixes.get(prefix, 0) + 1
            if prefixes[prefix] != 1:
                prefix = "%s-%s" % (prefix, prefixes[prefix])
            formset = FormSet(data=request.POST, files=request.FILES,
                              instance=new_object,
                              save_as_new="_saveasnew" in request.POST,
                              prefix=prefix, queryset=inline.queryset(request))
            formsets.append(formset)
        if all_valid(formsets) and form_validated:
            # Everything validated: persist the object, its m2m, and the
            # inline objects, then log and redirect.
            self.save_model(request, new_object, form, change=False)
            form.save_m2m()
            for formset in formsets:
                self.save_formset(request, form, formset, change=False)
            self.log_addition(request, new_object)
            return self.response_add(request, new_object)
    else:
        # Prepare the dict of initial data from the request.
        # We have to special-case M2Ms as a list of comma-separated PKs.
        initial = dict(request.GET.items())
        for k in initial:
            try:
                f = opts.get_field(k)
            except models.FieldDoesNotExist:
                # Unknown query-string keys are passed through untouched.
                continue
            if isinstance(f, models.ManyToManyField):
                initial[k] = initial[k].split(",")
        form = ModelForm(initial=initial)
        prefixes = {}
        for FormSet, inline in zip(self.get_formsets(request),
                                   self.inline_instances):
            prefix = FormSet.get_default_prefix()
            prefixes[prefix] = prefixes.get(prefix, 0) + 1
            if prefixes[prefix] != 1:
                prefix = "%s-%s" % (prefix, prefixes[prefix])
            formset = FormSet(instance=self.model(), prefix=prefix,
                              queryset=inline.queryset(request))
            formsets.append(formset)
    # Fall-through for GET and invalid POST: render the form again with
    # any errors attached.
    adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
        self.prepopulated_fields, self.get_readonly_fields(request),
        model_admin=self)
    media = self.media + adminForm.media
    inline_admin_formsets = []
    for inline, formset in zip(self.inline_instances, formsets):
        fieldsets = list(inline.get_fieldsets(request))
        readonly = list(inline.get_readonly_fields(request))
        inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
            fieldsets, readonly, model_admin=self)
        inline_admin_formsets.append(inline_admin_formset)
        media = media + inline_admin_formset.media
    context = {
        'title': _('Add %s') % force_unicode(opts.verbose_name),
        'adminform': adminForm,
        'is_popup': "_popup" in request.REQUEST,
        'show_delete': False,
        'media': mark_safe(media),
        'inline_admin_formsets': inline_admin_formsets,
        'errors': helpers.AdminErrorList(form, formsets),
        'root_path': self.admin_site.root_path,
        'app_label': opts.app_label,
    }
    context.update(extra_context or {})
    return self.render_change_form(request, context, form_url=form_url, add=True)
@csrf_protect_m
@transaction.commit_on_success
def change_view(self, request, object_id, extra_context=None):
    """The 'change' admin view for this model.

    Mirrors add_view but binds forms/formsets to an existing instance;
    "_saveasnew" POSTs are redirected to add_view instead.
    """
    model = self.model
    opts = model._meta
    obj = self.get_object(request, unquote(object_id))
    # NOTE: permission is checked before the existence check, so a
    # missing object 404s only for users who could have changed it.
    if not self.has_change_permission(request, obj):
        raise PermissionDenied
    if obj is None:
        raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
    if request.method == 'POST' and "_saveasnew" in request.POST:
        return self.add_view(request, form_url='../add/')
    ModelForm = self.get_form(request, obj)
    formsets = []
    if request.method == 'POST':
        form = ModelForm(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            form_validated = True
            # Unsaved updated instance; persisted only if inlines validate.
            new_object = self.save_form(request, form, change=True)
        else:
            form_validated = False
            new_object = obj
        prefixes = {}
        for FormSet, inline in zip(self.get_formsets(request, new_object),
                                   self.inline_instances):
            prefix = FormSet.get_default_prefix()
            # Disambiguate duplicate inline prefixes.
            prefixes[prefix] = prefixes.get(prefix, 0) + 1
            if prefixes[prefix] != 1:
                prefix = "%s-%s" % (prefix, prefixes[prefix])
            formset = FormSet(request.POST, request.FILES,
                              instance=new_object, prefix=prefix,
                              queryset=inline.queryset(request))
            formsets.append(formset)
        if all_valid(formsets) and form_validated:
            self.save_model(request, new_object, form, change=True)
            form.save_m2m()
            for formset in formsets:
                self.save_formset(request, form, formset, change=True)
            change_message = self.construct_change_message(request, form, formsets)
            self.log_change(request, new_object, change_message)
            return self.response_change(request, new_object)
    else:
        form = ModelForm(instance=obj)
        prefixes = {}
        for FormSet, inline in zip(self.get_formsets(request, obj), self.inline_instances):
            prefix = FormSet.get_default_prefix()
            prefixes[prefix] = prefixes.get(prefix, 0) + 1
            if prefixes[prefix] != 1:
                prefix = "%s-%s" % (prefix, prefixes[prefix])
            formset = FormSet(instance=obj, prefix=prefix,
                              queryset=inline.queryset(request))
            formsets.append(formset)
    # Fall-through for GET and invalid POST: re-render with errors.
    adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
        self.prepopulated_fields, self.get_readonly_fields(request, obj),
        model_admin=self)
    media = self.media + adminForm.media
    inline_admin_formsets = []
    for inline, formset in zip(self.inline_instances, formsets):
        fieldsets = list(inline.get_fieldsets(request, obj))
        readonly = list(inline.get_readonly_fields(request, obj))
        inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
            fieldsets, readonly, model_admin=self)
        inline_admin_formsets.append(inline_admin_formset)
        media = media + inline_admin_formset.media
    context = {
        'title': _('Change %s') % force_unicode(opts.verbose_name),
        'adminform': adminForm,
        'object_id': object_id,
        'original': obj,
        'is_popup': "_popup" in request.REQUEST,
        'media': mark_safe(media),
        'inline_admin_formsets': inline_admin_formsets,
        'errors': helpers.AdminErrorList(form, formsets),
        'root_path': self.admin_site.root_path,
        'app_label': opts.app_label,
    }
    context.update(extra_context or {})
    return self.render_change_form(request, context, change=True, obj=obj)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
    """The 'change list' admin view for this model.

    Handles, in order: permission check, ChangeList construction,
    POSTed bulk actions (with and without intermediate confirmation),
    POSTed bulk edits (list_editable), and finally rendering.
    """
    from django.contrib.admin.views.main import ERROR_FLAG
    opts = self.model._meta
    app_label = opts.app_label
    if not self.has_change_permission(request, None):
        raise PermissionDenied
    # Check actions to see if any are available on this changelist
    actions = self.get_actions(request)
    # Remove action checkboxes if there aren't any actions available.
    list_display = list(self.list_display)
    if not actions:
        try:
            list_display.remove('action_checkbox')
        except ValueError:
            pass
    ChangeList = self.get_changelist(request)
    try:
        cl = ChangeList(request, self.model, list_display, self.list_display_links,
            self.list_filter, self.date_hierarchy, self.search_fields,
            self.list_select_related, self.list_per_page, self.list_editable, self)
    except IncorrectLookupParameters:
        # Wacky lookup parameters were given, so redirect to the main
        # changelist page, without parameters, and pass an 'invalid=1'
        # parameter via the query string. If wacky parameters were given
        # and the 'invalid=1' parameter was already in the query string,
        # something is screwed up with the database, so display an error
        # page.
        if ERROR_FLAG in request.GET.keys():
            return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
        return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
    # If the request was POSTed, this might be a bulk action or a bulk
    # edit. Try to look up an action or confirmation first, but if this
    # isn't an action the POST will fall through to the bulk edit check,
    # below.
    action_failed = False
    selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
    # Actions with no confirmation
    if (actions and request.method == 'POST' and
            'index' in request.POST and '_save' not in request.POST):
        if selected:
            response = self.response_action(request, queryset=cl.get_query_set())
            if response:
                return response
            else:
                action_failed = True
        else:
            # Nothing ticked: tell the user rather than silently no-op.
            msg = _("Items must be selected in order to perform "
                    "actions on them. No items have been changed.")
            self.message_user(request, msg)
            action_failed = True
    # Actions with confirmation
    if (actions and request.method == 'POST' and
            helpers.ACTION_CHECKBOX_NAME in request.POST and
            'index' not in request.POST and '_save' not in request.POST):
        if selected:
            response = self.response_action(request, queryset=cl.get_query_set())
            if response:
                return response
            else:
                action_failed = True
    # If we're allowing changelist editing, we need to construct a formset
    # for the changelist given all the fields to be edited. Then we'll
    # use the formset to validate/process POSTed data.
    formset = cl.formset = None
    # Handle POSTed bulk-edit data.
    if (request.method == "POST" and self.list_editable and
            '_save' in request.POST and not action_failed):
        FormSet = self.get_changelist_formset(request)
        formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
        if formset.is_valid():
            changecount = 0
            for form in formset.forms:
                if form.has_changed():
                    obj = self.save_form(request, form, change=True)
                    self.save_model(request, obj, form, change=True)
                    form.save_m2m()
                    change_msg = self.construct_change_message(request, form, None)
                    self.log_change(request, obj, change_msg)
                    changecount += 1
            if changecount:
                if changecount == 1:
                    name = force_unicode(opts.verbose_name)
                else:
                    name = force_unicode(opts.verbose_name_plural)
                msg = ungettext("%(count)s %(name)s was changed successfully.",
                                "%(count)s %(name)s were changed successfully.",
                                changecount) % {'count': changecount,
                                                'name': name,
                                                'obj': force_unicode(obj)}
                self.message_user(request, msg)
            return HttpResponseRedirect(request.get_full_path())
    # Handle GET -- construct a formset for display.
    elif self.list_editable:
        FormSet = self.get_changelist_formset(request)
        formset = cl.formset = FormSet(queryset=cl.result_list)
    # Build the list of media to be used by the formset.
    if formset:
        media = self.media + formset.media
    else:
        media = self.media
    # Build the action form and populate it with available actions.
    if actions:
        action_form = self.action_form(auto_id=None)
        action_form.fields['action'].choices = self.get_action_choices(request)
    else:
        action_form = None
    selection_note_all = ungettext('%(total_count)s selected',
        'All %(total_count)s selected', cl.result_count)
    context = {
        'module_name': force_unicode(opts.verbose_name_plural),
        'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
        'selection_note_all': selection_note_all % {'total_count': cl.result_count},
        'title': cl.title,
        'is_popup': cl.is_popup,
        'cl': cl,
        'media': media,
        'has_add_permission': self.has_add_permission(request),
        'root_path': self.admin_site.root_path,
        'app_label': app_label,
        'action_form': action_form,
        'actions_on_top': self.actions_on_top,
        'actions_on_bottom': self.actions_on_bottom,
        'actions_selection_counter': self.actions_selection_counter,
    }
    context.update(extra_context or {})
    context_instance = template.RequestContext(request, current_app=self.admin_site.name)
    # Template lookup: per-model, per-app, then the global default.
    return render_to_response(self.change_list_template or [
        'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
        'admin/%s/change_list.html' % app_label,
        'admin/change_list.html'
    ], context, context_instance=context_instance)
@csrf_protect_m
@transaction.commit_on_success
def delete_view(self, request, object_id, extra_context=None):
    """The 'delete' admin view for this model.

    GET renders the confirmation page (listing everything that would
    cascade); POST performs the deletion and redirects.
    """
    opts = self.model._meta
    app_label = opts.app_label
    obj = self.get_object(request, unquote(object_id))
    if not self.has_delete_permission(request, obj):
        raise PermissionDenied
    if obj is None:
        raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
    using = router.db_for_write(self.model)
    # Populate deleted_objects, a data structure of all related objects that
    # will also be deleted.
    (deleted_objects, perms_needed) = get_deleted_objects(
        [obj], opts, request.user, self.admin_site, using)
    if request.POST: # The user has already confirmed the deletion.
        if perms_needed:
            # User lacks delete permission on some cascading object.
            raise PermissionDenied
        obj_display = force_unicode(obj)
        self.log_deletion(request, obj, obj_display)
        self.delete_model(request, obj)
        self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
        if not self.has_change_permission(request, None):
            # Can't view the changelist: go back to the admin index.
            return HttpResponseRedirect("../../../../")
        return HttpResponseRedirect("../../")
    context = {
        "title": _("Are you sure?"),
        "object_name": force_unicode(opts.verbose_name),
        "object": obj,
        "deleted_objects": deleted_objects,
        "perms_lacking": perms_needed,
        "opts": opts,
        "root_path": self.admin_site.root_path,
        "app_label": app_label,
    }
    context.update(extra_context or {})
    context_instance = template.RequestContext(request, current_app=self.admin_site.name)
    return render_to_response(self.delete_confirmation_template or [
        "admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
        "admin/%s/delete_confirmation.html" % app_label,
        "admin/delete_confirmation.html"
    ], context, context_instance=context_instance)
def history_view(self, request, object_id, extra_context=None):
    """The 'history' admin view for this model.

    Lists the admin LogEntry rows recorded for this object, oldest first.
    NOTE(review): no explicit permission check here — presumably relies on
    URL-level admin protection; confirm before exposing.
    """
    from django.contrib.admin.models import LogEntry
    model = self.model
    opts = model._meta
    app_label = opts.app_label
    action_list = LogEntry.objects.filter(
        object_id = object_id,
        content_type__id__exact = ContentType.objects.get_for_model(model).id
    ).select_related().order_by('action_time')
    # If no history was found, see whether this object even exists.
    obj = get_object_or_404(model, pk=unquote(object_id))
    context = {
        'title': _('Change history: %s') % force_unicode(obj),
        'action_list': action_list,
        'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
        'object': obj,
        'root_path': self.admin_site.root_path,
        'app_label': app_label,
    }
    context.update(extra_context or {})
    context_instance = template.RequestContext(request, current_app=self.admin_site.name)
    return render_to_response(self.object_history_template or [
        "admin/%s/%s/object_history.html" % (app_label, opts.object_name.lower()),
        "admin/%s/object_history.html" % app_label,
        "admin/object_history.html"
    ], context, context_instance=context_instance)
class InlineModelAdmin(BaseModelAdmin):
    """
    Options for inline editing of ``model`` instances.

    Provide ``name`` to specify the attribute name of the ``ForeignKey`` from
    ``model`` to its parent. This is required if ``model`` has more than one
    ``ForeignKey`` to its parent.
    """
    model = None                    # the child model edited inline (required)
    fk_name = None                  # name of the FK to the parent, if ambiguous
    formset = BaseInlineFormSet     # formset class used for the inline
    extra = 3                       # number of blank extra forms
    max_num = None                  # cap on total forms (None = unlimited)
    template = None                 # set by StackedInline/TabularInline
    verbose_name = None             # defaults to the model's verbose_name
    verbose_name_plural = None      # defaults to the model's verbose_name_plural
    can_delete = True               # show the delete checkbox on each form

    def __init__(self, parent_model, admin_site):
        self.admin_site = admin_site
        self.parent_model = parent_model
        self.opts = self.model._meta
        super(InlineModelAdmin, self).__init__()
        # Fall back to the model's own naming when not overridden.
        if self.verbose_name is None:
            self.verbose_name = self.model._meta.verbose_name
        if self.verbose_name_plural is None:
            self.verbose_name_plural = self.model._meta.verbose_name_plural

    def _media(self):
        """Collect the admin JS this inline needs (jQuery, inlines, and
        optionally prepopulate/filter widgets)."""
        from django.conf import settings
        js = ['js/jquery.min.js', 'js/jquery.init.js', 'js/inlines.min.js']
        if self.prepopulated_fields:
            js.append('js/urlify.js')
            js.append('js/prepopulate.min.js')
        if self.filter_vertical or self.filter_horizontal:
            js.extend(['js/SelectBox.js' , 'js/SelectFilter2.js'])
        return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
    media = property(_media)

    def get_formset(self, request, obj=None, **kwargs):
        """Returns a BaseInlineFormSet class for use in admin add/change views."""
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(kwargs.get("exclude", []))
        # Read-only fields must be excluded from the generated ModelForm.
        exclude.extend(self.get_readonly_fields(request, obj))
        # if exclude is an empty list we use None, since that's the actual
        # default
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "formset": self.formset,
            "fk_name": self.fk_name,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": curry(self.formfield_for_dbfield, request=request),
            "extra": self.extra,
            "max_num": self.max_num,
            "can_delete": self.can_delete,
        }
        defaults.update(kwargs)
        return inlineformset_factory(self.parent_model, self.model, **defaults)

    def get_fieldsets(self, request, obj=None):
        """Declared fieldsets if any; otherwise one unnamed fieldset with the
        form's fields plus the read-only ones."""
        if self.declared_fieldsets:
            return self.declared_fieldsets
        form = self.get_formset(request).form
        fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj))
        return [(None, {'fields': fields})]
class StackedInline(InlineModelAdmin):
    # Renders each inline form with the stacked (vertical) template.
    template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
    # Renders inline forms as rows of a single table.
    template = 'admin/edit_inline/tabular.html'
| apache-2.0 |
frostidaho/qtile | libqtile/widget/moc.py | 6 | 4530 | # -*- coding: utf-8 -*-
# Copyright (C) 2015, zordsdavini
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import base
import os
import subprocess
class Moc(base.ThreadPoolText):
    """A simple MOC widget.

    Show the artist and album of now listening song and allow basic mouse
    control from the bar:

    - toggle pause (or play if stopped) on left click;
    - skip forward in playlist on scroll up;
    - skip backward in playlist on scroll down.

    MOC (http://moc.daper.net) should be installed.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('play_color', '00ff00', 'Text colour when playing.'),
        ('noplay_color', 'cecece', 'Text colour when not playing.'),
        ('max_chars', 0, 'Maximum number of characters to display in widget.'),
        ('update_interval', 0.5, 'Update Time in seconds.'),
    ]

    def __init__(self, **config):
        base.ThreadPoolText.__init__(self, "", **config)
        self.add_defaults(Moc.defaults)
        # Last observed MOC state string ("PLAY"/"PAUSE"/"STOP");
        # empty until the first successful poll.
        self.status = ""
        self.local = None

    def get_info(self):
        """Return a dictionary with info about the current MOC status.

        Returns None when ``mocp -i`` output does not start with "State"
        (e.g. the server is not running).
        """
        try:
            output = self.call_process(['mocp', '-i'])
        except subprocess.CalledProcessError as err:
            # mocp exits non-zero e.g. when the server is down; its
            # output is still worth parsing.
            output = err.output.decode()
        if output.startswith("State"):
            output = output.splitlines()
            info = {'State': "",
                    'File': "",
                    'SongTitle': "",
                    'Artist': "",
                    'Album': ""}
            for line in output:
                for data in info:
                    if data in line:
                        # Strip "<Key>: " (key, colon, space) from the line.
                        info[data] = line[len(data) + 2:].strip()
                        break
            return info

    def now_playing(self):
        """Return a string with the now playing info (Artist - Song Title)."""
        info = self.get_info()
        now_playing = ""
        if info:
            status = info['State']
            # Recolour the layout only when the state actually changed.
            if self.status != status:
                self.status = status
                if self.status == "PLAY":
                    self.layout.colour = self.play_color
                else:
                    self.layout.colour = self.noplay_color
            title = info['SongTitle']
            artist = info['Artist']
            if title and artist:
                now_playing = "♫ {0} - {1}".format(artist, title)
            elif title:
                now_playing = "♫ {0}".format(title)
            else:
                # No tags: fall back to the file name without extension.
                basename = os.path.basename(info['File'])
                filename = os.path.splitext(basename)[0]
                now_playing = "♫ {0}".format(filename)
            if self.status == "STOP":
                now_playing = "♫"
        return now_playing

    def update(self, text):
        """Update the text box."""
        old_width = self.layout.width
        if not self.status:
            # Never saw a MOC state yet; nothing meaningful to show.
            return
        if len(text) > self.max_chars > 0:
            text = text[:self.max_chars] + "…"
        self.text = text
        # Redraw just this widget if the width is unchanged; otherwise the
        # whole bar must be re-laid-out.
        if self.layout.width == old_width:
            self.draw()
        else:
            self.bar.draw()

    def poll(self):
        """Poll content for the text box."""
        return self.now_playing()

    def button_press(self, x, y, button):
        """What to do when press a mouse button over the MOC widget.

        Will:

        - toggle pause (or play if stopped) on left click;
        - skip forward in playlist on scroll up;
        - skip backward in playlist on scroll down.
        """
        if button == 1:
            if self.status in ('PLAY', 'PAUSE'):
                subprocess.Popen(['mocp', '-G'])
            elif self.status == 'STOP':
                subprocess.Popen(['mocp', '-p'])
        elif button == 4:
            subprocess.Popen(['mocp', '-f'])
        elif button == 5:
            subprocess.Popen(['mocp', '-r'])
| mit |
usc-isi-i2/lsh-linking | swoosh/evaluate.py | 1 | 1496 | from evaluator import evaluator
class evaluate:
    """Group pairwise merge decisions into connected components and score
    them against a gold-standard clustering.

    Merge decisions are read from ``logs/merge_log.txt``; each line has the
    form ``a-b`` meaning records a and b were merged.

    num        -- number of cluster slots to allocate
    lencora    -- number of records in the corpus
    resultfile -- path the computed clusters are written to
    answerfile -- path of the gold clusters (one cluster per line,
                  whitespace-separated record ids)
    """

    def __init__(self, num, lencora, resultfile, answerfile):
        self.num = num
        self.lencora = lencora
        # Use a context manager so the log handle is closed even on error.
        with open('logs/merge_log.txt') as fp:
            self.pairs = [line.strip().split('-') for line in fp]
        self.marks = [0 for _ in range(lencora)]
        self.clusters = [set() for _ in range(num)]
        self.setnum = 0
        self.resultfile = resultfile
        self.answerfile = answerfile

    def _bfs(self, x):
        """Collect every record reachable from x into the current cluster.

        Fixed: the original implementation recursed once per reached node
        and hit Python's recursion limit on long merge chains; this
        iterative version visits the same set of nodes with an explicit
        stack.
        """
        stack = [x]
        while stack:
            cur = stack.pop()
            if self.marks[cur] == 1:
                continue
            self.marks[cur] = 1
            self.clusters[self.setnum].add(cur)
            cur_s = str(cur)
            for pair in self.pairs:
                if pair[0] == cur_s:
                    stack.append(int(pair[1]))
                elif pair[1] == cur_s:
                    stack.append(int(pair[0]))

    def do(self):
        """Compute connected components, write them to resultfile, then
        evaluate them against the clusters in answerfile."""
        for i in range(self.lencora):
            if self.marks[i] == 0:
                self._bfs(i)
                self.setnum += 1
        res = []
        with open(self.resultfile, 'w+') as fp:
            for cluster in self.clusters:
                members = list(cluster)
                res.append(members)
                # Like the original, empty clusters emit nothing (no blank
                # line); non-empty ones are space-separated, newline-ended.
                if members:
                    fp.write(' '.join(str(m) for m in members) + '\n')
        with open(self.answerfile) as fp:
            ans = [[int(tok) for tok in line.split()] for line in fp]
        ev = evaluator(self.lencora)
        ev.load_answer_clusters(ans)
        ev.evaluate_clusters(res)
| apache-2.0 |
davipeterlini/routeflow_oficial | pox/pox/lib/graph/graph.py | 26 | 17844 | # Copyright 2011 James McCauley
# Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
#import networkx as nx
import pox.lib.graph.minigraph as nx
from collections import defaultdict
from copy import copy
LINK = 'link'
class Link (object):
    """An undirected link between two (node, port) endpoints.

    Endpoints are stored as parallel lists: self._n holds the two nodes
    and self._p the corresponding ports; index 0 is the "first" end.
    """

    def reorder (self, l):
        """
        Flips a list of Links so that this node is first in each
        """
        return Link.order(l, self)

    @staticmethod
    def order (links, n):
        """
        Give a list of Links that each contain node n, flips any links so
        that n is always the first element of the link.
        """
        r = []
        for l in links:
            assert n in l
            if l._n[0] == n:
                r.append(l)
            else:
                r.append(l.flip())
        return r

    def __init__ (self, np1, np2):
        # np1 / np2 are (node, port) pairs.
        self._n = [np1[0], np2[0]]
        self._p = [np1[1], np2[1]]

    def _index (self, i):
        # Accepts either a node (looked up in self._n) or an index 0/1.
        if i in self._n:
            i = self._n.index(i)
        assert i == 0 or i == 1
        return i

    def flip (self):
        """
        Returns the same link, but flipped (a,b) becomes (b,a)
        """
        return Link(self[1], self[0])

    def port (self, n):
        """Returns the port by which node n is attached to this link."""
        # Fixed: previously called the bare name _index(n), which raised
        # NameError; the helper is a method.
        return self._p[self._index(n)]

    def other_port (self, n):
        """
        Returns the other end's port.
        See other().
        """
        return self.other(n)[1]

    def other (self, n):
        """
        Returns the other end of a link.
        Given a node or (node,port) that is part of this link, it returns
        the opposite end's (node,port).
        """
        if type(n) is tuple:
            if self[0] == n:
                return self[1]
            assert self[1] == n
            return self[0]
        if self[0][0] == n:
            return self[1]
        assert self[1][0] == n
        return self[0]

    def __contains__ (self, n):
        """
        Does this link contain (node,port) or node?
        """
        if type(n) is tuple:
            return n in [self[0], self[1]]
        # Fixed: was "n in [self._n]", which compared n against the whole
        # node *list* (always False for a node) instead of its members.
        return n in self._n

    def __len__ (self):
        return 2

    def __getitem__ (self, i):
        """
        Gets (node,port) based on index
        """
        i = self._index(i)
        return (self._n[i], self._p[i])

    def __repr__ (self):
        return "Link(%s, %s)" % (self[0], self[1])
class Node (object):
    """Marker base class for objects placed in a Graph; no interface is
    currently required of nodes."""
    pass
    #TODO: Add back in some convenience methods that call real methods
    #      on the parent graph? Or just remove?
def _void ():
    # Default factory used by ports_for_node's defaultdict: unknown ports
    # map to None rather than raising KeyError.
    return None
class LeaveException (RuntimeError):
    """Raised by query operators (e.g. an optional Field whose attribute is
    missing) to abandon matching the current node without it being an
    error."""
    pass
class Operator (object):
    """Base class for graph-query operators; the default repr is just the
    concrete class name in angle brackets."""

    def __repr__ (self):
        return "<" + type(self).__name__ + ">"
class Literal (Operator):
    """Wraps a constant value so it can stand in wherever an Operator is
    expected; evaluation ignores the node/link context entirely."""

    def __init__ (self, v):
        self._v = v

    def __call__ (self, n, li=None):
        return self._v

    def __repr__ (self):
        return repr(self._v)
class Anything (Operator):
    """Wildcard operator: matches every node (always evaluates True)."""

    def __call__ (self, n, li):
        return True

    def __repr__ (self):
        return "Anything"
class Self (Operator):
    """Evaluates to the node currently being examined."""

    def __call__ (self, n, li=None):
        return n

    def __repr__ (self):
        return "Self"
class Port (Operator):
    """In a link query, evaluates to this end's port (li[0][1]); invalid
    outside link queries."""

    def __call__ (self, n, li):
        if li is None:
            raise RuntimeError("You can only use Port for link queries")
        return li[0][1]

    def __repr__ (self):
        return "Port"
class OtherPort (Operator):
    """In a link query, evaluates to the far end's port (li[1][1]);
    invalid outside link queries."""

    def __call__ (self, n, li):
        if li is None:
            raise RuntimeError("You can only use OtherPort for link queries")
        return li[1][1]

    def __repr__ (self):
        return "OtherPort"
class Other (Operator):
    """In a link query, evaluates to the node at the far end (li[1][0]);
    invalid outside link queries."""

    def __call__ (self, n, li):
        if li is None:
            raise RuntimeError("You can only use Other for link queries")
        return li[1][0]

    def __repr__ (self):
        return "Other"
class Call (Operator):
    """Deferred call operator: the first positional operand evaluates to a
    callable, the remaining positional and keyword operands to its
    arguments.  Plain (non-Operator) values are wrapped in Literal.
    """
    def __init__ (_self, *arg, **kw):
        _self._arg = []
        for v in arg:
            ao = None
            if isinstance(v, Operator):
                ao = v
            else:
                ao = Literal(v)
            _self._arg.append(ao)
        _self._kw = {}
        for k,v in kw.iteritems():
            ao = None
            if isinstance(v, Operator):
                ao = v
            else:
                ao = Literal(v)
            # Fixed: was _self._kw[k].append(ao), a KeyError on a plain
            # dict -- any keyword argument made construction crash.
            _self._kw[k] = ao

    def __call__ (self, n, li):
        arglist = []
        for arg in self._arg:
            arglist.append(arg(n,li))
        kws = {}
        for k,v in self._kw.iteritems():
            # Fixed: keyword operands were evaluated as v(n), dropping the
            # link context the positional operands receive.
            kws[k] = v(n, li)
        func = arglist.pop(0)
        return func(*arglist, **kws)

    def __repr__ (self):
        args = [str(s) for s in self._arg[1:]]
        # Fixed: the keyword reprs were append()ed as a nested list and the
        # dict was iterated without items, so repr crashed or nested badly.
        args.extend("%s=%s" % (k, str(v)) for k, v in self._kw.iteritems())
        return "%s(%s)" % (self._arg[0], ', '.join(args))
class UnaryOp (Operator):
    """Base for one-operand operators; a non-Operator operand is wrapped
    in Literal.  Subclasses implement _apply."""

    def __init__ (self, operand):
        if isinstance(operand, Operator):
            self._operand = operand
        else:
            self._operand = Literal(operand)

    def __call__ (self, n, li):
        return self._apply(self._operand(n, li))

    def _apply (self, attr):
        raise RuntimeError("Unimplemented")
class BinaryOp (Operator):
    """Base for two-operand operators; non-Operator operands are wrapped
    in Literal.  Subclasses implement _apply(l, r) and may set _symbol
    for an infix repr."""

    def __init__ (self, left, right):
        self._left = left if isinstance(left, Operator) else Literal(left)
        self._right = right if isinstance(right, Operator) else Literal(right)

    def __call__ (self, n, li):
        return self._apply(self._left(n, li), self._right(n, li))

    def _apply (self, l, r):
        raise RuntimeError("Unimplemented")

    def __repr__ (self):
        if hasattr(self, '_symbol'):
            return "%s %s %s" % (self._left, self._symbol, self._right)
        return "%s(%s, %s)" % (self.__class__.__name__, self._left, self._right)
class Or (BinaryOp):
    """Logical OR: yields the left value when truthy, otherwise the right."""
    _symbol = "or"

    def _apply (self, l, r):
        if l:
            return l
        return r
class And (BinaryOp):
    """Logical AND: yields the left value when falsy, otherwise the right."""
    _symbol = "and"

    def _apply (self, l, r):
        if not l:
            return l
        return r
class LessThan (BinaryOp):
    """True when left < right.

    Fixed: _apply previously took a single ``value`` argument (wrong arity
    for BinaryOp.__call__) and referenced a nonexistent self._value, so
    every evaluation raised.
    """
    _symbol = "<"

    def _apply (self, l, r):
        return l < r
class GreaterThan (BinaryOp):
    """True when left > right.

    Fixed: _apply referenced undefined names ``value`` and ``self._value``
    instead of its own l/r parameters.
    """
    _symbol = ">"

    def _apply (self, l, r):
        return l > r
class LessThanEqualTo (BinaryOp):
    """True when left <= right.

    Fixed: _apply referenced undefined names ``value`` and ``self._value``
    instead of its own l/r parameters.
    """
    _symbol = "<="

    def _apply (self, l, r):
        return l <= r
class GreaterThanEqualTo (BinaryOp):
    """True when left >= right.

    Fixed: _apply referenced undefined names and compared with ``>``
    instead of ``>=``; the repr symbol was also "=>" rather than ">=".
    """
    _symbol = ">="

    def _apply (self, l, r):
        return l >= r
class Not (UnaryOp):
    """Logical negation of its operand's truth value."""

    def _apply (self, v):
        return False if v else True

    def __repr__ (self):
        return "(Not %s)" % (self._operand,)
class Length (UnaryOp):
    """Evaluates to len() of its operand's value."""

    def _apply (self, v):
        return len(v)

    def __repr__ (self):
        return "len(%s)" % (self._operand,)
class Index (BinaryOp):
    """Subscript operator: evaluates to left[right]."""

    def _apply (self, l, r):
        return l[r]

    def __repr__ (self):
        return "%s[%s]" % (self._left, self._right)
_dummy = object()
class NodeOp (Operator):
    """
    Can be a binary operator, or if only one argument supplied, the
    left one defaults to the node.
    """

    def __init__ (self, left, right=_dummy):
        if right is _dummy:
            # Single-operand form: the given value becomes the right side
            # and the current node (Self) becomes the left.
            left, right = Self(), left
        self._left = left if isinstance(left, Operator) else Literal(left)
        self._right = right if isinstance(right, Operator) else Literal(right)

    def __call__ (self, n, li):
        return self._apply(self._left(n, li), self._right(n, li))

    def _apply (self, l, r):
        raise RuntimeError("Unimplemented")

    def __repr__ (self):
        if hasattr(self, '_symbol'):
            return "%s %s %s" % (self._left, self._symbol, self._right)
        return "%s(%s, %s)" % (self.__class__.__name__, self._left, self._right)
class Equal (NodeOp):
    """Equality (==) between the evaluated operands."""
    _symbol = "=="

    def _apply (self, l, r):
        return l == r
class Is (NodeOp):
    """Identity test (is) between the evaluated operands."""
    _symbol = "is"

    def _apply (self, l, r):
        return l is r
class Field (NodeOp):
    """Attribute lookup: right names an attribute of left's value; a
    trailing "()" on the name means the attribute is called.  When
    optional (the default), a missing attribute raises LeaveException to
    skip the node instead of failing the query.
    """

    def __init__ (self, left, right=_dummy, optional=True):
        NodeOp.__init__(self, left, right)
        self._optional = optional

    def _apply (self, l, r):
        invoke = r.endswith("()")
        name = r[:-2] if invoke else r
        if self._optional and not hasattr(l, name):
            raise LeaveException
        value = getattr(l, name)
        return value() if invoke else value
F = Field # Short alias
class IsInstance (NodeOp):
    """isinstance() test between the evaluated operands."""

    def _apply (self, l, r):
        return isinstance(l, r)

    def __repr__ (self):
        return "isinstance(%s, %s)" % (self._left, self._right)
class IsType (NodeOp):
    """Exact-type test; the right operand may be a type object or a
    type-name string."""

    def _apply (self, l, r):
        if not isinstance(r, str):
            return type(l) is r
        return type(l).__name__ == r

    def __repr__ (self):
        return "type(%s) == %s" % (self._left, self._right)
class ConnectedTo (NodeOp):
    """True when the left value reports being connected_to the right."""

    def _apply (self, l, r):
        return l.connected_to(r)

    def __repr__ (self):
        return "%s.connected_to(%s)" % (self._left, self._right)
class InValues (BinaryOp):
    """True when the left value appears among right.values() (e.g. the
    values of a dict-like object).

    Fixed: the previous __init__ called super(Member, ...) -- the wrong
    class -- and referenced an undefined name ``optional``, so every
    construction raised NameError.  BinaryOp.__init__ already does the
    right thing, so the override is removed (same (left, right)
    constructor signature).
    """

    def _apply (self, l, r):
        return l in r.values()
class In (BinaryOp):
    """Membership test: left in right."""

    def _apply (self, l, r):
        return l in r
class Member (BinaryOp):
    """Attribute access (left.right); when optional, a missing attribute
    raises LeaveException to skip the node instead of failing."""
    _symbol = "."

    def __init__ (self, left, right, optional = True):
        BinaryOp.__init__(self, left, right)
        self._optional = optional

    def _apply (self, l, r):
        if self._optional and not hasattr(l, r):
            raise LeaveException
        return getattr(l, r)
class Graph (object):
def __init__ (self):
    """Create an empty topology backed by a multigraph."""
    self._g = nx.MultiGraph()
    # Per-node port map: node -> {local port: (other node, other port)}
    self.node_port = {}
def __contains__ (self, n):
    """Membership test: is node n part of this graph?"""
    return n in self._g
def add (self, node):
    """Add a node to the graph with an initially empty port map."""
    self._g.add_node(node)
    self.node_port[node] = {}
def remove (self, node):
    """Remove a node (and its incident edges) from the underlying graph.

    NOTE(review): the node's entry in self.node_port is not removed here
    -- presumably leaked; confirm before relying on node_port contents.
    """
    self._g.remove_node(node)
def neighbors (self, n):
    """All nodes directly linked to n."""
    return self._g.neighbors(n)
def find_port (self, node1, node2):
    """Return (node1's port, node2's port) for the first edge found
    incident to either node, or None when there is no such edge."""
    for _n1, _n2, _k, d in self._g.edges([node1, node2], data=True, keys=True):
        lnk = d[LINK]
        return (lnk[node1][1], lnk[node2][1])
    return None
def connected(self, node1, node2):
    """True when node1 and node2 share a direct link.

    (Idiom fix: compare against None with ``is not`` rather than ``!=``.)
    """
    return self.find_port(node1, node2) is not None
def disconnect_port (self, np):
    """
    Disconnects the given (node,port)

    Removes every edge whose link contains the (node, port) endpoint and
    drops both endpoints' port-map entries.  Returns the number of edges
    removed (0 when the port was not connected).
    """
    assert type(np) is tuple
    remove = []
    if self.port_for_node(np[0], np[1]) is None:
        # Port not connected to anything; nothing to do.
        return 0
    # Scan edges incident to this node and the node its port points at.
    for n1,n2,k,d in self._g.edges([np[0], self.node_port[np[0]][np[1]][0]], data=True, keys=True):
        if np in d[LINK]:
            remove.append((n1,n2,k))
            # Drop both ends' port-map entries for this link.
            del self.node_port[n1][d[LINK][n1][1]]
            del self.node_port[n2][d[LINK][n2][1]]
    # Delete edges after the scan so the edge iterator is not invalidated.
    for e in remove:
        #print "remove",e
        self._g.remove_edge(*e)
    return len(remove)
def unlink (self, np1, np2):
    """Remove link(s) between two endpoints.

    Either argument may be a bare node or a (node, port) pair.  When one
    of them is a pair, only that specific port is disconnected (matching
    the original behavior); when both are bare nodes, every edge between
    them is removed.  Returns the number of links removed.

    Fixed: the (node, port) branches called the bare name
    ``disconnect_port`` -- a NameError -- instead of the method.
    """
    count = 0
    if isinstance(np1, tuple):
        count = self.disconnect_port(np1)
    elif isinstance(np2, tuple):
        count = self.disconnect_port(np2)
    else:
        for n1, n2, k, d in self._g.edges([np1, np2], data=True, keys=True):
            self._g.remove_edge(n1, n2, k)
            del self.node_port[n1][d[LINK][n1][1]]
            del self.node_port[n2][d[LINK][n2][1]]
            count = count + 1
    return count
def link (self, np1, np2):
    """
    Links two nodes on given ports
    np1 is (node1, port1)
    np2 is (node2, port2)

    A bare node (anything not subscriptable) is assigned its first free
    port automatically.  Any existing link on either port is removed
    first, so a port carries at most one link.
    """
    #FIXME: the portless variation doesn't really make sense with
    #       allow_multiples yet.
    try:
        _ = np1[0]
    except:
        # portless (hacky) -- assumes the bare node exposes a .ports
        # collection of in-use port numbers; TODO confirm that contract.
        for free in xrange(1000):
            if free not in np1.ports:
                np1 = (np1,free)
                break
    try:
        _ = np2[0]
    except:
        # portless (hacky)
        for free in xrange(1000):
            if free not in np2.ports:
                np2 = (np2,free)
                break
    self._g.add_node(np1[0])
    self._g.add_node(np2[0])
    # Evict whatever was previously attached to either port.
    self.disconnect_port(np1)
    self.disconnect_port(np2)
    self._g.add_edge(np1[0],np2[0],link=Link(np1,np2))
    # NOTE(review): nodes added only via add_node above (never add()) have
    # no node_port entry, so these lines would KeyError -- presumably
    # callers always add() nodes first; confirm.
    self.node_port[np1[0]][np1[1]] = np2
    self.node_port[np2[0]][np2[1]] = np1
def find_links (self, query1=None, query2=()):
# No idea if new link query stuff works.
if query2 is None: query2 = query1
if query1 == (): query1 = None
if query2 == (): query2 = None
o = set()
for n1,n2,k,d in self._g.edges(data=True, keys=True):
l = d[LINK]
ok = False
if query1 is None or self._test_node(l[0][0], args=(query1,), link=l):
if query2 is None or self._test_node(l[1][0], args=(query2,), link=l):
ok = True
if not ok and (query1 != query2):
if query2 is None or self._test_node(l[0][0], args=(query2,), link=l):
if query1 is None or self._test_node(l[1][0], args=(query1,), link=l):
ok = True
l = l.flip()
if ok:
o.add(l)
return list(o)
def ports_for_node (self, node):
"""
Map of local port -> (other, other_port)
"""
ports = defaultdict(_void)
for n1, n2, k, d in self._g.edges([node], data=True, keys=True):
p = d[LINK]
assert n1 is node
assert ports.get(p[node]) is None
ports[p[node][1]] = p.other(node)
return ports
def port_for_node(self, node, port):
assert node in self.node_port
return self.node_port[node].get(port)
def disconnect_nodes(self, node1, node2):
""" Disconnect node1 from node2. Either of node1 or node2
can be a node, or a (node, port) pair
Returns number of nodes disconnected
"""
self.unlink(node1, node2)
def disconnect_node(self, node1):
""" Disconnecte node from all neighbours """
for neighbor in self.neighbors(node1):
self.disconnect_nodes(node1, neighbor)
def get_one_link (self, query1=None, query2=(), **kw):
return self.get_link(query1, query2, one=True, **kw)
def get_link (self, query1=None, query2=(), **kw):
"""
Keyword argument "default" lets you set a default value if
no node is found. Note that this means you must use
Equal(F("default"), <value>) to actually check a field called
"default" on a node.
"""
if 'default' in kw:
has_default = True
default = kw['default']
del kw['default']
else:
has_default = False
one = False
if 'one' in kw:
one = kw['one']
del kw['one']
assert len(kw) == 0
r = self.find_links(query1, query2)
if len(r) > 1 and one:
raise RuntimeError("More than one match")
elif len(r) == 0:
if has_default:
return default
raise RuntimeError("Could not get element")
return r[0]
def has_link (self, query1=None, query2=()):
# Really bad implementation. We can easily scape early.
return len(self.find_links(query1, query2)) > 0
def _test_node (self, n, args=(), kw={}, link=None):
#TODO: Should use a special value for unspecified n2
for k,v in kw.iteritems():
if k == "is_a":
if not isinstance(n,v): return False
elif k == "type":
if type(n) is not v: return False
else:
if not hasattr(n, k): return False
if getattr(n, k) != v: return False
for a in args:
try:
if not a(n, link):
return False
except LeaveException:
return False
return True
def find (self, *args, **kw):
r = []
def test (n):
return self._test_node(n, args, kw)
for n in self._g.nodes():
if test(n):
r.append(n)
return r
def get_one (self, *args, **kw):
kw['one'] = True
return self.get(*args, **kw)
def get (self, *args, **kw):
"""
Keyword argument "default" lets you set a default value if
no node is found. Note that this means you must use
Equal(F("default"), <value>) to actually check a field called
"default" on a node.
"""
if 'default' in kw:
has_default = True
default = kw['default']
del kw['default']
else:
has_default = False
one = False
if 'one' in kw:
del kw['one']
one = True
r = self.find(*args,**kw)
if len(r) > 1 and one:
raise RuntimeError("More than one match")
elif len(r) == 0:
if has_default:
return default
raise RuntimeError("Could not get element")
return r[0]
def has (self, *args, **kw):
# Really bad implementation. We can easily scape early.
return len(self.find(*args,**kw)) > 0
def __len__ (self):
return len(self._g)
def test():
    """Ad-hoc smoke test: builds a small Graph of three node classes,
    exercises link/unlink/find/query helpers while printing results, then
    drops into an interactive shell for manual poking.
    (Python 2 only -- uses print statements and dict.iteritems.)
    """
    class Node1 (object):
        _next_num = 0
        def __init__ (self):
            # Sequential per-class id makes repr output readable.
            self._num = self.__class__._next_num
            self.__class__._next_num += 1
        def __repr__ (self):
            return "Node1 #" + str(self._num)
    class Node2 (object):
        _next_num = 0
        def __init__ (self):
            self._num = self.__class__._next_num
            self.__class__._next_num += 1
        def __repr__ (self):
            return "Node2 #" + str(self._num)
    class Node3 (Node1):
        # Subclass of Node1 so is_a/type queries can be told apart below.
        _next_num = 0
        def __init__ (self):
            self._num = self.__class__._next_num
            self.__class__._next_num += 1
        def __repr__ (self):
            return "Node3 #" + str(self._num)
    g = Graph()
    n1 = Node1();n1.label=1
    n2 = Node2();n2.label=2
    n3 = Node3();n3.label=3
    g.add(n1)
    g.add(n2)
    g.add(n3)
    g.link((n1,0),(n2,0))
    g.link((n1,1),(n3,0))
    # is_a matches subclasses (Node3 counts as Node1); type is exact.
    print g.find(is_a=Node1)
    print g.find(is_a=Node2)
    print g.find(type=Node1)
    print g.find(type=Node3)
    print g.find_links()
    print "=== NEIGHBORS ==="
    print g.neighbors(n1)
    print g.find_port(n1, n2)
    print g.connected(n1, n3)
    print g.ports_for_node(n3)
    print [(n, x[0], x[1][0], x[1][1]) for n in g.find(is_a=Node1) for x in g.ports_for_node(n).iteritems() ]
    # Re-link after disconnecting to exercise the port-reuse path.
    g.disconnect_nodes(n1, n3)
    print g.find_links()
    g.link((n2, 1), (n3, 1))
    g.link((n1,1), (n3, 0))
    g.link((n1,0), (n2, 0))
    print g.find_links()
    g.disconnect_node(n3)
    print g.find_links()
    import code
    code.interact(local=locals())
# Allow running this module directly as an interactive smoke test.
if __name__ == "__main__":
    test()
| apache-2.0 |
yongshengwang/hue | desktop/core/ext-py/requests-2.6.0/build/lib/requests/packages/urllib3/fields.py | 1007 | 5833 | import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.
    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    # No filename at all (None or empty): nothing to guess from.
    if not filename:
        return default
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or default
def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.
    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.
    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    # Fast path: no quote/backslash/newline characters and pure ASCII ->
    # emit the simple quoted form name="value".
    if not any(ch in value for ch in '"\\\r\n'):
        result = '%s="%s"' % (name, value)
        try:
            result.encode('ascii')
        except UnicodeEncodeError:
            # Non-ASCII value: fall through to the RFC 2231 encoding below.
            pass
        else:
            return result
    if not six.PY3:  # Python 2: encode_rfc2231 expects a byte string here.
        value = value.encode('utf-8')
    # RFC 2231 extended notation: name*=utf-8''percent-encoded-value
    value = email.utils.encode_rfc2231(value, 'utf-8')
    value = '%s*=%s' % (name, value)
    return value
class RequestField(object):
    """
    A data container for request body parameters.
    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """
    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        self.headers = {}
        if headers:
            # Copy so later header mutations don't leak into the caller's dict.
            self.headers = dict(headers)
    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::
            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',
        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # 2-tuple: MIME type omitted, guess it from the filename.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Plain value: a simple (non-file) form field.
            filename = None
            content_type = None
            data = value
        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)
        return request_param
    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.
        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)
    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.
        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.
        :param header_parts:
            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()
        for name, value in iterable:
            if value:
                # Falsy values (None, '') are skipped entirely.
                parts.append(self._render_part(name, value))
        return '; '.join(parts)
    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []
        # Emit the well-known headers first, in this fixed order ...
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
        # ... then any remaining custom headers in dict iteration order.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))
        # Trailing element yields the blank line separating headers from body.
        lines.append('\r\n')
        return '\r\n'.join(lines)
    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.
        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.
        :param content_disposition:
            The 'Content-Disposition' of the request body; defaults to 'form-data'.
        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        self.headers['Content-Disposition'] = content_disposition or 'form-data'
        # '; '.join(['', parts]) == '; ' + parts: appends
        # '; name="..."; filename="..."' after the disposition token.
        self.headers['Content-Disposition'] += '; '.join([
            '', self._render_parts(
                (('name', self._name), ('filename', self._filename))
            )
        ])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
| apache-2.0 |
# Transliterations for Unicode code points U+B600..U+B6FF: Hangul syllables
# whose initial consonant is SSANGTIKEUT ("dd"), indexed by the low byte of
# the code point.  Each full run of 28 entries walks one medial vowel through
# every final consonant in jamo order
# (-, g, gg, gs, n, nj, nh, d, l, lg, lm, lb, ls, lt, lp, lh,
#  m, b, bs, s, ss, ng, j, c, k, t, p, h).
data = (
'ddyels', # 0x00
'ddyelt', # 0x01
'ddyelp', # 0x02
'ddyelh', # 0x03
'ddyem', # 0x04
'ddyeb', # 0x05
'ddyebs', # 0x06
'ddyes', # 0x07
'ddyess', # 0x08
'ddyeng', # 0x09
'ddyej', # 0x0a
'ddyec', # 0x0b
'ddyek', # 0x0c
'ddyet', # 0x0d
'ddyep', # 0x0e
'ddyeh', # 0x0f
'ddo', # 0x10
'ddog', # 0x11
'ddogg', # 0x12
'ddogs', # 0x13
'ddon', # 0x14
'ddonj', # 0x15
'ddonh', # 0x16
'ddod', # 0x17
'ddol', # 0x18
'ddolg', # 0x19
'ddolm', # 0x1a
'ddolb', # 0x1b
'ddols', # 0x1c
'ddolt', # 0x1d
'ddolp', # 0x1e
'ddolh', # 0x1f
'ddom', # 0x20
'ddob', # 0x21
'ddobs', # 0x22
'ddos', # 0x23
'ddoss', # 0x24
'ddong', # 0x25
'ddoj', # 0x26
'ddoc', # 0x27
'ddok', # 0x28
'ddot', # 0x29
'ddop', # 0x2a
'ddoh', # 0x2b
'ddwa', # 0x2c
'ddwag', # 0x2d
'ddwagg', # 0x2e
'ddwags', # 0x2f
'ddwan', # 0x30
'ddwanj', # 0x31
'ddwanh', # 0x32
'ddwad', # 0x33
'ddwal', # 0x34
'ddwalg', # 0x35
'ddwalm', # 0x36
'ddwalb', # 0x37
'ddwals', # 0x38
'ddwalt', # 0x39
'ddwalp', # 0x3a
'ddwalh', # 0x3b
'ddwam', # 0x3c
'ddwab', # 0x3d
'ddwabs', # 0x3e
'ddwas', # 0x3f
'ddwass', # 0x40
'ddwang', # 0x41
'ddwaj', # 0x42
'ddwac', # 0x43
'ddwak', # 0x44
'ddwat', # 0x45
'ddwap', # 0x46
'ddwah', # 0x47
'ddwae', # 0x48
'ddwaeg', # 0x49
'ddwaegg', # 0x4a
'ddwaegs', # 0x4b
'ddwaen', # 0x4c
'ddwaenj', # 0x4d
'ddwaenh', # 0x4e
'ddwaed', # 0x4f
'ddwael', # 0x50
'ddwaelg', # 0x51
'ddwaelm', # 0x52
'ddwaelb', # 0x53
'ddwaels', # 0x54
'ddwaelt', # 0x55
'ddwaelp', # 0x56
'ddwaelh', # 0x57
'ddwaem', # 0x58
'ddwaeb', # 0x59
'ddwaebs', # 0x5a
'ddwaes', # 0x5b
'ddwaess', # 0x5c
'ddwaeng', # 0x5d
'ddwaej', # 0x5e
'ddwaec', # 0x5f
'ddwaek', # 0x60
'ddwaet', # 0x61
'ddwaep', # 0x62
'ddwaeh', # 0x63
'ddoe', # 0x64
'ddoeg', # 0x65
'ddoegg', # 0x66
'ddoegs', # 0x67
'ddoen', # 0x68
'ddoenj', # 0x69
'ddoenh', # 0x6a
'ddoed', # 0x6b
'ddoel', # 0x6c
'ddoelg', # 0x6d
'ddoelm', # 0x6e
'ddoelb', # 0x6f
'ddoels', # 0x70
'ddoelt', # 0x71
'ddoelp', # 0x72
'ddoelh', # 0x73
'ddoem', # 0x74
'ddoeb', # 0x75
'ddoebs', # 0x76
'ddoes', # 0x77
'ddoess', # 0x78
'ddoeng', # 0x79
'ddoej', # 0x7a
'ddoec', # 0x7b
'ddoek', # 0x7c
'ddoet', # 0x7d
'ddoep', # 0x7e
'ddoeh', # 0x7f
'ddyo', # 0x80
'ddyog', # 0x81
'ddyogg', # 0x82
'ddyogs', # 0x83
'ddyon', # 0x84
'ddyonj', # 0x85
'ddyonh', # 0x86
'ddyod', # 0x87
'ddyol', # 0x88
'ddyolg', # 0x89
'ddyolm', # 0x8a
'ddyolb', # 0x8b
'ddyols', # 0x8c
'ddyolt', # 0x8d
'ddyolp', # 0x8e
'ddyolh', # 0x8f
'ddyom', # 0x90
'ddyob', # 0x91
'ddyobs', # 0x92
'ddyos', # 0x93
'ddyoss', # 0x94
'ddyong', # 0x95
'ddyoj', # 0x96
'ddyoc', # 0x97
'ddyok', # 0x98
'ddyot', # 0x99
'ddyop', # 0x9a
'ddyoh', # 0x9b
'ddu', # 0x9c
'ddug', # 0x9d
'ddugg', # 0x9e
'ddugs', # 0x9f
'ddun', # 0xa0
'ddunj', # 0xa1
'ddunh', # 0xa2
'ddud', # 0xa3
'ddul', # 0xa4
'ddulg', # 0xa5
'ddulm', # 0xa6
'ddulb', # 0xa7
'dduls', # 0xa8
'ddult', # 0xa9
'ddulp', # 0xaa
'ddulh', # 0xab
'ddum', # 0xac
'ddub', # 0xad
'ddubs', # 0xae
'ddus', # 0xaf
'dduss', # 0xb0
'ddung', # 0xb1
'dduj', # 0xb2
'dduc', # 0xb3
'dduk', # 0xb4
'ddut', # 0xb5
'ddup', # 0xb6
'dduh', # 0xb7
'ddweo', # 0xb8
'ddweog', # 0xb9
'ddweogg', # 0xba
'ddweogs', # 0xbb
'ddweon', # 0xbc
'ddweonj', # 0xbd
'ddweonh', # 0xbe
'ddweod', # 0xbf
'ddweol', # 0xc0
'ddweolg', # 0xc1
'ddweolm', # 0xc2
'ddweolb', # 0xc3
'ddweols', # 0xc4
'ddweolt', # 0xc5
'ddweolp', # 0xc6
'ddweolh', # 0xc7
'ddweom', # 0xc8
'ddweob', # 0xc9
'ddweobs', # 0xca
'ddweos', # 0xcb
'ddweoss', # 0xcc
'ddweong', # 0xcd
'ddweoj', # 0xce
'ddweoc', # 0xcf
'ddweok', # 0xd0
'ddweot', # 0xd1
'ddweop', # 0xd2
'ddweoh', # 0xd3
'ddwe', # 0xd4
'ddweg', # 0xd5
'ddwegg', # 0xd6
'ddwegs', # 0xd7
'ddwen', # 0xd8
'ddwenj', # 0xd9
'ddwenh', # 0xda
'ddwed', # 0xdb
'ddwel', # 0xdc
'ddwelg', # 0xdd
'ddwelm', # 0xde
'ddwelb', # 0xdf
'ddwels', # 0xe0
'ddwelt', # 0xe1
'ddwelp', # 0xe2
'ddwelh', # 0xe3
'ddwem', # 0xe4
'ddweb', # 0xe5
'ddwebs', # 0xe6
'ddwes', # 0xe7
'ddwess', # 0xe8
'ddweng', # 0xe9
'ddwej', # 0xea
'ddwec', # 0xeb
'ddwek', # 0xec
'ddwet', # 0xed
'ddwep', # 0xee
'ddweh', # 0xef
'ddwi', # 0xf0
'ddwig', # 0xf1
'ddwigg', # 0xf2
'ddwigs', # 0xf3
'ddwin', # 0xf4
'ddwinj', # 0xf5
'ddwinh', # 0xf6
'ddwid', # 0xf7
'ddwil', # 0xf8
'ddwilg', # 0xf9
'ddwilm', # 0xfa
'ddwilb', # 0xfb
'ddwils', # 0xfc
'ddwilt', # 0xfd
'ddwilp', # 0xfe
'ddwilh', # 0xff
)
| apache-2.0 |
kunallillaney/thunder | test/test_seriesloader.py | 7 | 10333 | import glob
import json
import os
import struct
import unittest
from nose.tools import assert_almost_equal, assert_equals, assert_true, assert_raises
from numpy import allclose, arange, array, array_equal
from numpy import dtype as dtypeFunc
from thunder.rdds.fileio.seriesloader import SeriesLoader
from thunder.utils.common import smallestFloatType
from test_utils import PySparkTestCase, PySparkTestCaseWithOutputDir
# Pillow/PIL is an optional test dependency: keep the module importable
# without it and let image-based tests be skipped via this flag.
_have_image = False
try:
    from PIL import Image
    _have_image = True
except ImportError:
    # PIL not available; skip tests that require it
    Image = None
class SeriesBinaryTestData(object):
    """
    Fixture describing binary Series records for SeriesLoader tests.

    Holds parallel sequences of key tuples and value tuples together with
    their numpy dtypes, and can serialize itself in the packed binary
    layout (keys then values, record by record) that SeriesLoader's
    fromBinary expects.
    """
    __slots__ = ('keys', 'vals', 'keyDtype', 'valDtype')
    def __init__(self, keys, vals, keyDtype, valDtype):
        """
        Constructor; prefer the fromArrays factory, which validates input.

        Expects m x n keys and m x p vals, plus numpy dtypes for each.
        """
        self.keys = keys
        self.vals = vals
        self.keyDtype = keyDtype
        self.valDtype = valDtype
    @property
    def keyStructFormat(self):
        # One struct type code per key element, e.g. 'hhh' for 3 int16 keys.
        return self.nkeys * self.keyDtype.char
    @property
    def valStructFormat(self):
        return self.nvals * self.valDtype.char
    @property
    def data(self):
        # Pair each key tuple with its corresponding value tuple.
        return zip(self.keys, self.vals)
    @property
    def nkeys(self):
        # Number of elements per key (assumes uniform rows; see fromArrays).
        return len(self.keys[0])
    @property
    def nvals(self):
        return len(self.vals[0])
    def writeToFile(self, f):
        """
        Writes own key, value data to passed file handle in binary format

        Parameters
        ----------
        f: file handle, open for writing
            f will remain open after this call
        """
        keyFormat = self.keyStructFormat
        valFormat = self.valStructFormat
        for recordKeys, recordVals in self.data:
            f.write(struct.pack(keyFormat, *recordKeys))
            f.write(struct.pack(valFormat, *recordVals))
    @staticmethod
    def _validateLengths(dat):
        # All rows must have the same length as the first one.
        expected = len(dat[0])
        for row in dat:
            assert len(row) == expected, "Data of unequal lengths, %d and %d" % (expected, len(row))
    @staticmethod
    def _normalizeDType(dtypeInstance, data):
        # None means "infer from the data" (data is expected to expose
        # a numpy .dtype in that case); otherwise coerce to a numpy dtype.
        if dtypeInstance is None:
            return data.dtype
        return dtypeFunc(dtypeInstance)
    @classmethod
    def fromArrays(cls, keys, vals, keyDtype=None, valDtype=None):
        """
        Factory method for SeriesBinaryTestData; validates input before
        delegating to __init__.

        Expects m x n keys and m x p vals; key/val dtypes may be anything
        castable to a numpy dtype (or None to infer from the data).
        """
        keyDtype = cls._normalizeDType(keyDtype, keys)
        valDtype = cls._normalizeDType(valDtype, vals)
        assert len(keys) == len(vals), "Unequal numbers of keys and values, %d and %d" % (len(keys), len(vals))
        cls._validateLengths(keys)
        cls._validateLengths(vals)
        return cls(keys, vals, keyDtype, valDtype)
class TestSeriesLoader(PySparkTestCase):
    """Tests for SeriesLoader.fromArraysAsImages, using the SparkContext
    (self.sc) provided by PySparkTestCase."""
    @staticmethod
    def _findTestResourcesDir(resourcesDirName="resources"):
        # Resolve <this test dir>/resources, failing loudly if absent.
        testDirPath = os.path.dirname(os.path.realpath(__file__))
        testResourcesDirPath = os.path.join(testDirPath, resourcesDirName)
        if not os.path.isdir(testResourcesDirPath):
            raise IOError("Test resources directory "+testResourcesDirPath+" not found")
        return testResourcesDirPath
    @staticmethod
    def _findSourceTreeDir(dirName="utils/data"):
        # Resolve a directory inside the thunder source tree, relative to
        # this test file's location.
        testDirPath = os.path.dirname(os.path.realpath(__file__))
        testResourcesDirPath = os.path.join(testDirPath, "..", "thunder", dirName)
        if not os.path.isdir(testResourcesDirPath):
            raise IOError("Directory "+testResourcesDirPath+" not found")
        return testResourcesDirPath
    def test_fromArrays(self):
        # A single 2x4 image becomes 8 single-element series records keyed
        # by (x, y) in column-major (Fortran-like) key order.
        ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
        series = SeriesLoader(self.sc).fromArraysAsImages(ary)
        seriesVals = series.collect()
        seriesAry = series.pack()
        # check ordering of keys
        assert_equals((0, 0), seriesVals[0][0])  # first key
        assert_equals((1, 0), seriesVals[1][0])  # second key
        assert_equals((2, 0), seriesVals[2][0])
        assert_equals((3, 0), seriesVals[3][0])
        assert_equals((0, 1), seriesVals[4][0])
        assert_equals((1, 1), seriesVals[5][0])
        assert_equals((2, 1), seriesVals[6][0])
        assert_equals((3, 1), seriesVals[7][0])
        # check dimensions tuple is reversed from numpy shape
        assert_equals(ary.shape[::-1], series.dims.count)
        # check that values are in original order
        collectedVals = array([kv[1] for kv in seriesVals], dtype=dtypeFunc('int16')).ravel()
        assert_true(array_equal(ary.ravel(), collectedVals))
        # check that packing returns transpose of original array
        assert_true(array_equal(ary.T, seriesAry))
    def test_fromMultipleArrays(self):
        # Two images of the same shape: each record's value holds one
        # element per input array (i.e. the "time" axis).
        ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
        ary2 = arange(8, 16, dtype=dtypeFunc('int16')).reshape((2, 4))
        series = SeriesLoader(self.sc).fromArraysAsImages([ary, ary2])
        seriesVals = series.collect()
        seriesAry = series.pack()
        # check ordering of keys
        assert_equals((0, 0), seriesVals[0][0])  # first key
        assert_equals((1, 0), seriesVals[1][0])  # second key
        assert_equals((3, 0), seriesVals[3][0])
        assert_equals((0, 1), seriesVals[4][0])
        assert_equals((3, 1), seriesVals[7][0])
        # check dimensions tuple is reversed from numpy shape
        assert_equals(ary.shape[::-1], series.dims.count)
        # check that values are in original order, with subsequent point concatenated in values
        collectedVals = array([kv[1] for kv in seriesVals], dtype=dtypeFunc('int16'))
        assert_true(array_equal(ary.ravel(), collectedVals[:, 0]))
        assert_true(array_equal(ary2.ravel(), collectedVals[:, 1]))
        # check that packing returns concatenation of input arrays, with time as first dimension
        assert_true(array_equal(ary.T, seriesAry[0]))
        assert_true(array_equal(ary2.T, seriesAry[1]))
class TestSeriesBinaryLoader(PySparkTestCaseWithOutputDir):
    """Round-trip tests: write SeriesBinaryTestData fixtures to disk as
    packed binary, load via SeriesLoader.fromBinary, and compare."""
    def _run_tst_fromBinary(self, useConfJson=False):
        # run this as a single big test so as to avoid repeated setUp and tearDown of the spark context
        # data will be a sequence of test data
        # all keys and all values in a test data item must be of the same length
        # keys get converted to ints regardless of raw input format
        DATA = [
            SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int16'),
            SeriesBinaryTestData.fromArrays([[1, 2, 3], [5, 6, 7]], [[11], [12]], 'int16', 'int16'),
            SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int32'),
            SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int32', 'int16'),
            SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'int16', 'float32'),
            SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
            SeriesBinaryTestData.fromArrays([[2, 3, 4]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
        ]
        for itemidx, item in enumerate(DATA):
            # Each fixture gets its own input directory under the test's
            # scratch output dir.
            outSubdir = os.path.join(self.outputdir, 'input%d' % itemidx)
            os.mkdir(outSubdir)
            fname = os.path.join(outSubdir, 'inputfile%d.bin' % itemidx)
            with open(fname, 'wb') as f:
                item.writeToFile(f)
            loader = SeriesLoader(self.sc)
            if not useConfJson:
                # Pass the record layout explicitly as keyword arguments.
                series = loader.fromBinary(outSubdir, nkeys=item.nkeys, nvalues=item.nvals, keyType=str(item.keyDtype),
                                           valueType=str(item.valDtype))
            else:
                # write configuration file
                # NOTE(review): json.dump into a 'wb' handle is Python 2
                # specific -- confirm before running under Python 3.
                conf = {'input': outSubdir,
                        'nkeys': item.nkeys, 'nvalues': item.nvals,
                        'valuetype': str(item.valDtype), 'keytype': str(item.keyDtype)}
                with open(os.path.join(outSubdir, "conf.json"), 'wb') as f:
                    json.dump(conf, f, indent=2)
                series = loader.fromBinary(outSubdir)
            seriesData = series.rdd.collect()
            expectedData = item.data
            assert_equals(len(expectedData), len(seriesData),
                          "Differing numbers of k/v pairs in item %d; expected %d, got %d" %
                          (itemidx, len(expectedData), len(seriesData)))
            for expected, actual in zip(expectedData, seriesData):
                expectedKeys = tuple(expected[0])
                # Values are promoted to the smallest float type that can
                # represent the declared value dtype.
                expectedType = smallestFloatType(item.valDtype)
                expectedVals = array(expected[1], dtype=expectedType)
                assert_equals(expectedKeys, actual[0],
                              "Key mismatch in item %d; expected %s, got %s" %
                              (itemidx, str(expectedKeys), str(actual[0])))
                assert_true(allclose(expectedVals, actual[1]),
                            "Value mismatch in item %d; expected %s, got %s" %
                            (itemidx, str(expectedVals), str(actual[1])))
                assert_equals(expectedType, str(actual[1].dtype),
                              "Value type mismatch in item %d; expected %s, got %s" %
                              (itemidx, expectedType, str(actual[1].dtype)))
    def test_fromBinary(self):
        self._run_tst_fromBinary()
    def test_fromBinaryWithConfFile(self):
        self._run_tst_fromBinary(True)
| apache-2.0 |
hellhovnd/django | tests/model_forms/tests.py | 2 | 67705 | from __future__ import absolute_import, unicode_literals
import datetime
import os
from decimal import Decimal
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection
from django.db.models.query import EmptyQuerySet
from django.forms.models import model_to_dict
from django.utils._os import upath
from django.utils.unittest import skipUnless
from django.test import TestCase
from django.utils import six
from shared_models.models import Author, Book
from .models import (Article, ArticleStatus, BetterAuthor, BigInt,
Category, CommaSeparatedInteger, CustomFieldForExclusionModel, DerivedBook,
DerivedPost, ExplicitPK, FlexibleDatePost, ImprovedArticle,
ImprovedArticleWithParentLink, Inventory, Post, Price,
Product, TextFile, AuthorProfile, Colour, ColourfulItem,
test_images)
# These ModelForms exist only when image support is available (test_images
# flag from .models): the ImageField-backed models require PIL.
if test_images:
    from .models import ImageFile, OptionalImageFile
    class ImageFileForm(forms.ModelForm):
        class Meta:
            model = ImageFile
    class OptionalImageFileForm(forms.ModelForm):
        class Meta:
            model = OptionalImageFile
class ProductForm(forms.ModelForm):
    # ModelForm exposing every field of Product (no fields/exclude given).
    class Meta:
        model = Product
class PriceForm(forms.ModelForm):
    # Used by UniqueTest to exercise Price's unique_together constraint.
    class Meta:
        model = Price
class BookForm(forms.ModelForm):
    # Explicitly restricts the form to these three Book fields.
    class Meta:
        fields = ['title', 'author', 'pubdate']
        model = Book
class DerivedBookForm(forms.ModelForm):
    # DerivedBook subclasses Book; includes the subclass-only fields too.
    class Meta:
        fields = ['title', 'author', 'isbn', 'suffix1', 'suffix2']
        model = DerivedBook
class ExplicitPKForm(forms.ModelForm):
    # Includes the explicit primary key field 'key' on the form.
    class Meta:
        model = ExplicitPK
        fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
    # All Post fields, no restriction.
    class Meta:
        model = Post
class DerivedPostForm(forms.ModelForm):
    # All fields of the DerivedPost subclass model.
    class Meta:
        model = DerivedPost
class CustomAuthorForm(forms.ModelForm):
    # Overrides the model's name field with a not-required form field;
    # ValidationTest checks this wins over the model's blank=False.
    name = forms.CharField(required=False)
    class Meta:
        model = Author
class FlexDatePostForm(forms.ModelForm):
    # All fields of FlexibleDatePost (its date field allows null/blank).
    class Meta:
        model = FlexibleDatePost
class BaseCategoryForm(forms.ModelForm):
    # Base form reused throughout ModelFormBaseTest's inheritance tests.
    class Meta:
        model = Category
class ArticleForm(forms.ModelForm):
    # All Article fields.
    class Meta:
        model = Article
# NOTE(review): this definition is identical to the ArticleForm immediately
# above and shadows it at module level -- likely a leftover duplicate.
class ArticleForm(forms.ModelForm):
    class Meta:
        model = Article
class PartialArticleForm(forms.ModelForm):
    # Only a subset of Article's fields.
    class Meta:
        model = Article
        fields = ('headline','pub_date')
class RoykoForm(forms.ModelForm):
    # All Author fields.
    class Meta:
        model = Author
class TestArticleForm(forms.ModelForm):
    # All Article fields (distinctly named form for the instance tests).
    class Meta:
        model = Article
class PartialArticleFormWithSlug(forms.ModelForm):
    # Subset of Article fields, including the slug.
    class Meta:
        model = Article
        fields=('headline', 'slug', 'pub_date')
class ArticleStatusForm(forms.ModelForm):
    # All ArticleStatus fields.
    class Meta:
        model = ArticleStatus
class InventoryForm(forms.ModelForm):
    # All Inventory fields.
    class Meta:
        model = Inventory
class SelectInventoryForm(forms.Form):
    # Plain (non-model) form: selects Inventory rows by barcode rather
    # than by primary key, via to_field_name.
    items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
    # 'markup' names a model field with no default form representation.
    class Meta:
        model = CustomFieldForExclusionModel
        fields = ['name', 'markup']
class ShortCategory(forms.ModelForm):
    # Overrides every Category field with shorter max_lengths than the
    # model declares; the form-level limits take precedence on validation.
    name = forms.CharField(max_length=5)
    slug = forms.CharField(max_length=5)
    url = forms.CharField(max_length=3)
    class Meta:
        model = Category
class ImprovedArticleForm(forms.ModelForm):
    # All ImprovedArticle fields.
    class Meta:
        model = ImprovedArticle
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
    # Model uses a parent_link OneToOneField; that field is excluded
    # automatically from the generated form.
    class Meta:
        model = ImprovedArticleWithParentLink
class BetterAuthorForm(forms.ModelForm):
    # All fields of the BetterAuthor subclass model.
    class Meta:
        model = BetterAuthor
class AuthorProfileForm(forms.ModelForm):
    # All AuthorProfile fields.
    class Meta:
        model = AuthorProfile
class TextFileForm(forms.ModelForm):
    # All TextFile fields (FileField handling tests).
    class Meta:
        model = TextFile
class BigIntForm(forms.ModelForm):
    # All BigInt fields (BigIntegerField bounds tests).
    class Meta:
        model = BigInt
class ModelFormWithMedia(forms.ModelForm):
    # Declares form media assets to verify they survive ModelForm creation.
    class Media:
        js = ('/some/form/javascript',)
        css = {
            'all': ('/some/form/css',)
        }
    class Meta:
        model = TextFile
class CommaSeparatedIntegerForm(forms.ModelForm):
    # All CommaSeparatedInteger fields.
    class Meta:
        model = CommaSeparatedInteger
class PriceFormWithoutQuantity(forms.ModelForm):
    # Excludes 'quantity': exercises partial validation of unique_together.
    class Meta:
        model = Price
        exclude = ('quantity',)
class ColourfulItemForm(forms.ModelForm):
    # All ColourfulItem fields (many-to-many to Colour).
    class Meta:
        model = ColourfulItem
class ModelFormBaseTest(TestCase):
    """Tests for ModelForm class construction: which base_fields are
    generated under various fields/exclude/inheritance configurations."""
    def test_base_form(self):
        self.assertEqual(list(BaseCategoryForm.base_fields),
                         ['name', 'slug', 'url'])
    def test_extra_fields(self):
        # Extra declared fields are appended after the model fields.
        class ExtraFields(BaseCategoryForm):
            some_extra_field = forms.BooleanField()
        self.assertEqual(list(ExtraFields.base_fields),
                         ['name', 'slug', 'url', 'some_extra_field'])
    def test_replace_field(self):
        # A declared field with a model field's name replaces it.
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
        self.assertTrue(isinstance(ReplaceField.base_fields['url'],
                                   forms.fields.BooleanField))
    def test_override_field(self):
        class AuthorForm(forms.ModelForm):
            book = forms.CharField(required=False)
            class Meta:
                model = Author
        wf = AuthorForm({'name': 'Richard Lockridge'})
        self.assertTrue(wf.is_valid())
    def test_limit_fields(self):
        class LimitFields(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['url']
        self.assertEqual(list(LimitFields.base_fields),
                         ['url'])
    def test_exclude_fields(self):
        class ExcludeFields(forms.ModelForm):
            class Meta:
                model = Category
                exclude = ['url']
        self.assertEqual(list(ExcludeFields.base_fields),
                         ['name', 'slug'])
    def test_confused_form(self):
        class ConfusedForm(forms.ModelForm):
            """ Using 'fields' *and* 'exclude'. Not sure why you'd want to do
            this, but uh, "be liberal in what you accept" and all.
            """
            class Meta:
                model = Category
                fields = ['name', 'url']
                exclude = ['url']
        # exclude is applied after fields.
        self.assertEqual(list(ConfusedForm.base_fields),
                         ['name'])
    def test_mixmodel_form(self):
        class MixModelForm(BaseCategoryForm):
            """ Don't allow more than one 'model' definition in the
            inheritance hierarchy. Technically, it would generate a valid
            form, but the fact that the resulting save method won't deal with
            multiple objects is likely to trip up people not familiar with the
            mechanics.
            """
            class Meta:
                model = Article
        # MixModelForm is now an Article-related thing, because MixModelForm.Meta
        # overrides BaseCategoryForm.Meta.
        self.assertEqual(
            list(MixModelForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_article_form(self):
        self.assertEqual(
            list(ArticleForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_bad_form(self):
        #First class with a Meta class wins...
        class BadForm(ArticleForm, BaseCategoryForm):
            pass
        self.assertEqual(
            list(BadForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_invalid_meta_model(self):
        class InvalidModelForm(forms.ModelForm):
            class Meta:
                pass  # no model
        # Can't create new form
        with self.assertRaises(ValueError):
            f = InvalidModelForm()
        # Even if you provide a model instance
        with self.assertRaises(ValueError):
            f = InvalidModelForm(instance=Category)
    def test_subcategory_form(self):
        class SubCategoryForm(BaseCategoryForm):
            """ Subclassing without specifying a Meta on the class will use
            the parent's Meta (or the first parent in the MRO if there are
            multiple parent classes).
            """
            pass
        self.assertEqual(list(SubCategoryForm.base_fields),
                         ['name', 'slug', 'url'])
    def test_subclassmeta_form(self):
        class SomeCategoryForm(forms.ModelForm):
            checkbox = forms.BooleanField()
            class Meta:
                model = Category
        class SubclassMeta(SomeCategoryForm):
            """ We can also subclass the Meta inner class to change the fields
            list.
            """
            class Meta(SomeCategoryForm.Meta):
                exclude = ['url']
        self.assertHTMLEqual(
            str(SubclassMeta()),
            """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
        )
    def test_orderfields_form(self):
        # Meta.fields also controls rendering order.
        class OrderFields(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['url', 'name']
        self.assertEqual(list(OrderFields.base_fields),
                         ['url', 'name'])
        self.assertHTMLEqual(
            str(OrderFields()),
            """<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
        )
    def test_orderfields2_form(self):
        class OrderFields2(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['slug', 'url', 'name']
                exclude = ['url']
        self.assertEqual(list(OrderFields2.base_fields),
                         ['slug', 'name'])
class TestWidgetForm(forms.ModelForm):
    # Overrides default widgets per field via Meta.widgets: a widget class
    # for 'name' and an instantiated widget (with attrs) for 'url'.
    class Meta:
        model = Category
        fields = ['name', 'url', 'slug']
        widgets = {
            'name': forms.Textarea,
            'url': forms.TextInput(attrs={'class': 'url'})
        }
class TestWidgets(TestCase):
    """Checks that Meta.widgets overrides are rendered, and that fields
    without an override keep their default widget."""
    def test_base_widgets(self):
        frm = TestWidgetForm()
        self.assertHTMLEqual(
            str(frm['name']),
            '<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
        )
        self.assertHTMLEqual(
            str(frm['url']),
            '<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
        )
        # 'slug' had no widgets entry, so it keeps the default TextInput.
        self.assertHTMLEqual(
            str(frm['slug']),
            '<input id="id_slug" type="text" name="slug" maxlength="20" />'
        )
class IncompleteCategoryFormWithFields(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    # Non-required override; the model's 'url' field validation must not run.
    url = forms.CharField(required=False)
    class Meta:
        # 'url' is kept out of Meta.fields; only the declared override is used.
        fields = ('name', 'slug')
        model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    # Non-required override; the model's 'url' field validation must not run.
    url = forms.CharField(required=False)
    class Meta:
        # Same effect as above, expressed via Meta.exclude instead of fields.
        exclude = ['url']
        model = Category
class ValidationTest(TestCase):
    """Field overrides on a ModelForm replace the model field's validation.

    Uses unittest assertions instead of bare ``assert`` statements: bare
    asserts are stripped when Python runs with ``-O`` and are inconsistent
    with the rest of this suite.
    """
    def test_validates_with_replaced_field_not_specified(self):
        """A non-required 'url' override validates even without a value
        when the model field is omitted from Meta.fields."""
        form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
    def test_validates_with_replaced_field_excluded(self):
        """Same as above, with the model field removed via Meta.exclude."""
        form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
    def test_notrequired_overrides_notblank(self):
        """required=False on the form field wins over blank=False on the
        model field, so an empty form still validates."""
        form = CustomAuthorForm({})
        self.assertTrue(form.is_valid())
# unique/unique_together validation
class UniqueTest(TestCase):
    """ModelForm validation of unique, unique_together and unique_for_date
    constraints, including inherited (concrete and abstract) models.

    Also drops two unused ``dbook`` local bindings: the created objects are
    only needed for their database side effect.
    """
    def setUp(self):
        # Pre-existing author referenced by the book-related tests below.
        self.author = Author.objects.create(name='Mike Royko')
    def test_simple_unique(self):
        """A unique field rejects duplicates, unless bound to the same
        instance that owns the value."""
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
        # Editing the owning instance with the same value is allowed.
        form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
        self.assertTrue(form.is_valid())
    def test_unique_together(self):
        """ModelForm test of unique_together constraint"""
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertTrue(form.is_valid())
        form.save()
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
    def test_unique_null(self):
        """unique_together involving a nullable field: duplicates with the
        field set are rejected, duplicates with it NULL are accepted."""
        title = 'I May Be Wrong But I Doubt It'
        form = BookForm({'title': title, 'author': self.author.pk, 'pubdate': '2012-12-12 00:00:00'})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title, 'author': self.author.pk, 'pubdate': '2012-12-12 00:00:00'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
        # NULL != NULL for uniqueness purposes: two author-less books pass.
        form = BookForm({'title': title, 'pubdate': '2012-12-12 00:00:00'})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title, 'pubdate': '2012-12-12 00:00:00'})
        self.assertTrue(form.is_valid())
    def test_inherited_unique(self):
        """A unique field inherited from a concrete parent is enforced."""
        form = BetterAuthorForm({'name': 'Mike Royko', 'score': 3})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['name'], ['Author with this Name already exists.'])
    def test_inherited_unique_together(self):
        """unique_together inherited from a concrete parent is enforced."""
        title = 'Boss'
        form = BookForm({'title': title, 'author': self.author.pk, 'pubdate': '2012-12-12 00:00:00'})
        self.assertTrue(form.is_valid())
        form.save()
        form = DerivedBookForm({'title': title, 'author': self.author.pk, 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
    def test_abstract_inherited_unique(self):
        """A unique field inherited from an abstract parent is enforced."""
        title = 'Boss'
        isbn = '12345'
        # Created only for its DB side effect; the instance is not used.
        DerivedBook.objects.create(title=title, author=self.author, isbn=isbn,
            pubdate='2012-12-12 00:00')
        form = DerivedBookForm({'title': 'Other', 'author': self.author.pk, 'isbn': isbn})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
    def test_abstract_inherited_unique_together(self):
        """unique_together inherited from an abstract parent is enforced."""
        title = 'Boss'
        isbn = '12345'
        # Created only for its DB side effect; the instance is not used.
        DerivedBook.objects.create(title=title, author=self.author, isbn=isbn,
            pubdate='2012-12-12 00:00')
        form = DerivedBookForm({
                    'title': 'Other',
                    'author': self.author.pk,
                    'isbn': '9876',
                    'suffix1': '0',
                    'suffix2': '0'
                })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'],
                         ['Derived book with this Suffix1 and Suffix2 already exists.'])
    def test_explicitpk_unspecified(self):
        """Test for primary_key being in the form and failing validation."""
        form = ExplicitPKForm({'key': '', 'desc': ''})
        self.assertFalse(form.is_valid())
    def test_explicitpk_unique(self):
        """Ensure keys and blank character strings are tested for uniqueness."""
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertTrue(form.is_valid())
        form.save()
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertFalse(form.is_valid())
        # Three errors: the unique_together pair plus each unique field.
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
        self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
        self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
    def test_unique_for_date(self):
        """unique_for_date / _year / _month constraints are validated, and
        skipped entirely when bound to the owning instance."""
        p = Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        # Bound to the owning instance: no uniqueness errors.
        form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
                "slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
        self.assertTrue(form.is_valid())
        # Without the date field, the check cannot run; the field itself
        # is simply required.
        form = PostForm({'title': "Django 1.0 is released"})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['posted'], ['This field is required.'])
    def test_inherited_unique_for_date(self):
        """unique_for_date constraints inherited from a parent model are
        enforced on the derived model's form."""
        p = Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
                "slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
        self.assertTrue(form.is_valid())
    def test_unique_for_date_with_nullable_date(self):
        """When the date field is nullable and absent, unique_for_date
        checks are skipped entirely."""
        p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = FlexDatePostForm({'title': "Django 1.0 is released"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'slug': "Django 1.0"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
                "slug": "Django 1.0"}, instance=p)
        self.assertTrue(form.is_valid())
class ModelToDictTests(TestCase):
    """Tests for forms.models.model_to_dict."""
    def test_model_to_dict_many_to_many(self):
        """model_to_dict renders a many-to-many field as a list of the
        related objects' primary keys, using a single query."""
        cats = [
            Category(name='TestName%d' % i, slug='TestName%d' % i, url='url%d' % i)
            for i in (1, 2, 3)
        ]
        for cat in cats:
            cat.save()
        author = Author(name='Test writer')
        author.save()
        article = Article(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=author,
            article='Hello.'
        )
        article.save()
        for cat in cats:
            article.categories.add(cat)
        article.save()
        # Converting the instance must not cost one query per m2m row.
        with self.assertNumQueries(1):
            data = model_to_dict(article)
        # Every related category's pk appears in the m2m entry...
        for cat in cats:
            self.assertIn(cat.pk, data['categories'])
        # ...and the relation is represented as a plain list.
        self.assertIsInstance(data['categories'], list)
class OldFormForXTests(TestCase):
    def test_base_form(self):
        """An unbound ModelForm renders one row per model field, using the
        model field's verbose name as the label (as_table by default,
        as_ul on request)."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm()
        self.assertHTMLEqual(
            str(f),
            """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
            )
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
            )
        # A single bound-field lookup renders just that field's widget.
        self.assertHTMLEqual(
            str(f["name"]),
            """<input id="id_name" type="text" name="name" maxlength="20" />""")
    def test_auto_id(self):
        """With auto_id=False no id attributes (and hence no label 'for'
        attributes) are generated."""
        f = BaseCategoryForm(auto_id=False)
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
            )
    def test_with_data(self):
        """End-to-end ModelForm workflow: validating and saving instances,
        commit=False and save_m2m(), rendering bound/unbound forms with
        ForeignKey and ManyToMany fields, and the run-time (not
        instantiation-time) behaviour of ModelChoiceField and
        ModelMultipleChoiceField."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm({'name': 'Entertainment',
                              'slug': 'entertainment',
                              'url': 'entertainment'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], 'Entertainment')
        self.assertEqual(f.cleaned_data['slug'], 'entertainment')
        self.assertEqual(f.cleaned_data['url'], 'entertainment')
        c1 = f.save()
        # Testing whether the same object is returned from the
        # ORM... not the fastest way...
        self.assertEqual(c1, Category.objects.all()[0])
        self.assertEqual(c1.name, "Entertainment")
        self.assertEqual(Category.objects.count(), 1)
        f = BaseCategoryForm({'name': "It's a test",
                              'slug': 'its-test',
                              'url': 'test'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], "It's a test")
        self.assertEqual(f.cleaned_data['slug'], 'its-test')
        self.assertEqual(f.cleaned_data['url'], 'test')
        c2 = f.save()
        # Testing whether the same object is returned from the
        # ORM... not the fastest way...
        self.assertEqual(c2, Category.objects.get(pk=c2.pk))
        self.assertEqual(c2.name, "It's a test")
        self.assertEqual(Category.objects.count(), 2)
        # If you call save() with commit=False, then it will return an object that
        # hasn't yet been saved to the database. In this case, it's up to you to call
        # save() on the resulting model instance.
        f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
        self.assertEqual(f.is_valid(), True)
        self.assertEqual(f.cleaned_data['url'], 'third')
        self.assertEqual(f.cleaned_data['name'], 'Third test')
        self.assertEqual(f.cleaned_data['slug'], 'third-test')
        c3 = f.save(commit=False)
        self.assertEqual(c3.name, "Third test")
        # Not yet persisted: count unchanged until c3.save() below.
        self.assertEqual(Category.objects.count(), 2)
        c3.save()
        self.assertEqual(Category.objects.count(), 3)
        # If you call save() with invalid data, you'll get a ValueError.
        f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
        self.assertEqual(f.errors['name'], ['This field is required.'])
        self.assertEqual(f.errors['slug'], ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
        # cleaned_data only keeps the fields that validated.
        self.assertEqual(f.cleaned_data, {'url': 'foo'})
        with self.assertRaises(ValueError):
            f.save()
        f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
        with self.assertRaises(ValueError):
            f.save()
        # Create a couple of Authors.
        w_royko = Author(name='Mike Royko')
        w_royko.save()
        w_woodward = Author(name='Bob Woodward')
        w_woodward.save()
        # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
        # fields with the 'choices' attribute are represented by a ChoiceField.
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
        # You can restrict a form to a subset of the complete list of fields
        # by providing a 'fields' argument. If you try to save a
        # model created with such a form, you need to ensure that the fields
        # that are _not_ on the form have default values, or are allowed to have
        # a value of None. If a field isn't specified on a form, the object created
        # from the form can't provide a value for that field!
        f = PartialArticleForm(auto_id=False)
        self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
        # When the ModelForm is passed an instance, that instance's current values are
        # inserted as 'initial' data in each Field.
        w = Author.objects.get(name='Mike Royko')
        f = RoykoForm(auto_id=False, instance=w)
        self.assertHTMLEqual(six.text_type(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="100" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
        art = Article(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=w,
            article='Hello.'
        )
        art.save()
        art_id_1 = art.id
        self.assertEqual(art_id_1 is not None, True)
        f = TestArticleForm(auto_id=False, instance=art)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
        # Saving a bound form with an instance updates that instance in place.
        f = TestArticleForm({
                'headline': 'Test headline',
                'slug': 'test-headline',
                'pub_date': '1984-02-06',
                'writer': six.text_type(w_royko.pk),
                'article': 'Hello.'
            }, instance=art)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.is_valid(), True)
        test_art = f.save()
        self.assertEqual(test_art.id == art_id_1, True)
        test_art = Article.objects.get(id=art_id_1)
        self.assertEqual(test_art.headline, 'Test headline')
        # You can create a form over a subset of the available fields
        # by specifying a 'fields' argument to form_for_instance.
        f = PartialArticleFormWithSlug({
                'headline': 'New headline',
                'slug': 'new-headline',
                'pub_date': '1988-01-04'
            }, auto_id=False, instance=art)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
        self.assertEqual(f.is_valid(), True)
        new_art = f.save()
        self.assertEqual(new_art.id == art_id_1, True)
        new_art = Article.objects.get(id=art_id_1)
        self.assertEqual(new_art.headline, 'New headline')
        # Add some categories and test the many-to-many form output.
        self.assertQuerysetEqual(new_art.categories.all(), [])
        new_art.categories.add(Category.objects.get(name='Entertainment'))
        self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
        f = TestArticleForm(auto_id=False, instance=new_art)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
        # Initial values can be provided for model forms
        f = TestArticleForm(
                auto_id=False,
                initial={
                    'headline': 'Your headline here',
                    'categories': [str(c1.id), str(c2.id)]
                })
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
        f = TestArticleForm({
                'headline': 'New headline',
                'slug': 'new-headline',
                'pub_date': '1988-01-04',
                'writer': six.text_type(w_royko.pk),
                'article': 'Hello.',
                'categories': [six.text_type(c1.id), six.text_type(c2.id)]
            }, instance=new_art)
        new_art = f.save()
        self.assertEqual(new_art.id == art_id_1, True)
        new_art = Article.objects.get(id=art_id_1)
        self.assertQuerysetEqual(new_art.categories.order_by('name'),
                         ["Entertainment", "It's a test"])
        # Now, submit form data with no categories. This deletes the existing categories.
        f = TestArticleForm({'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04',
            'writer': six.text_type(w_royko.pk), 'article': 'Hello.'}, instance=new_art)
        new_art = f.save()
        self.assertEqual(new_art.id == art_id_1, True)
        new_art = Article.objects.get(id=art_id_1)
        self.assertQuerysetEqual(new_art.categories.all(), [])
        # Create a new article, with categories, via the form.
        f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
            'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
        new_art = f.save()
        art_id_2 = new_art.id
        self.assertEqual(art_id_2 not in (None, art_id_1), True)
        new_art = Article.objects.get(id=art_id_2)
        self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
        # Create a new article, with no categories, via the form.
        f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
            'writer': six.text_type(w_royko.pk), 'article': 'Test.'})
        new_art = f.save()
        art_id_3 = new_art.id
        self.assertEqual(art_id_3 not in (None, art_id_1, art_id_2), True)
        new_art = Article.objects.get(id=art_id_3)
        self.assertQuerysetEqual(new_art.categories.all(), [])
        # Create a new article, with categories, via the form, but use commit=False.
        # The m2m data won't be saved until save_m2m() is invoked on the form.
        f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
            'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
        new_art = f.save(commit=False)
        # Manually save the instance
        new_art.save()
        art_id_4 = new_art.id
        self.assertEqual(art_id_4 not in (None, art_id_1, art_id_2, art_id_3), True)
        # The instance doesn't have m2m data yet
        new_art = Article.objects.get(id=art_id_4)
        self.assertQuerysetEqual(new_art.categories.all(), [])
        # Save the m2m data on the form
        f.save_m2m()
        self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
        # Here, we define a custom ModelForm. Because it happens to have the same fields as
        # the Category model, we can just call the form's save() to apply its changes to an
        # existing Category instance.
        cat = Category.objects.get(name='Third test')
        self.assertEqual(cat.name, "Third test")
        self.assertEqual(cat.id == c3.id, True)
        form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
        self.assertEqual(form.save().name, 'Third')
        self.assertEqual(Category.objects.get(id=c3.id).name, 'Third')
        # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
        # at runtime, based on the data in the database when the form is displayed, not
        # the data in the database when the form is instantiated.
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
        c4 = Category.objects.create(name='Fourth', url='4th')
        self.assertEqual(c4.name, 'Fourth')
        w_bernstein = Author.objects.create(name='Carl Bernstein')
        self.assertEqual(w_bernstein.name, 'Carl Bernstein')
        # Re-rendering the SAME form object picks up the new rows.
        self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
<option value="%s">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_bernstein.pk, w_royko.pk, c1.pk, c2.pk, c3.pk, c4.pk))
        # ModelChoiceField ############################################################
        f = forms.ModelChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (c1.pk, 'Entertainment'),
            (c2.pk, "It's a test"),
            (c3.pk, 'Third'),
            (c4.pk, 'Fourth')])
        self.assertEqual(5, len(f.choices))
        with self.assertRaises(ValidationError):
            f.clean('')
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean(0)
        self.assertEqual(f.clean(c3.id).name, 'Third')
        self.assertEqual(f.clean(c2.id).name, "It's a test")
        # Add a Category object *after* the ModelChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        c5 = Category.objects.create(name='Fifth', url='5th')
        self.assertEqual(c5.name, 'Fifth')
        self.assertEqual(f.clean(c5.id).name, 'Fifth')
        # Delete a Category object *after* the ModelChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='5th').delete()
        with self.assertRaises(ValidationError):
            f.clean(c5.id)
        f = forms.ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
        self.assertEqual(f.clean(''), None)
        # NOTE(review): the bare f.clean('') below is redundant — its result
        # is already asserted on the previous line.
        f.clean('')
        self.assertEqual(f.clean(str(c1.id)).name, "Entertainment")
        with self.assertRaises(ValidationError):
            f.clean('100')
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Fourth')
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (c1.pk, 'Entertainment'),
            (c2.pk, "It's a test"),
            (c3.pk, 'Third')])
        self.assertEqual(f.clean(c3.id).name, 'Third')
        with self.assertRaises(ValidationError):
            f.clean(c4.id)
        # check that we can safely iterate choices repeatedly
        gen_one = list(f.choices)
        gen_two = f.choices
        self.assertEqual(gen_one[2], (c2.pk, "It's a test"))
        self.assertEqual(list(gen_two), [
            ('', '---------'),
            (c1.pk, 'Entertainment'),
            (c2.pk, "It's a test"),
            (c3.pk, 'Third')])
        # check that we can override the label_from_instance method to print custom labels (#4620)
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "category " + str(obj)
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (c1.pk, 'category Entertainment'),
            (c2.pk, "category It's a test"),
            (c3.pk, 'category Third'),
            (c4.pk, 'category Fourth')])
        # ModelMultipleChoiceField ####################################################
        f = forms.ModelMultipleChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            (c1.pk, 'Entertainment'),
            (c2.pk, "It's a test"),
            (c3.pk, 'Third'),
            (c4.pk, 'Fourth')])
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean([])
        self.assertQuerysetEqual(f.clean([c1.id]), ["Entertainment"])
        self.assertQuerysetEqual(f.clean([c2.id]), ["It's a test"])
        self.assertQuerysetEqual(f.clean([str(c1.id)]), ["Entertainment"])
        self.assertQuerysetEqual(f.clean([str(c1.id), str(c2.id)]), ["Entertainment", "It's a test"],
                                 ordered=False)
        self.assertQuerysetEqual(f.clean([c1.id, str(c2.id)]), ["Entertainment", "It's a test"],
                                 ordered=False)
        self.assertQuerysetEqual(f.clean((c1.id, str(c2.id))), ["Entertainment", "It's a test"],
                                 ordered=False)
        with self.assertRaises(ValidationError):
            f.clean(['100'])
        with self.assertRaises(ValidationError):
            f.clean('hello')
        with self.assertRaises(ValidationError):
            f.clean(['fail'])
        # Add a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        # Note, we are using an id of 1006 here since tests that run before
        # this may create categories with primary keys up to 6. Use
        # a number that is will not conflict.
        c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
        self.assertEqual(c6.name, 'Sixth')
        self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
        # Delete a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='6th').delete()
        with self.assertRaises(ValidationError):
            f.clean([c6.id])
        f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
        self.assertIsInstance(f.clean([]), EmptyQuerySet)
        self.assertIsInstance(f.clean(()), EmptyQuerySet)
        with self.assertRaises(ValidationError):
            f.clean(['10'])
        # One invalid pk in the list invalidates the whole selection.
        with self.assertRaises(ValidationError):
            f.clean([str(c3.id), '10'])
        with self.assertRaises(ValidationError):
            f.clean([str(c1.id), '10'])
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Fourth')
        self.assertEqual(list(f.choices), [
            (c1.pk, 'Entertainment'),
            (c2.pk, "It's a test"),
            (c3.pk, 'Third')])
        self.assertQuerysetEqual(f.clean([c3.id]), ["Third"])
        with self.assertRaises(ValidationError):
            f.clean([c4.id])
        with self.assertRaises(ValidationError):
            f.clean([str(c3.id), str(c4.id)])
        # label_from_instance customizes the option labels here too.
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "multicategory " + str(obj)
        self.assertEqual(list(f.choices), [
            (c1.pk, 'multicategory Entertainment'),
            (c2.pk, "multicategory It's a test"),
            (c3.pk, 'multicategory Third'),
            (c4.pk, 'multicategory Fourth')])
        # OneToOneField ###############################################################
        self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
        # A parent_link OneToOneField is excluded from the form entirely.
        self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
        bw = BetterAuthor(name='Joe Better', score=10)
        bw.save()
        self.assertEqual(sorted(model_to_dict(bw)),
                         ['author_ptr', 'id', 'name', 'score'])
        form = BetterAuthorForm({'name': 'Some Name', 'score': 12})
        self.assertEqual(form.is_valid(), True)
        bw2 = form.save()
        bw2.delete()
        form = AuthorProfileForm()
        self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
        data = {
            'writer': six.text_type(w_woodward.pk),
            'age': '65',
        }
        form = AuthorProfileForm(data)
        instance = form.save()
        self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
        form = AuthorProfileForm(instance=instance)
        self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
def test_file_field(self):
    """FileField on a ModelForm: reject missing/empty uploads, save and
    re-save uploads, inherit max_length from the model, edit without
    re-uploading, override with a new file, and handle the non-required
    case."""
    # Test conditions when files is either not given or empty.
    f = TextFileForm(data={'description': 'Assistance'})
    self.assertEqual(f.is_valid(), False)
    f = TextFileForm(data={'description': 'Assistance'}, files={})
    self.assertEqual(f.is_valid(), False)
    # Upload a file and ensure it all works as expected.
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test1.txt')
    instance.file.delete()
    # Re-upload after deleting: the same storage name is reused.
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test1.txt')
    # Check if the max_length attribute has been inherited from the model.
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
    self.assertEqual(f.is_valid(), False)
    # Edit an instance that already has the file defined in the model. This will not
    # save the file again, but leave it exactly as it is.
    f = TextFileForm(
        data={'description': 'Assistance'},
        instance=instance)
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test1.txt')
    # Delete the current file since this is not done by Django.
    instance.file.delete()
    # Override the file by uploading a new one.
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test2.txt')
    # Delete the current file since this is not done by Django.
    instance.file.delete()
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test2.txt', b'hello world')})
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test2.txt')
    # Delete the current file since this is not done by Django.
    instance.file.delete()
    instance.delete()
    # Test the non-required FileField
    f = TextFileForm(data={'description': 'Assistance'})
    f.fields['file'].required = False
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    # No upload and not required: the file name is stored as empty string.
    self.assertEqual(instance.file.name, '')
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test3.txt')
    # Instance can be edited w/out re-uploading the file and existing file should be preserved.
    f = TextFileForm(
        data={'description': 'New Description'},
        instance=instance)
    f.fields['file'].required = False
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.description, 'New Description')
    self.assertEqual(instance.file.name, 'tests/test3.txt')
    # Delete the current file since this is not done by Django.
    instance.file.delete()
    instance.delete()
    f = TextFileForm(
        data={'description': 'Assistance'},
        files={'file': SimpleUploadedFile('test3.txt', b'hello world')})
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.file.name, 'tests/test3.txt')
    # Delete the current file since this is not done by Django.
    instance.file.delete()
    instance.delete()
def test_big_integer_field(self):
    """BigIntegerField bounds: the 64-bit extremes validate; one step past
    either bound fails with the appropriate range message."""
    # Both in-range boundary values are accepted.
    for boundary in ('-9223372036854775808', '9223372036854775807'):
        form = BigIntForm({'biggie': boundary})
        self.assertEqual(form.is_valid(), True)
    # One below the minimum is rejected with the "greater than" message.
    form = BigIntForm({'biggie': '-9223372036854775809'})
    self.assertEqual(form.is_valid(), False)
    self.assertEqual(form.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']})
    # One above the maximum is rejected with the "less than" message.
    form = BigIntForm({'biggie': '9223372036854775808'})
    self.assertEqual(form.is_valid(), False)
    self.assertEqual(form.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
@skipUnless(test_images, "PIL not installed")
def test_image_field(self):
    """ImageField on a ModelForm: upload, auto-population of width/height
    dimension fields, editing without re-upload, overriding, the optional
    (non-required) field, and a callable upload_to."""
    # ImageField and FileField are nearly identical, but they differ slighty when
    # it comes to validation. This specifically tests that #6302 is fixed for
    # both file fields and image fields.
    with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
        image_data = fp.read()
    with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
        image_data2 = fp.read()
    f = ImageFileForm(
        data={'description': 'An image'},
        files={'image': SimpleUploadedFile('test.png', image_data)})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test.png')
    # test.png is a 16x16 image; the dimension fields are filled in on save.
    self.assertEqual(instance.width, 16)
    self.assertEqual(instance.height, 16)
    # Delete the current file since this is not done by Django, but don't save
    # because the dimension fields are not null=True.
    instance.image.delete(save=False)
    f = ImageFileForm(
        data={'description': 'An image'},
        files={'image': SimpleUploadedFile('test.png', image_data)})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test.png')
    self.assertEqual(instance.width, 16)
    self.assertEqual(instance.height, 16)
    # Edit an instance that already has the (required) image defined in the model. This will not
    # save the image again, but leave it exactly as it is.
    f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test.png')
    self.assertEqual(instance.height, 16)
    self.assertEqual(instance.width, 16)
    # Delete the current file since this is not done by Django, but don't save
    # because the dimension fields are not null=True.
    instance.image.delete(save=False)
    # Override the file by uploading a new one.
    f = ImageFileForm(
        data={'description': 'Changed it'},
        files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test2.png')
    # test2.png is 48x32, so the dimension fields change accordingly.
    self.assertEqual(instance.height, 32)
    self.assertEqual(instance.width, 48)
    # Delete the current file since this is not done by Django, but don't save
    # because the dimension fields are not null=True.
    instance.image.delete(save=False)
    instance.delete()
    f = ImageFileForm(
        data={'description': 'Changed it'},
        files={'image': SimpleUploadedFile('test2.png', image_data2)})
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test2.png')
    self.assertEqual(instance.height, 32)
    self.assertEqual(instance.width, 48)
    # Delete the current file since this is not done by Django, but don't save
    # because the dimension fields are not null=True.
    instance.image.delete(save=False)
    instance.delete()
    # Test the non-required ImageField
    # Note: In Oracle, we expect a null ImageField to return '' instead of
    # None.
    if connection.features.interprets_empty_strings_as_nulls:
        expected_null_imagefield_repr = ''
    else:
        expected_null_imagefield_repr = None
    f = OptionalImageFileForm(data={'description': 'Test'})
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.image.name, expected_null_imagefield_repr)
    self.assertEqual(instance.width, None)
    self.assertEqual(instance.height, None)
    f = OptionalImageFileForm(
        data={'description': 'And a final one'},
        files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test3.png')
    self.assertEqual(instance.width, 16)
    self.assertEqual(instance.height, 16)
    # Editing the instance without re-uploading the image should not affect the image or its width/height properties
    f = OptionalImageFileForm(
        data={'description': 'New Description'},
        instance=instance)
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.description, 'New Description')
    self.assertEqual(instance.image.name, 'tests/test3.png')
    self.assertEqual(instance.width, 16)
    self.assertEqual(instance.height, 16)
    # Delete the current file since this is not done by Django.
    instance.image.delete()
    instance.delete()
    f = OptionalImageFileForm(
        data={'description': 'And a final one'},
        files={'image': SimpleUploadedFile('test4.png', image_data2)}
    )
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.image.name, 'tests/test4.png')
    self.assertEqual(instance.width, 48)
    self.assertEqual(instance.height, 32)
    instance.delete()
    # Test callable upload_to behavior that's dependent on the value of another field in the model
    f = ImageFileForm(
        data={'description': 'And a final one', 'path': 'foo'},
        files={'image': SimpleUploadedFile('test4.png', image_data)})
    self.assertEqual(f.is_valid(), True)
    instance = f.save()
    self.assertEqual(instance.image.name, 'foo/test4.png')
    instance.delete()
def test_media_on_modelform(self):
    """Custom Media declared on a ModelForm is rendered.

    NOTE(review): this method also contains CommaSeparatedIntegerField,
    excluded-field and choice-validation checks that look like separate
    tests folded together in this copy of the file -- confirm against
    upstream before reorganizing.
    """
    # Similar to a regular Form class you can define custom media to be used on
    # the ModelForm.
    f = ModelFormWithMedia()
    self.assertHTMLEqual(six.text_type(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
    # CommaSeparatedIntegerField: only digits and commas are accepted.
    f = CommaSeparatedIntegerForm({'field': '1,2,3'})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
    f = CommaSeparatedIntegerForm({'field': '1a,2'})
    self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
    # Runs of commas with no digits are still considered valid.
    f = CommaSeparatedIntegerForm({'field': ',,,,'})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(f.cleaned_data, {'field': ',,,,'})
    f = CommaSeparatedIntegerForm({'field': '1.2'})
    self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
    f = CommaSeparatedIntegerForm({'field': '1,a,2'})
    self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
    f = CommaSeparatedIntegerForm({'field': '1,,2'})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(f.cleaned_data, {'field': '1,,2'})
    f = CommaSeparatedIntegerForm({'field': '1'})
    self.assertEqual(f.is_valid(), True)
    self.assertEqual(f.cleaned_data, {'field': '1'})
    # This Price instance generated by this form is not valid because the quantity
    # field is required, but the form is valid because the field is excluded from
    # the form. This is for backwards compatibility.
    form = PriceFormWithoutQuantity({'price': '6.00'})
    self.assertEqual(form.is_valid(), True)
    price = form.save(commit=False)
    with self.assertRaises(ValidationError):
        price.full_clean()
    # The form should not validate fields that it doesn't contain even if they are
    # specified using 'fields', not 'exclude'.
    # NOTE(review): this Meta class appears orphaned -- its enclosing
    # ModelForm subclass seems to be missing from this copy; it is defined
    # but never referenced below.
    class Meta:
        model = Price
        fields = ('price',)
    form = PriceFormWithoutQuantity({'price': '6.00'})
    self.assertEqual(form.is_valid(), True)
    # The form should still have an instance of a model that is not complete and
    # not saved into a DB yet.
    self.assertEqual(form.instance.price, Decimal('6.00'))
    self.assertEqual(form.instance.quantity is None, True)
    self.assertEqual(form.instance.pk is None, True)
    # Choices on CharField and IntegerField
    f = ArticleForm()
    with self.assertRaises(ValidationError):
        f.fields['status'].clean('42')
    f = ArticleStatusForm()
    with self.assertRaises(ValidationError):
        f.fields['status'].clean('z')
def test_foreignkeys_which_use_to_field(self):
    """ModelChoiceField / ModelMultipleChoiceField with to_field_name:
    option values come from the non-pk 'barcode' field."""
    apple = Inventory.objects.create(barcode=86, name='Apple')
    pear = Inventory.objects.create(barcode=22, name='Pear')
    core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
    # Choice values are barcodes, not primary keys; ordered by name.
    field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
    self.assertEqual(tuple(field.choices), (
        ('', '---------'),
        (86, 'Apple'),
        (87, 'Core'),
        (22, 'Pear')))
    form = InventoryForm(instance=core)
    self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
    # Re-point the FK via its to_field value and save.
    data = model_to_dict(core)
    data['parent'] = '22'
    form = InventoryForm(data=data, instance=core)
    core = form.save()
    self.assertEqual(core.parent.name, 'Pear')

    class CategoryForm(forms.ModelForm):
        description = forms.CharField()

        class Meta:
            model = Category
            fields = ['description', 'url']

    self.assertEqual(list(CategoryForm.base_fields),
                     ['description', 'url'])
    self.assertHTMLEqual(six.text_type(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
    # to_field_name should also work on ModelMultipleChoiceField ##################
    field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
    self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
    self.assertQuerysetEqual(field.clean([86]), ['Apple'])
    form = SelectInventoryForm({'items': [87, 22]})
    self.assertEqual(form.is_valid(), True)
    self.assertEqual(len(form.cleaned_data), 1)
    self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
    # A model field whose formfield() returns None excludes itself from the
    # form even when listed in Meta.fields, so only 'name' remains.
    self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
                     ['name'])
    self.assertHTMLEqual(six.text_type(CustomFieldForExclusionForm()),
                         '''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
def test_iterable_model_m2m(self):
    """A ModelForm with a ManyToMany field renders one <option> per related
    object when the form is output."""
    # Fixed: original signature had a stray space before the colon
    # ("def test_iterable_model_m2m(self) :"), a PEP 8 (E203) violation.
    colour = Colour.objects.create(name='Blue')
    form = ColourfulItemForm()
    self.maxDiff = 1024
    self.assertHTMLEqual(
        form.as_p(),
        """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" /></p>
<p><label for="id_colours">Colours:</label> <select multiple="multiple" name="colours" id="id_colours">
<option value="%(blue_pk)s">Blue</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>"""
        % {'blue_pk': colour.pk})
| bsd-3-clause |
Garvys/PingPongSkill | pingpongskill/text2num.py | 1 | 2798 | # This library is a simple implementation of a function to convert textual
# numbers written in English into their integer representations.
#
# This code is open source according to the MIT License as follows.
#
# Copyright (c) 2008 Greg Hewgill
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
# Mapping of unit/teen/tens words to their values (0-90).
Small = {
    'zero': 0,
    'one': 1,
    'two': 2,
    'three': 3,
    'four': 4,
    'five': 5,
    'six': 6,
    'seven': 7,
    'eight': 8,
    'nine': 9,
    'ten': 10,
    'eleven': 11,
    'twelve': 12,
    'thirteen': 13,
    'fourteen': 14,
    'fifteen': 15,
    'sixteen': 16,
    'seventeen': 17,
    'eighteen': 18,
    'nineteen': 19,
    'twenty': 20,
    'thirty': 30,
    'forty': 40,
    'fifty': 50,
    'sixty': 60,
    'seventy': 70,
    'eighty': 80,
    'ninety': 90
}

# Mapping of large-magnitude words to their multipliers (powers of 1000).
Magnitude = {
    'thousand': 1000,
    'million': 1000000,
    'billion': 1000000000,
    'trillion': 1000000000000,
    'quadrillion': 1000000000000000,
    'quintillion': 1000000000000000000,
    'sextillion': 1000000000000000000000,
    'septillion': 1000000000000000000000000,
    'octillion': 1000000000000000000000000000,
    'nonillion': 1000000000000000000000000000000,
    'decillion': 1000000000000000000000000000000000,
}


class NumberException(Exception):
    """Raised when a word cannot be interpreted as part of a number."""
    def __init__(self, msg):
        Exception.__init__(self, msg)


def text2num(s):
    """Convert an English textual number (e.g. "forty-two") to an int.

    Words may be separated by whitespace or hyphens. Raises
    NumberException for any unrecognized word (including an empty input).
    """
    # Fixed: strip the input first. Previously, leading/trailing whitespace
    # produced empty tokens from re.split, which raised a spurious
    # NumberException ("Unknown number: ").
    a = re.split(r"[\s-]+", s.strip())
    n = 0  # total accumulated over completed magnitude groups
    g = 0  # current group (value below the next magnitude word)
    for w in a:
        x = Small.get(w, None)
        if x is not None:
            g += x
        elif w == "hundred" and g != 0:
            g *= 100
        else:
            x = Magnitude.get(w, None)
            if x is not None:
                # Close the current group at this magnitude.
                n += g * x
                g = 0
            else:
                raise NumberException("Unknown number: " + w)
    return n + g
| mit |
adoosii/edx-platform | common/lib/xmodule/xmodule/seq_module.py | 11 | 12219 | """
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import json
import logging
import warnings
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Integer, Scope, Boolean
from xblock.fragment import Fragment
from pkg_resources import resource_string
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule, STUDENT_VIEW
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)

# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
# Icon-class priority for a sequence; in get_icon_class the loop has no
# break, so the LAST matching entry here wins.
class_priority = ['video', 'problem']

# Make '_' a no-op so we can scrape strings: the i18n extractor finds the
# _() calls, but nothing is translated at import time.
_ = lambda text: text
class SequenceFields(object):
    """XBlock field declarations shared by SequenceModule and SequenceDescriptor."""
    has_children = True

    # NOTE: Position is 1-indexed. This is silly, but there are now student
    # positions saved on prod, so it's not easy to fix.
    position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)

    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the date by which problems are due."),
        scope=Scope.settings,
    )

    # Entrance Exam flag -- see cms/contentstore/views/entrance_exam.py for usage
    is_entrance_exam = Boolean(
        display_name=_("Is Entrance Exam"),
        help=_(
            "Tag this course module as an Entrance Exam. "
            "Note, you must enable Entrance Exams for this course setting to take effect."
        ),
        default=False,
        scope=Scope.content,
    )
class ProctoringFields(object):
    """
    Fields that are specific to Proctored or Timed Exams
    """
    # Master switch: when True, student_view delegates to the proctoring
    # subsystem (see SequenceModule._time_limited_student_view).
    is_time_limited = Boolean(
        display_name=_("Is Time Limited"),
        help=_(
            "This setting indicates whether students have a limited time"
            " to view or interact with this courseware component."
        ),
        default=False,
        scope=Scope.settings,
    )

    # None means "no explicit limit configured" (rendered as 0 downstream).
    default_time_limit_minutes = Integer(
        display_name=_("Time Limit in Minutes"),
        help=_(
            "The number of minutes available to students for viewing or interacting with this courseware component."
        ),
        default=None,
        scope=Scope.settings,
    )

    is_proctored_enabled = Boolean(
        display_name=_("Is Proctoring Enabled"),
        help=_(
            "This setting indicates whether this exam is a proctored exam."
        ),
        default=False,
        scope=Scope.settings,
    )

    is_practice_exam = Boolean(
        display_name=_("Is Practice Exam"),
        help=_(
            "This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
        ),
        default=False,
        scope=Scope.settings,
    )
@XBlock.wants('proctoring')
@XBlock.wants('credit')
class SequenceModule(SequenceFields, ProctoringFields, XModule):
    ''' Layout module which lays out content in a temporal sequence
    '''
    # Client-side assets bundled into the "Sequence" JS module.
    js = {
        'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')],
        'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')],
    }
    css = {
        'scss': [resource_string(__name__, 'css/sequence/display.scss')],
    }
    js_module_name = "Sequence"

    def __init__(self, *args, **kwargs):
        # Prefer a position supplied by the runtime ("system") over the
        # stored user-state value; fall back silently on bad input.
        super(SequenceModule, self).__init__(*args, **kwargs)
        # If position is specified in system, then use that instead.
        position = getattr(self.system, 'position', None)
        if position is not None:
            try:
                self.position = int(self.system.position)
            except (ValueError, TypeError):
                # Check for https://openedx.atlassian.net/browse/LMS-6496
                warnings.warn(
                    "Sequential position cannot be converted to an integer: {pos!r}".format(
                        pos=self.system.position,
                    ),
                    RuntimeWarning,
                )

    def get_progress(self):
        ''' Return the total progress, adding total done and total available.
        (assumes that each submodule uses the same "units" for progress.)
        '''
        # TODO: Cache progress or children array?
        children = self.get_children()
        progresses = [child.get_progress() for child in children]
        # Combine child progress counts (bare `reduce`: this is Python 2 code).
        progress = reduce(Progress.add_counts, progresses, None)
        return progress

    def handle_ajax(self, dispatch, data):  # TODO: bounds checking
        ''' get = request.POST instance '''
        if dispatch == 'goto_position':
            # set position to default value if either 'position' argument not
            # found in request or it is a non-positive integer
            position = data.get('position', u'1')
            if position.isdigit() and int(position) > 0:
                self.position = int(position)
            else:
                self.position = 1
            return json.dumps({'success': True})
        raise NotFoundError('Unexpected dispatch type')

    def student_view(self, context):
        """Render this sequence and all of its children for the LMS."""
        # If we're rendering this sequence, but no position is set yet,
        # default the position to the first element
        if self.position is None:
            self.position = 1

        ## Returns a set of all types of all sub-children
        contents = []

        fragment = Fragment()

        # Is this sequential part of a timed or proctored exam?
        if self.is_time_limited:
            view_html = self._time_limited_student_view(context)

            # Do we have an alternate rendering
            # from the edx_proctoring subsystem?
            if view_html:
                fragment.add_content(view_html)
                return fragment

        for child in self.get_display_items():
            progress = child.get_progress()
            rendered_child = child.render(STUDENT_VIEW, context)
            fragment.add_frag_resources(rendered_child)

            # `titles` is a list of titles to inject into the sequential tooltip display.
            # We omit any blank titles to avoid blank lines in the tooltip display.
            titles = [title.strip() for title in child.get_content_titles() if title.strip()]
            childinfo = {
                'content': rendered_child.content,
                'title': "\n".join(titles),
                'page_title': titles[0] if titles else '',
                'progress_status': Progress.to_js_status_str(progress),
                'progress_detail': Progress.to_js_detail_str(progress),
                'type': child.get_icon_class(),
                'id': child.scope_ids.usage_id.to_deprecated_string(),
            }
            # Fall back to the display name when no content title was found.
            if childinfo['title'] == '':
                childinfo['title'] = child.display_name_with_default
            contents.append(childinfo)

        params = {
            'items': contents,
            'element_id': self.location.html_id(),
            'item_id': self.location.to_deprecated_string(),
            'position': self.position,
            'tag': self.location.category,
            'ajax_url': self.system.ajax_url,
        }

        fragment.add_content(self.system.render_template("seq_module.html", params))
        return fragment

    def _time_limited_student_view(self, context):
        """
        Delegated rendering of a student view when in a time
        limited view. This ultimately calls down into edx_proctoring
        pip installed djangoapp
        """
        # None = no overridden view rendering
        view_html = None

        proctoring_service = self.runtime.service(self, 'proctoring')
        credit_service = self.runtime.service(self, 'credit')

        # Is the feature turned on and do we have all required services
        # Also, the ENABLE_PROCTORED_EXAMS feature flag must be set to
        # True and the Sequence in question, should have the
        # fields set to indicate this is a timed/proctored exam
        feature_enabled = (
            proctoring_service and
            credit_service and
            proctoring_service.is_feature_enabled()
        )
        if feature_enabled:
            user_id = self.runtime.user_id
            user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
            course_id = self.runtime.course_id
            content_id = self.location

            # NOTE: this rebinds the `context` parameter with the payload
            # passed down to the proctoring service.
            context = {
                'display_name': self.display_name,
                'default_time_limit_mins': (
                    self.default_time_limit_minutes if
                    self.default_time_limit_minutes else 0
                ),
                'is_practice_exam': self.is_practice_exam
            }

            # inject the user's credit requirements and fulfillments
            if credit_service:
                credit_state = credit_service.get_credit_state(user_id, course_id)
                if credit_state:
                    context.update({
                        'credit_state': credit_state
                    })

            # See if the edx-proctoring subsystem wants to present
            # a special view to the student rather
            # than the actual sequence content
            #
            # This will return None if there is no
            # overridden view to display given the
            # current state of the user
            view_html = proctoring_service.get_student_view(
                user_id=user_id,
                course_id=course_id,
                content_id=content_id,
                context=context,
                user_role=user_role_in_course
            )

        return view_html

    def get_icon_class(self):
        """Pick the icon class for this sequence from its children's icons."""
        child_classes = set(child.get_icon_class()
                            for child in self.get_children())
        new_class = 'other'
        # NOTE(review): no `break`, so the LAST matching entry of
        # class_priority wins ('problem' beats 'video' when both are
        # present) -- presumably intentional, but confirm before reordering
        # class_priority.
        for c in class_priority:
            if c in child_classes:
                new_class = c
        return new_class
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
    """
    A Sequences Descriptor object
    """
    mako_template = 'widgets/sequence-edit.html'
    module_class = SequenceModule
    show_in_read_only_mode = True

    js = {
        'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
    }
    js_module_name = "SequenceDescriptor"

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Parse child usage ids out of a <sequential> XML element.

        Children that fail to parse are logged (and reported to the error
        tracker when one is configured) and skipped rather than aborting.
        """
        children = []
        for child in xml_object:
            try:
                child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
                children.append(child_block.scope_ids.usage_id)
            except Exception as e:
                log.exception("Unable to load child when parsing Sequence. Continuing...")
                if system.error_tracker is not None:
                    system.error_tracker(u"ERROR: {0}".format(e))
                continue
        return {}, children

    def definition_to_xml(self, resource_fs):
        # Serialize as a <sequential> element with one child node per child.
        xml_object = etree.Element('sequential')
        for child in self.get_children():
            self.runtime.add_block_as_child_node(child, xml_object)
        return xml_object

    @property
    def non_editable_metadata_fields(self):
        """
        `is_entrance_exam` should not be editable in the Studio settings editor.
        """
        non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
        non_editable_fields.append(self.fields['is_entrance_exam'])
        return non_editable_fields

    def index_dictionary(self):
        """
        Return dictionary prepared with module content and type for indexing.
        """
        # return key/value fields in a Python dict object
        # values may be numeric / string or dict
        # default implementation is an empty dict
        xblock_body = super(SequenceDescriptor, self).index_dictionary()
        html_body = {
            "display_name": self.display_name,
        }
        # Merge into any content the parent already produced.
        if "content" in xblock_body:
            xblock_body["content"].update(html_body)
        else:
            xblock_body["content"] = html_body
        xblock_body["content_type"] = "Sequence"
        return xblock_body
| agpl-3.0 |
AndroidOpenDevelopment/android_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/trace_profiler.py | 14 | 1966 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import codecs
from telemetry.core.platform import profiler
class TraceProfiler(profiler.Profiler):
def __init__(self, browser_backend, platform_backend, output_path, state,
categories=None):
super(TraceProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
assert self._browser_backend.supports_tracing
# We always want flow events when tracing via telemetry.
categories_with_flow = 'disabled-by-default-toplevel.flow'
if categories:
categories_with_flow = ',%s' % categories
self._browser_backend.StartTracing(categories_with_flow, timeout=10)
@classmethod
def name(cls):
return 'trace'
@classmethod
def is_supported(cls, browser_type):
return True
def CollectProfile(self):
print 'Processing trace...'
trace_result = self._browser_backend.StopTracing()
trace_file = '%s.json' % self._output_path
with codecs.open(trace_file, 'w', encoding='utf-8') as f:
trace_result.Serialize(f)
print 'Trace saved as %s' % trace_file
print 'To view, open in chrome://tracing'
return [trace_file]
class TraceDetailedProfiler(TraceProfiler):
  """Trace profiler preset that also enables the cc.debug tracing categories."""

  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(TraceDetailedProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state,
        categories='disabled-by-default-cc.debug*')

  @classmethod
  def name(cls):
    # Command-line name of this profiler.
    return 'trace-detailed'
class TraceAllProfiler(TraceProfiler):
  """Trace profiler preset that enables every disabled-by-default category."""

  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(TraceAllProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state,
        categories='disabled-by-default-*')

  @classmethod
  def name(cls):
    # Command-line name of this profiler.
    return 'trace-all'
| bsd-3-clause |
Southpaw-TACTIC/TACTIC | src/context/client/pyasm/application/common/session_builder.py | 2 | 16991 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['SessionBuilder']
import os, sys
from xml.dom.minidom import parseString
from .base_app_info import *
from .app_environment import AppEnvironment
from .application import AppException
class SessionBuilder(object):
    '''Builds an application session from an "execute" XML document.

    execute() walks the document's nodes and replays file downloads/loads,
    references and attribute settings into the running application;
    subclasses provide the app-specific import/publish operations.
    '''
def __init__(self):
    # Resolve the current application environment and its app handle once.
    self.env = AppEnvironment.get()
    self.app = self.env.get_app()
    # Per-node bookkeeping collected while building the session.
    self.node_data = {}
    # Tracks how the most recent asset was brought in (e.g. 'reference').
    self.load_mode = ''
def get_tmpdir(self):
    # Temporary working directory, delegated to the app environment.
    return self.env.get_tmpdir()
def get_sandbox_dir(self):
    # Sandbox (working files) directory, delegated to the app environment.
    return self.env.get_sandbox_dir()
def import_file(self, node_name, path, instantiation='import'):
raise Exception("Implement import_file")
def import_anim(self, instance, path, created_node=""):
raise Exception("Implement import_anim")
def load_file(self, path, node_name):
    # Open *path* as the current scene. *node_name* is unused here but kept
    # for interface parity with the import_* operations.
    self.app.load(path)
def check_existence(self, tactic_node_name):
    ''' check if this node exist '''
    # Missing nodes only produce a warning (not an error): a stale node
    # reference could otherwise silently invalidate TacticNodeData.
    if not self.app.node_exists(tactic_node_name):
        info = BaseAppInfo.get()
        info.report_warning('opened node name missing', \
            '[%s] cannot be found in the scene, which could '\
            'cause invalid TacticNodeData.\n' %tactic_node_name)
def handle_mel(self, cmd_str):
cmds = cmd_str.split("\n")
for cmd in cmds:
if cmd == "":
continue
self.app.mel(cmd)
def publish_file(self, asset_code, node_name):
raise Exception("Implement publish_file")
def set_attr(self, node_name, node, current_node_name):
'''set attribute for the current app'''
attr = node.getAttribute("attr")
value = node.getAttribute("value")
attr_type = node.getAttribute("type")
self.app.set_attr(node_name,attr,value,attr_type)
def execute(self, xml):
    '''Execute a series of application commands described by *xml*.

    The XML document's child elements are processed in order; each
    element name (file, reference, replace, import, import_media, open,
    anim, add_attr, current_node, set_node_attr, add_node, add_to_set,
    mel, save, warning, publish) maps to one operation against the
    wrapped application (self.app) and environment (self.env).
    '''
    info = BaseAppInfo.get()

    dom = parseString(xml)
    root = dom.documentElement
    nodes = root.childNodes

    # initialize applicaton object
    self.app.set_verbose()

    # State threaded through the command sequence: the most recently
    # downloaded/selected file and the node it produced.
    current_path = None
    current_node_name = None
    current_node_naming = None
    append_attr = False

    for node in nodes:
        node_name = node.nodeName

        if node_name == "file":
            url = node.getAttribute("url")
            to = node.getAttribute("to")
            md5_checksum = node.getAttribute("md5")
            connection = node.getAttribute("connection")

            # set the tactic asset directory
            tactic_asset_dir = node.getAttribute("tactic_asset_dir")
            if tactic_asset_dir:
                tactic_asset_dir = str(tactic_asset_dir)
                os.putenv("TACTIC_ASSET_DIR", tactic_asset_dir)
                #os.environ["TACTIC_ASSET_DIR"] = tactic_asset_dir
                if os.name == 'nt' and not os.getenv("TACTIC_ASSET_DIR"):
                    Common.set_sys_env('TACTIC_ASSET_DIR', tactic_asset_dir)

            # if the file is from the web, then download
            if url.startswith("http://"):
                current_path = self.env.download(url, to, md5_checksum)
            elif connection == "perforce":
                version = node.getAttribute("version")
                from pyasm.application.perforce import Perforce
                perforce = Perforce()
                root = perforce.get_root()
                url = "%s%s" % (root, url)
                if version:
                    ret_val = perforce.sync("%s#%s" % (url, version))
                else:
                    ret_val = perforce.sync(url)
                current_path = url
            else:
                current_path = url

            # set the sandbox directory
            sandbox_dir = node.getAttribute("sandbox_dir")
            self.env.set_sandbox_dir(sandbox_dir)

        elif node_name == "reference":
            namespace = node.getAttribute('namespace')
            instance = node.getAttribute("instance")
            asset_code = node.getAttribute("asset_code")
            # NOTE: 'set' shadows the builtin; kept for compatibility.
            set = node.getAttribute("set")
            tactic_node_name = node.getAttribute("node_name")
            if not tactic_node_name:
                tactic_node_name = asset_code

            # build up the expected node name
            if set:
                node_name = set
            else:
                node_naming = self.app.get_node_naming()
                node_naming.set_asset_code(asset_code)
                node_naming.set_namespace(namespace)
                node_name = node_naming.build_node_name()

            # NOTE: this only works in some situations
            unique = node.getAttribute("unique")
            if unique == "true" and self.app.node_exists(node_name):
                info.report_warning(node_name,
                    "WARNING: Node [%s] already exists, skipping" % node_name,
                    type='urgent')
                current_node_naming = None
                current_node_name = None
                continue

            # remember the real node naming
            current_node_name = self.import_file(node_name, current_path,
                'reference')

            # check if tactic_node_name with namespace exists, if so, make it current
            # NOTE: THIS IS HIGHLY MAYA SPECIFIC, comment it out
            '''
            tactic_node_name = '%s:%s' %(namespace, tactic_node_name)
            if self.app.node_exists(tactic_node_name):
                current_node_name = tactic_node_name
            '''
            current_node_naming = self.app.get_node_naming(current_node_name)

            # tracks the state of this loading
            self.load_mode = "reference"

        elif node_name == "replace":
            '''replace instance with the current file'''
            namespace = node.getAttribute("namespace")
            asset_code = node.getAttribute("asset_code")
            set = node.getAttribute("set")
            replacee = node.getAttribute('replacee')
            if node.getAttribute('append_attr'):
                append_attr = True

            # build up the expected node name
            if set:
                node_name = set
            elif replacee:
                node_name = replacee
            else:
                node_naming = self.app.get_node_naming()
                node_naming.set_asset_code(asset_code)
                node_naming.set_namespace(namespace)
                node_name = node_naming.build_node_name()

            if not self.app.node_exists(node_name):
                raise TacticException(
                    "Error: node [%s] does not exist in session" % node_name)

            # remember the real node naming
            rtn_path = self.app.replace_reference(node_name, current_path)
            if rtn_path != current_path:
                info.report_warning('Update failed',
                    '[%s] failed to load and replace reference.\n' % current_path)
            current_node_name = node_name
            current_node_naming = self.app.get_node_naming(current_node_name)

        elif node_name == "import" or node_name == "import_media":
            instantiation_mode = node_name
            namespace = node.getAttribute('namespace')
            instance = node.getAttribute("instance")
            asset_code = node.getAttribute("asset_code")
            set = node.getAttribute("set")
            is_shot = node.getAttribute("shot") == 'true'
            use_namespace = node.getAttribute("use_namespace")
            tactic_node_name = node.getAttribute("node_name")
            if not tactic_node_name:
                tactic_node_name = asset_code

            # build up the expected node name
            if set:
                node_name = set
            elif is_shot:
                node_name = asset_code
            else:
                node_naming = self.app.get_node_naming()
                node_naming.set_asset_code(asset_code)
                node_naming.set_namespace(namespace)
                node_name = node_naming.build_node_name()

            unique = node.getAttribute("unique")
            if unique == "true" and self.app.node_exists(node_name):
                info.report_warning(node_name,
                    'Node [%s] already exists, skipping.' % node_name,
                    type='urgent')
                #print "WARNING: node '%s' already exists" % node_name
                current_node_naming = None
                node_name = None
                continue

            if use_namespace == "true":
                use_namespace = True
                # This causes double namespace, comment it out for now
                #tactic_node_name = '%s:%s' %(namespace, tactic_node_name)
            else:
                use_namespace = False

            # import the file
            current_node_name = self.import_file(node_name, current_path,
                instantiation_mode, use_namespace=use_namespace)

            # on very first checkin which uses plain asset code, this is not true
            if self.app.node_exists(tactic_node_name):
                current_node_name = tactic_node_name
            self.app.message("current node name: %s" % current_node_name)

            # remember the real node naming
            current_node_naming = self.app.get_node_naming(current_node_name)

        elif node_name == "open":
            tactic_node_name = node.getAttribute("node_name")
            asset_code = node.getAttribute("asset_code")
            self.load_file(current_path, asset_code)
            if tactic_node_name:
                node_naming = self.app.get_node_naming()
                node_naming.set_node_name(tactic_node_name)
            else:
                #namespace = node.getAttribute("namespace")
                node_naming = self.app.get_node_naming()
                node_naming.set_asset_code(asset_code)
                # open mode has no namespace
                #node_naming.set_namespace(namespace)
                tactic_node_name = node_naming.build_node_name()

            # set the user environment
            sandbox_dir = self.get_sandbox_dir()
            basename = os.path.basename(current_path)
            self.app.set_user_environment(sandbox_dir, basename)
            self.check_existence(tactic_node_name)

            # remember the real node naming
            current_node_name = tactic_node_name
            current_node_naming = node_naming

        elif node_name == "anim":
            # always put the animation on the current instance. If there
            # is no current instance, then it must be specified
            orig_instance = node.getAttribute("instance")
            asset_code = current_node_naming.get_asset_code()
            node_naming = self.app.get_node_naming()
            node_naming.set_instance(orig_instance)
            node_naming.set_asset_code(asset_code)
            node_name = node_naming.build_node_name()
            self.import_anim(node_name, current_path, current_node_name)

        elif node_name == "add_attr":
            node_name = node.getAttribute("node")
            snap_type = node.getAttribute("snapshot_type")
            use_namespace = node.getAttribute("use_namespace")
            if node_name == "":
                node_name = current_node_name
            elif node_name == "{top_node}":
                node_name = current_node_name
            elif use_namespace == "false":
                pass
            # shot snapshot uses the node_name as is with namespace probably
            elif snap_type == 'shot' and self.load_mode != "reference":
                pass
            else:
                # build the full node name
                instance = ''
                if current_node_naming:
                    instance = current_node_naming.get_instance()
                node_naming = self.app.get_node_naming()
                node_naming.set_instance(instance)
                node_naming.set_asset_code(node_name)
                node_name = node_naming.get_node_name()

            attr = node.getAttribute("attr")
            value = node.getAttribute("value")
            attr_type = node.getAttribute("type")
            try:
                self.app.add_attr(node_name, attr, attr_type)
                self.set_attr(node_name, node, current_node_name)
            except AppException as e:
                info.report_warning('MEL Script Error', str(e))
                continue

        elif node_name == "current_node":
            asset_code = node.getAttribute("asset_code")
            namespace = node.getAttribute("namespace")
            node_naming = self.app.get_node_naming()
            node_naming.set_namespace(namespace)
            node_naming.set_asset_code(asset_code)
            # if the current node is a set-type node, this
            # current node_name is not really used, a node attribute
            # will be specified instead for <set_node_attr/>
            current_node_name = node_naming.get_node_name()

        elif node_name == "set_node_attr":
            node_name = node.getAttribute("node")
            if node_name == "":
                node_name = current_node_name
            if not node_name:
                continue
            # was: self.node_data.has_key(node_name) -- Python-2-only API
            if node_name in self.node_data:
                node_data = self.node_data[node_name]
            else:
                node_data = self.app.get_node_data(node_name)
                # clears it for the first time if it is not in append_attr mode
                # in case the user added some junk data in it
                if not append_attr:
                    node_data.clear()
                self.node_data[node_name] = node_data

            name = node.getAttribute("name")
            attr = node.getAttribute("attr")
            value = node.getAttribute("value")
            try:
                node_data.set_attr(name, attr, value)
                node_data.commit()
            except AppException as e:
                info.report_warning('MEL Script Set TacticNodedata Error',
                    str(e))
                continue

        elif node_name == "add_node":
            name = node.getAttribute("name")
            type = node.getAttribute("type")
            self.app.add_node(type, name)

        elif node_name == "add_to_set":
            set_name = node.getAttribute("set_name")
            set_item = node.getAttribute("instance")
            node_name = current_node_name
            # use set_item if it is defined
            if set_item:
                node_name = set_item
            self.app.create_set(set_name)
            if node_name:
                self.app.add_to_set(set_name, node_name)

        elif node_name == "mel":
            child = node.firstChild
            cmd = child.nodeValue
            self.handle_mel(cmd)

        elif node_name == "save":
            self.app.save_file()

        elif node_name == "warning":
            info = BaseAppInfo.get()
            label = current_node_name
            warning_label = node.getAttribute("label")
            if warning_label:
                label = warning_label
            info.report_warning(label, '%s\n' % node.getAttribute("msg"))

        # checkin/publish function
        elif node_name == "publish":
            code = node.getAttribute("asset_code")
            node_name = node.getAttribute("node")
            self.publish_file(code, node_name)
| epl-1.0 |
aljscott/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py | 123 | 8022 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cPickle
from webkitpy.layout_tests.models import test_expectations
def is_reftest_failure(failure_list):
    """Return the (possibly empty, hence falsy) set of reftest mismatch
    failure types present in *failure_list*."""
    reftest_types = set((FailureReftestMismatch,
                         FailureReftestMismatchDidNotOccur,
                         FailureReftestNoImagesGenerated))
    return reftest_types.intersection(type(f) for f in failure_list)
# FIXME: This is backwards. Each TestFailure subclass should know what
# test_expectation type it corresponds too. Then this method just
# collects them all from the failure list and returns the worst one.
def determine_result_type(failure_list):
    """Map a list of TestFailure instances to the single
    test_expectations result type that best describes it, "best"
    meaning the most severe failure present.

    Returns:
        one of the test_expectations result types - PASS, FAIL, CRASH, etc."""
    if not failure_list or len(failure_list) == 0:
        return test_expectations.PASS

    failure_types = [type(f) for f in failure_list]
    # Severity order: crash > timeout > early exit > missing baseline.
    if FailureCrash in failure_types:
        return test_expectations.CRASH
    if FailureTimeout in failure_types:
        return test_expectations.TIMEOUT
    if FailureEarlyExit in failure_types:
        return test_expectations.SKIP
    if (FailureMissingResult in failure_types or
            FailureMissingImage in failure_types or
            FailureMissingImageHash in failure_types or
            FailureMissingAudio in failure_types):
        return test_expectations.MISSING

    is_text_failure = FailureTextMismatch in failure_types
    is_image_failure = (FailureImageHashIncorrect in failure_types or
                        FailureImageHashMismatch in failure_types)
    is_audio_failure = FailureAudioMismatch in failure_types
    if is_text_failure and is_image_failure:
        return test_expectations.IMAGE_PLUS_TEXT
    if is_text_failure:
        return test_expectations.TEXT
    if is_image_failure or is_reftest_failure(failure_list):
        return test_expectations.IMAGE
    if is_audio_failure:
        return test_expectations.AUDIO
    raise ValueError("unclassifiable set of failures: "
                     + str(failure_types))
class TestFailure(object):
    """Abstract base class for all test-failure types.

    Two failures compare equal exactly when they are instances of the
    same class; failures round-trip through pickle via dumps()/loads().
    """

    @staticmethod
    def loads(s):
        """Deserialize a TestFailure previously produced by dumps()."""
        return cPickle.loads(s)

    def dumps(self):
        """Serialize this failure to a pickle string."""
        return cPickle.dumps(self)

    def message(self):
        """Return a string describing the failure in more detail."""
        raise NotImplementedError

    def driver_needs_restart(self):
        """Whether DumpRenderTree/WebKitTestRunner must be killed before the next test."""
        return False

    def __eq__(self, other):
        return type(self).__name__ == type(other).__name__

    def __ne__(self, other):
        return type(self).__name__ != type(other).__name__

    def __hash__(self):
        return hash(type(self).__name__)
class FailureTimeout(TestFailure):
    """The test exceeded its time budget."""

    def __init__(self, is_reftest=False):
        super(FailureTimeout, self).__init__()
        # Whether the timed-out test was a reference test.
        self.is_reftest = is_reftest

    def message(self):
        return "test timed out"

    def driver_needs_restart(self):
        # A hung driver cannot be reused for the next test.
        return True
class FailureCrash(TestFailure):
    """A process crashed while running the test."""

    def __init__(self, is_reftest=False, process_name='DumpRenderTree', pid=None):
        super(FailureCrash, self).__init__()
        # Name of the crashed process (defaults to the test driver).
        self.process_name = process_name
        # Process id of the crashed process, if known.
        self.pid = pid
        # Whether the crashing test was a reference test.
        self.is_reftest = is_reftest

    def message(self):
        if self.pid:
            return "%s crashed [pid=%d]" % (self.process_name, self.pid)
        return self.process_name + " crashed"

    def driver_needs_restart(self):
        # A crashed driver must be restarted before the next test.
        return True
class FailureMissingResult(TestFailure):
    """The -expected.txt baseline for the test was not found."""

    def message(self):
        return "-expected.txt was missing"
class FailureTextMismatch(TestFailure):
    """The actual text output differed from the -expected.txt baseline."""

    def message(self):
        return "text diff"
class FailureMissingImageHash(TestFailure):
    """The -expected.png baseline lacked an embedded checksum."""

    def message(self):
        return "-expected.png was missing an embedded checksum"
class FailureMissingImage(TestFailure):
    """The -expected.png baseline for the test was not found."""

    def message(self):
        return "-expected.png was missing"
class FailureImageHashMismatch(TestFailure):
    """The rendered image differed from the -expected.png baseline."""

    def __init__(self, diff_percent=0):
        super(FailureImageHashMismatch, self).__init__()
        # Percentage of differing pixels relative to the baseline.
        self.diff_percent = diff_percent

    def message(self):
        return "image diff"
class FailureImageHashIncorrect(TestFailure):
    """The checksum embedded in -expected.png did not match."""

    def message(self):
        return "-expected.png embedded checksum is incorrect"
class FailureReftestMismatch(TestFailure):
    """A reftest rendered differently from its reference file."""

    def __init__(self, reference_filename=None):
        super(FailureReftestMismatch, self).__init__()
        # Path of the reference file the test was compared against.
        self.reference_filename = reference_filename
        # Pixel diff percentage; starts unset until computed elsewhere.
        self.diff_percent = None

    def message(self):
        return "reference mismatch"
class FailureReftestMismatchDidNotOccur(TestFailure):
    """A mismatch reftest unexpectedly matched its reference file."""

    def __init__(self, reference_filename=None):
        super(FailureReftestMismatchDidNotOccur, self).__init__()
        # Path of the reference file the test was compared against.
        self.reference_filename = reference_filename

    def message(self):
        return "reference mismatch didn't happen"
class FailureReftestNoImagesGenerated(TestFailure):
    """A reftest produced no pixel results to compare."""

    def __init__(self, reference_filename=None):
        super(FailureReftestNoImagesGenerated, self).__init__()
        # Path of the reference file the test was compared against.
        self.reference_filename = reference_filename

    def message(self):
        return "reference didn't generate pixel results."
class FailureMissingAudio(TestFailure):
    """The expected audio baseline for the test was not found."""

    def message(self):
        return "expected audio result was missing"
class FailureAudioMismatch(TestFailure):
    """The actual audio output differed from the expected baseline."""

    def message(self):
        return "audio mismatch"
class FailureEarlyExit(TestFailure):
    """The test was skipped because the test run exited early."""

    def message(self):
        return "skipped due to early exit"
# Convenient collection of all failure classes for anything that might
# need to enumerate over them all.
# NOTE: keep this tuple in sync when adding a new TestFailure subclass.
ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult,
                       FailureTextMismatch, FailureMissingImageHash,
                       FailureMissingImage, FailureImageHashMismatch,
                       FailureImageHashIncorrect, FailureReftestMismatch,
                       FailureReftestMismatchDidNotOccur,
                       FailureReftestNoImagesGenerated,
                       FailureMissingAudio, FailureAudioMismatch,
                       FailureEarlyExit)
| bsd-3-clause |
BlindHunter/django | django/utils/ipv6.py | 208 | 7967 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. https://github.com/google/ipaddr-py
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message=_("This is not a valid IPv6 address.")):
    """
    Cleans an IPv6 address string.

    Validity is checked by calling is_valid_ipv6_address() - if an
    invalid address is passed, ValidationError is raised.

    Replaces the longest continuous zero-sequence with "::" and
    removes leading zeroes and makes sure all hextets are lowercase.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: An error message used in the ValidationError.

    Returns:
        A compressed IPv6 address, or the same value
    """
    # best_* tracks the longest run of all-zero hextets found so far;
    # doublecolon_* tracks the run currently being scanned.
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0

    if not is_valid_ipv6_address(ip_str):
        raise ValidationError(error_message, code='invalid')

    # This algorithm can only handle fully exploded
    # IP strings
    ip_str = _explode_shorthand_ip_string(ip_str)
    ip_str = _sanitize_ipv4_mapping(ip_str)

    # If needed, unpack the IPv4 and return straight away
    # - no need in running the rest of the algorithm
    if unpack_ipv4:
        ipv4_unpacked = _unpack_ipv4(ip_str)
        if ipv4_unpacked:
            return ipv4_unpacked

    hextets = ip_str.split(":")

    for index in range(len(hextets)):
        # Remove leading zeroes
        hextets[index] = hextets[index].lstrip('0')
        if not hextets[index]:
            hextets[index] = '0'

        # Determine best hextet to compress
        if hextets[index] == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                # Start of a sequence of zeros.
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                # This is the longest sequence of zeros so far.
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            # End of the current zero run; reset the scanner.
            doublecolon_len = 0
            doublecolon_start = -1

    # Compress the most suitable hextet
    if best_doublecolon_len > 1:
        best_doublecolon_end = (best_doublecolon_start +
                                best_doublecolon_len)
        # For zeros at the end of the address.
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        # For zeros at the beginning of the address.
        if best_doublecolon_start == 0:
            hextets = [''] + hextets

    result = ":".join(hextets)

    return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
    """
    Ensure we have a valid IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A boolean, True if this is a valid IPv6 address.
    """
    # NOTE(review): imported lazily, presumably to avoid a circular
    # import with django.core.validators -- confirm.
    from django.core.validators import validate_ipv4_address

    # We need to have at least one ':'.
    if ':' not in ip_str:
        return False

    # We can only have one '::' shortener.
    if ip_str.count('::') > 1:
        return False

    # '::' should be encompassed by start, digits or end.
    if ':::' in ip_str:
        return False

    # A single colon can neither start nor end an address.
    if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
            (ip_str.endswith(':') and not ip_str.endswith('::'))):
        return False

    # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
    if ip_str.count(':') > 7:
        return False

    # If we have no concatenation, we need to have 8 fields with 7 ':'.
    if '::' not in ip_str and ip_str.count(':') != 7:
        # We might have an IPv4 mapped address.
        if ip_str.count('.') != 3:
            return False

    ip_str = _explode_shorthand_ip_string(ip_str)

    # Now that we have that all squared away, let's check that each of the
    # hextets are between 0x0 and 0xFFFF.
    for hextet in ip_str.split(':'):
        if hextet.count('.') == 3:
            # If we have an IPv4 mapped address, the IPv4 portion has to
            # be at the end of the IPv6 portion.
            if not ip_str.split(':')[-1] == hextet:
                return False
            try:
                validate_ipv4_address(hextet)
            except ValidationError:
                return False
        else:
            try:
                # a value error here means that we got a bad hextet,
                # something like 0xzzzz
                if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                    return False
            except ValueError:
                return False
    return True
def _explode_shorthand_ip_string(ip_str):
    """
    Expand a shortened IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A string, the expanded IPv6 address.
    """
    if not _is_shorthand_ip(ip_str):
        # We've already got a longhand ip_str.
        return ip_str

    new_ip = []
    hextet = ip_str.split('::')

    # If there is a ::, we need to expand it with zeroes
    # to get to 8 hextets - unless there is a dot in the last hextet,
    # meaning we're doing v4-mapping
    if '.' in ip_str.split(':')[-1]:
        fill_to = 7
    else:
        fill_to = 8

    if len(hextet) > 1:
        # Total number of hextets present on both sides of the '::'.
        sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
        new_ip = hextet[0].split(':')

        for __ in range(fill_to - sep):
            new_ip.append('0000')
        new_ip += hextet[1].split(':')
    else:
        new_ip = ip_str.split(':')

    # Now need to make sure every hextet is 4 lower case characters.
    # If a hextet is < 4 characters, we've got missing leading 0's.
    ret_ip = []
    for hextet in new_ip:
        ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
    return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| bsd-3-clause |
mverrilli/kombu | kombu/tests/test_common.py | 5 | 13802 | from __future__ import absolute_import
import socket
from amqp import RecoverableConnectionError
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX,
)
from .case import Case, ContextMock, Mock, MockPool, patch
class test_ignore_errors(Case):
    """Tests for :func:`kombu.common.ignore_errors`."""

    def test_ignored(self):
        # Errors listed in the connection's channel/connection error
        # classes are swallowed, both in context-manager form and when
        # calling a function through ignore_errors().
        connection = Mock()
        connection.channel_errors = (KeyError,)
        connection.connection_errors = (KeyError,)

        with ignore_errors(connection):
            raise KeyError()

        def raising():
            raise KeyError()

        ignore_errors(connection, raising)

        # With no registered error classes the exception propagates.
        connection.channel_errors = connection.connection_errors = ()

        with self.assertRaises(KeyError):
            with ignore_errors(connection):
                raise KeyError()
class test_declaration_cached(Case):
    """Tests for :func:`kombu.common.declaration_cached`."""

    def test_when_cached(self):
        chan = Mock()
        chan.connection.client.declared_entities = ['foo']
        self.assertTrue(declaration_cached('foo', chan))

    def test_when_not_cached(self):
        chan = Mock()
        chan.connection.client.declared_entities = ['bar']
        self.assertFalse(declaration_cached('foo', chan))
class test_Broadcast(Case):
    """Tests for the :class:`kombu.common.Broadcast` queue helper."""

    def test_arguments(self):
        # Default: unique 'bcast.' queue name, fanout exchange named
        # after the broadcast, auto-deleted.
        q = Broadcast(name='test_Broadcast')
        self.assertTrue(q.name.startswith('bcast.'))
        self.assertEqual(q.alias, 'test_Broadcast')
        self.assertTrue(q.auto_delete)
        self.assertEqual(q.exchange.name, 'test_Broadcast')
        self.assertEqual(q.exchange.type, 'fanout')

        # An explicit queue name overrides the generated one.
        q = Broadcast('test_Broadcast', 'explicit_queue_name')
        self.assertEqual(q.name, 'explicit_queue_name')
        self.assertEqual(q.exchange.name, 'test_Broadcast')
class test_maybe_declare(Case):
    """Tests for :func:`kombu.common.maybe_declare` caching behaviour."""

    def test_cacheable(self):
        channel = Mock()
        client = channel.connection.client = Mock()
        client.declared_entities = set()

        entity = Mock()
        entity.can_cache_declaration = True
        entity.auto_delete = False
        entity.is_bound = True
        entity.channel = channel

        maybe_declare(entity, channel)
        self.assertEqual(entity.declare.call_count, 1)
        self.assertIn(
            hash(entity), channel.connection.client.declared_entities,
        )

        # Second declaration is served from the cache.
        maybe_declare(entity, channel)
        self.assertEqual(entity.declare.call_count, 1)

        # Declaring on an entity without a live connection fails.
        entity.channel.connection = None
        with self.assertRaises(RecoverableConnectionError):
            maybe_declare(entity)

    def test_binds_entities(self):
        # An unbound entity is bound to the channel before declaring.
        channel = Mock()
        channel.connection.client.declared_entities = set()

        entity = Mock()
        entity.can_cache_declaration = True
        entity.is_bound = False
        entity.bind.return_value = entity
        entity.bind.return_value.channel = channel

        maybe_declare(entity, channel)
        entity.bind.assert_called_with(channel)

    def test_with_retry(self):
        # retry=True routes the declaration through connection.ensure().
        channel = Mock()
        client = channel.connection.client = Mock()
        client.declared_entities = set()

        entity = Mock()
        entity.can_cache_declaration = True
        entity.is_bound = True
        entity.channel = channel

        maybe_declare(entity, channel, retry=True)
        self.assertTrue(channel.connection.client.ensure.call_count)
class test_replies(Case):
    """Tests for :func:`kombu.common.send_reply` and
    :func:`kombu.common.collect_replies`."""

    def test_send_reply(self):
        # The reply is published to the request's reply_to routing key
        # with the request's correlation id and encoding.
        req = Mock()
        req.content_type = 'application/json'
        req.content_encoding = 'binary'
        req.properties = {'reply_to': 'hello',
                          'correlation_id': 'world'}
        channel = Mock()
        exchange = Mock()
        exchange.is_bound = True
        exchange.channel = channel
        producer = Mock()
        producer.channel = channel
        producer.channel.connection.client.declared_entities = set()
        send_reply(exchange, req, {'hello': 'world'}, producer)

        self.assertTrue(producer.publish.call_count)
        args = producer.publish.call_args
        self.assertDictEqual(args[0][0], {'hello': 'world'})
        self.assertDictEqual(args[1], {'exchange': exchange,
                                       'routing_key': 'hello',
                                       'correlation_id': 'world',
                                       'serializer': 'json',
                                       'retry': False,
                                       'retry_policy': None,
                                       'content_encoding': 'binary'})

    @patch('kombu.common.itermessages')
    def test_collect_replies_with_ack(self, itermessages):
        # no_ack=False: each message is acked and the channel is
        # notified once iteration is exhausted.
        conn, channel, queue = Mock(), Mock(), Mock()
        body, message = Mock(), Mock()
        itermessages.return_value = [(body, message)]
        it = collect_replies(conn, channel, queue, no_ack=False)
        m = next(it)
        self.assertIs(m, body)
        itermessages.assert_called_with(conn, channel, queue, no_ack=False)
        message.ack.assert_called_with()

        with self.assertRaises(StopIteration):
            next(it)
        channel.after_reply_message_received.assert_called_with(queue.name)

    @patch('kombu.common.itermessages')
    def test_collect_replies_no_ack(self, itermessages):
        # Default is no_ack=True: messages are not acked.
        conn, channel, queue = Mock(), Mock(), Mock()
        body, message = Mock(), Mock()
        itermessages.return_value = [(body, message)]
        it = collect_replies(conn, channel, queue)
        m = next(it)
        self.assertIs(m, body)
        itermessages.assert_called_with(conn, channel, queue, no_ack=True)
        self.assertFalse(message.ack.called)

    @patch('kombu.common.itermessages')
    def test_collect_replies_no_replies(self, itermessages):
        # No messages: the channel must not be notified.
        conn, channel, queue = Mock(), Mock(), Mock()
        itermessages.return_value = []
        it = collect_replies(conn, channel, queue)
        with self.assertRaises(StopIteration):
            next(it)

        self.assertFalse(channel.after_reply_message_received.called)
class test_insured(Case):
    """Tests for :func:`kombu.common.insured` and its helpers."""

    @patch('kombu.common.logger')
    def test_ensure_errback(self, logger):
        common._ensure_errback('foo', 30)
        self.assertTrue(logger.error.called)

    def test_revive_connection(self):
        # on_revive callback receives the new channel; None is accepted.
        on_revive = Mock()
        channel = Mock()
        common.revive_connection(Mock(), channel, on_revive)
        on_revive.assert_called_with(channel)

        common.revive_connection(Mock(), channel, None)

    def get_insured_mocks(self, insured_returns=('works', 'ignored')):
        # Helper (not a test): connection/pool/fun mocks where
        # conn.autoretry returns a callable producing *insured_returns*.
        conn = ContextMock()
        pool = MockPool(conn)
        fun = Mock()
        insured = conn.autoretry.return_value = Mock()
        insured.return_value = insured_returns
        return conn, pool, fun, insured

    def test_insured(self):
        conn, pool, fun, insured = self.get_insured_mocks()

        ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
        self.assertEqual(ret, 'works')
        conn.ensure_connection.assert_called_with(
            errback=common._ensure_errback,
        )

        self.assertTrue(insured.called)
        i_args, i_kwargs = insured.call_args
        self.assertTupleEqual(i_args, (2, 2))
        self.assertDictEqual(i_kwargs, {'foo': 'bar',
                                        'connection': conn})

        self.assertTrue(conn.autoretry.called)
        ar_args, ar_kwargs = conn.autoretry.call_args
        self.assertTupleEqual(ar_args, (fun, conn.default_channel))
        self.assertTrue(ar_kwargs.get('on_revive'))
        self.assertTrue(ar_kwargs.get('errback'))

    def test_insured_custom_errback(self):
        conn, pool, fun, insured = self.get_insured_mocks()

        custom_errback = Mock()
        common.insured(pool, fun, (2, 2), {'foo': 'bar'},
                       errback=custom_errback)
        conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer(object):
    """Minimal stand-in for a kombu Consumer.

    While used as a context manager, the instance registers itself in
    the class-level ``consumers`` set so tests can see which consumers
    are currently active.
    """
    consumers = set()

    def __init__(self, channel, queues=None, callbacks=None, **kwargs):
        self.channel = channel
        self.queues = queues
        self.callbacks = callbacks

    def __enter__(self):
        type(self).consumers.add(self)
        return self

    def __exit__(self, *exc_info):
        type(self).consumers.discard(self)
class test_itermessages(Case):
    """Tests for :func:`kombu.common.itermessages`."""

    class MockConnection(object):
        # Fake connection whose drain_events feeds every registered
        # MockConsumer callback a ('body', 'message') pair, or raises
        # socket.timeout when should_raise_timeout is set.
        should_raise_timeout = False

        def drain_events(self, **kwargs):
            if self.should_raise_timeout:
                raise socket.timeout()
            for consumer in MockConsumer.consumers:
                for callback in consumer.callbacks:
                    callback('body', 'message')

    def test_default(self):
        conn = self.MockConnection()
        channel = Mock()
        channel.connection.client = conn
        conn.Consumer = MockConsumer
        it = common.itermessages(conn, channel, 'q', limit=1)

        ret = next(it)
        self.assertTupleEqual(ret, ('body', 'message'))

        with self.assertRaises(StopIteration):
            next(it)

    def test_when_raises_socket_timeout(self):
        # A drain timeout simply ends the iteration.
        conn = self.MockConnection()
        conn.should_raise_timeout = True
        channel = Mock()
        channel.connection.client = conn
        conn.Consumer = MockConsumer
        it = common.itermessages(conn, channel, 'q', limit=1)

        with self.assertRaises(StopIteration):
            next(it)

    @patch('kombu.common.deque')
    def test_when_raises_IndexError(self, deque):
        # An empty internal queue (IndexError) also ends the iteration.
        deque_instance = deque.return_value = Mock()
        deque_instance.popleft.side_effect = IndexError()
        conn = self.MockConnection()
        channel = Mock()
        conn.Consumer = MockConsumer
        it = common.itermessages(conn, channel, 'q', limit=1)

        with self.assertRaises(StopIteration):
            next(it)
class test_QoS(Case):
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on OSX Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
self.assertTrue(logger.warn.called)
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment_eventually(), 11)
self.assertEqual(qos.increment_eventually(3), 14)
self.assertEqual(qos.increment_eventually(-30), 14)
self.assertEqual(qos.decrement_eventually(7), 7)
self.assertEqual(qos.decrement_eventually(), 6)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment_eventually(), 0)
self.assertEqual(qos.increment_eventually(3), 0)
self.assertEqual(qos.increment_eventually(-30), 0)
self.assertEqual(qos.decrement_eventually(7), 0)
self.assertEqual(qos.decrement_eventually(), 0)
self.assertEqual(qos.decrement_eventually(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
    def test_exceeds_short(self):
        # The in-memory value may temporarily exceed PREFETCH_COUNT_MAX;
        # only update()/set() clamp what is sent to the broker.
        qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
        qos.update()
        self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
        qos.increment_eventually()
        self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
        qos.increment_eventually()
        self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
        qos.decrement_eventually()
        self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
        qos.decrement_eventually()
        self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
    def test_consumer_increment_decrement(self):
        # The *_eventually methods only change the local value; the
        # consumer callback is invoked with the new prefetch count only
        # when update() is called.
        mconsumer = Mock()
        qos = QoS(mconsumer.qos, 10)
        qos.update()
        self.assertEqual(qos.value, 10)
        mconsumer.qos.assert_called_with(prefetch_count=10)
        qos.decrement_eventually()
        qos.update()
        self.assertEqual(qos.value, 9)
        mconsumer.qos.assert_called_with(prefetch_count=9)
        qos.decrement_eventually()
        self.assertEqual(qos.value, 8)
        # No update() since the last call, so the callback still shows 9.
        mconsumer.qos.assert_called_with(prefetch_count=9)
        self.assertIn({'prefetch_count': 9}, mconsumer.qos.call_args)
        # Does not decrement 0 value
        qos.value = 0
        qos.decrement_eventually()
        self.assertEqual(qos.value, 0)
        qos.increment_eventually()
        self.assertEqual(qos.value, 0)
    def test_consumer_decrement_eventually(self):
        # decrement_eventually never pushes the value below zero.
        mconsumer = Mock()
        qos = QoS(mconsumer.qos, 10)
        qos.decrement_eventually()
        self.assertEqual(qos.value, 9)
        qos.value = 0
        qos.decrement_eventually()
        self.assertEqual(qos.value, 0)
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
self.assertEqual(qos.prev, 12)
qos.set(qos.prev)
| bsd-3-clause |
Boussadia/weboob | weboob/applications/qvideoob/qvideoob.py | 2 | 1602 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.video import ICapVideo
from weboob.tools.application.qt import QtApplication
from .main_window import MainWindow
class QVideoob(QtApplication):
    """Qt application for searching and playing videos via weboob backends."""
    APPNAME = 'qvideoob'
    VERSION = '0.i'
    COPYRIGHT = 'Copyright(C) 2010-2011 Romain Bignon'
    DESCRIPTION = "Qt application allowing to search videos on various websites and play them."
    SHORT_DESCRIPTION = "search and play videos"
    CAPS = ICapVideo
    # Default user settings: nsfw/sfw content filters, sort order, and a
    # backend restriction (empty string = search all loaded backends).
    CONFIG = {'settings': {'nsfw': 1,
                           'sfw': 1,
                           'sortby': 0,
                           'backend': ''
                           }
              }
    def main(self, argv):
        """Load video backends and config, show the window, run the loop."""
        self.load_backends(ICapVideo)
        self.load_config()
        self.main_window = MainWindow(self.config, self.weboob, self)
        self.main_window.show()
        return self.weboob.loop()
| agpl-3.0 |
olasitarska/django | django/contrib/auth/tests/custom_user.py | 37 | 6464 | from django.db import models
from django.contrib.auth.models import (
BaseUserManager,
AbstractBaseUser,
AbstractUser,
UserManager,
PermissionsMixin,
Group,
Permission,
)
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
    """Manager for CustomUser: email is the login, date_of_birth required."""
    def create_user(self, email, date_of_birth, password=None):
        """
        Creates and saves a User with the given email and password.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=self.normalize_email(email),
            date_of_birth=date_of_birth,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password, date_of_birth):
        """Create a user with is_admin set (CustomUser has no is_superuser)."""
        u = self.create_user(email, password=password, date_of_birth=date_of_birth)
        u.is_admin = True
        u.save(using=self._db)
        return u
class CustomUserWithFKManager(BaseUserManager):
def create_superuser(self, username, email, group, password):
user = self.model(username_id=username, email_id=email, group_id=group)
user.set_password(password)
user.save(using=self._db)
return user
class Email(models.Model):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
class CustomUser(AbstractBaseUser):
    """Test user identified by email, with a required date_of_birth field."""
    email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    date_of_birth = models.DateField()
    custom_objects = CustomUserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['date_of_birth']
    class Meta:
        app_label = 'auth'
    def get_full_name(self):
        return self.email
    def get_short_name(self):
        return self.email
    def __unicode__(self):
        return self.email
    # Maybe required?
    def get_group_permissions(self, obj=None):
        return set()
    def get_all_permissions(self, obj=None):
        return set()
    def has_perm(self, perm, obj=None):
        # Permissive stubs: this test model grants every permission.
        return True
    def has_perms(self, perm_list, obj=None):
        return True
    def has_module_perms(self, app_label):
        return True
    # Admin required fields
    @property
    def is_staff(self):
        # There is no separate staff flag; admins are treated as staff.
        return self.is_admin
class CustomUserWithFK(AbstractBaseUser):
username = models.ForeignKey(Email, related_name='primary')
email = models.ForeignKey(Email, to_field='email', related_name='secondary')
group = models.ForeignKey(Group)
custom_objects = CustomUserWithFKManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email', 'group']
class Meta:
app_label = 'auth'
# At this point, temporarily remove the groups and user_permissions M2M
# fields from the AbstractUser class, so they don't clash with the related_name
# that sets.
# Save the original M2M field lists so they can be restored once the test
# models below have been defined (see the "Undo swap hack" at file end).
old_au_local_m2m = AbstractUser._meta.local_many_to_many
old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
# Re-register groups/user_permissions without the clashing related_name.
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
class Meta:
app_label = 'auth'
# The CustomPermissionsUser users email as the identifier, but uses the normal
# Django permissions model. This allows us to check that the PermissionsMixin
# includes everything that is needed to interact with the ModelBackend.
class CustomPermissionsUserManager(CustomUserManager):
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_superuser = True
u.save(using=self._db)
return u
class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
date_of_birth = models.DateField()
custom_objects = CustomPermissionsUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
class IsActiveTestUser1(AbstractBaseUser):
"""
This test user class and derivatives test the default is_active behavior
"""
username = models.CharField(max_length=30, unique=True)
custom_objects = BaseUserManager()
USERNAME_FIELD = 'username'
class Meta:
app_label = 'auth'
# the is_active attr is provided by AbstractBaseUser
class CustomUserNonUniqueUsername(AbstractBaseUser):
"A user with a non-unique username"
username = models.CharField(max_length=30)
USERNAME_FIELD = 'username'
class Meta:
app_label = 'auth'
class CustomUserNonListRequiredFields(AbstractBaseUser):
"A user with a non-list REQUIRED_FIELDS"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
class Meta:
app_label = 'auth'
class CustomUserBadRequiredFields(AbstractBaseUser):
"A user with a non-unique username"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
class Meta:
app_label = 'auth'
# Undo swap hack
AbstractUser._meta.local_many_to_many = old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = old_pm_local_m2m
| bsd-3-clause |
matthewrmshin/rose | metomi/rose/env.py | 4 | 5952 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (C) 2012-2019 British Crown (Met Office) & Contributors.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""Environment variable substitution in strings.
Note: os.path.expandvars(path) does not work correctly because unbound
environment variables are left unchanged.
"""
import os
import re
from metomi.rose.reporter import Event
# _RE_DEFAULT = re.compile(r"""
# \A # start
# (?P<head>.*?) # shortest of anything
# (?P<escape>\\*) # escapes
# (?P<symbol> # start symbol
# \$ # variable sigil, dollar
# (?P<brace_open>\{)? # brace open, optional
# (?P<name>[A-z_]\w*) # variable name
# (?(brace_open)\}) # brace close, if brace_open
# ) # end symbol
# (?P<tail>.*) # rest of string
# \Z # end
# """, re.M | re.S | re.X)
_RE_DEFAULT = re.compile(
r"\A"
r"(?P<head>.*?)"
r"(?P<escape>\\*)"
r"(?P<symbol>"
r"\$"
r"(?P<brace_open>\{)?"
r"(?P<name>[A-z_]\w*)"
r"(?(brace_open)\})"
r")"
r"(?P<tail>.*)"
r"\Z",
re.M | re.S)
# _RE_BRACE = re.compile(r"""
# \A # start
# (?P<head>.*?) # shortest of anything
# (?P<escape>\\*) # escapes
# (?P<symbol>\$\{ # start symbol ${
# (?P<name>[A-z_]\w*) # variable name
# \}) # } end symbol
# (?P<tail>.*) # rest of string
# \Z # end
# """, re.M | re.S | re.X)
_RE_BRACE = re.compile(
r"\A"
r"(?P<head>.*?)"
r"(?P<escape>\\*)"
r"(?P<symbol>\$\{"
r"(?P<name>[A-z_]\w*)"
r"\})"
r"(?P<tail>.*)"
r"\Z",
re.M | re.S)
_MATCH_MODES = {"brace": _RE_BRACE,
"default": _RE_DEFAULT,
None: _RE_DEFAULT}
_EXPORTED_ENVS = {}
class EnvExportEvent(Event):
    """Event raised when an environment variable is exported."""
    # Matches quotes and whitespace so the value can be backslash-escaped
    # into a paste-able shell "export KEY=VALUE" line.
    RE_SHELL_ESCAPE = re.compile(r"([\"'\s])")
    def __str__(self):
        # self.args is (key, value) as passed to EnvExportEvent(key, value).
        key, value = self.args
        return "export %s=%s" % (key, self.RE_SHELL_ESCAPE.sub(r"\\\1", value))
class UnboundEnvironmentVariableError(Exception):
    """An error raised on attempt to substitute an unbound variable."""

    def __str__(self):
        # self.args is the argument tuple given to the constructor, so the
        # %-formatting expands the (single) variable name.
        return "[UNDEFINED ENVIRONMENT VARIABLE] %s" % self.args

    __repr__ = __str__
def env_export(key, value, event_handler=None):
    """Export an environment variable.

    Set "key" to "value" in the process environment and, if
    "event_handler" is callable, report an EnvExportEvent the first time
    the key is exported or whenever its value changes.
    """
    if key not in _EXPORTED_ENVS or os.environ.get(key) != value:
        # N.B. Should be safe, because the list of environment variables is
        # normally quite small.
        _EXPORTED_ENVS[key] = value
        # Write via os.environb so non-ASCII values round-trip as UTF-8.
        os.environb[key.encode('UTF-8')] = value.encode('UTF-8')
        if callable(event_handler):
            event_handler(EnvExportEvent(key, value))
def env_var_escape(text, match_mode=None):
    """Escape $NAME and ${NAME} syntax in "text"."""
    ret = ""
    tail = text
    while tail:
        match = _MATCH_MODES[match_mode].match(tail)
        if match:
            groups = match.groupdict()
            # Double any existing backslashes, then add one more backslash
            # so the $ symbol itself is escaped; continue on the remainder.
            ret += (groups["head"] + groups["escape"] * 2 + "\\" +
                    groups["symbol"])
            tail = groups["tail"]
        else:
            # No further variable syntax: keep the rest verbatim.
            ret += tail
            tail = ""
    return ret
def env_var_process(text, unbound=None, match_mode=None):
    """Substitute environment variables into a string.

    For each $NAME and ${NAME} in "text", substitute with the value
    of the environment variable NAME. If NAME is not defined in the
    environment and "unbound" is None, raise an
    UnboundEnvironmentVariableError. If NAME is not defined in the
    environment and "unbound" is not None, substitute NAME with the
    value of "unbound".
    """
    ret = ""
    # Accept both bytes and str input; bytes are decoded up front.
    try:
        tail = text.decode()
    except AttributeError:
        tail = text
    while tail:
        match = _MATCH_MODES[match_mode].match(tail)
        if match:
            groups = match.groupdict()
            substitute = groups["symbol"]
            # An even number of preceding backslashes means the $ sigil
            # itself is not escaped, so the variable is substituted.
            if len(groups["escape"]) % 2 == 0:
                if groups["name"] in os.environ:
                    substitute = os.environ[groups["name"]]
                elif unbound is not None:
                    substitute = str(unbound)
                else:
                    raise UnboundEnvironmentVariableError(groups["name"])
            # Halve the escapes: each "\\" pair collapses to a literal "\".
            ret += (groups["head"] +
                    groups["escape"][0:len(groups["escape"]) // 2] +
                    substitute)
            tail = groups["tail"]
        else:
            ret += tail
            tail = ""
    return ret
def contains_env_var(text, match_mode=None):
    """Check if a string contains unescaped $NAME and/or ${NAME} syntax."""
    match = _MATCH_MODES[match_mode].match(text)
    # An even number of preceding backslashes means the match is unescaped.
    return (match and len(match.groupdict()["escape"]) % 2 == 0)
| gpl-3.0 |
Kevin-Roberts/coinbase_trader | trader.py | 1 | 16564 | """
Coinbase Python Client Library
AUTHOR
Kevin Roberts
Github: Kevin-Roberts
Started on: 12-11-2013
LICENSE (The MIT License)
Copyright (c) 2013 Kevin Roberts "kr0b1486@hotmail.com"
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = 'Kevin-Roberts'
import sys
from coinbase import CoinbaseAccount
from coinbase.models import CoinbaseTransfer, CoinbaseError
import oauth2client # Maybe someone can test if I need this or not
import threading
from datetime import datetime
import time
MARKET_BUY = "MarketBuy"
MARKET_SELL = "MarketSell"
LIMIT_BUY = "LimitBuy"
LIMIT_SELL = "LimitSell"
STOP_LOSS = "StopLoss"
TRAIL_STOP_VALUE = "ValueTrailingStopLoss"
TRAIL_STOP_PERCENT = "PercentageTrailingStopLoss"
ORDER_ID = 0
TRADER_ID = 0
_orderIdLock = threading.Lock()
_traderIdLock = threading.Lock()
_logwriteLock = threading.Lock()
_stoppedTraders = []
class CoinOrder(object):
    """
    Represents the different types of orders. Used in the order list.
    """
    def __init__(self, ordertype, qty, price = 0, changeval = 0, parentorder = None, nextorder = None):
        """
        ordertype: constant of the order type to be executed
        qty: qty of btc in order
        price: price of order, different uses for different order Types
        changeval: percent of value of change before stop loss activated
        parentorder: Order that spawned this order
        nextorder: Order to be executed once this order is executed.
        """
        # Take a process-wide unique id under a lock so orders created by
        # concurrent traders never share an id.
        global ORDER_ID, _orderIdLock
        with _orderIdLock:
            self.orderid = ORDER_ID
            ORDER_ID+=1
        self.ordertype = ordertype
        self.qty = qty
        self.price = price
        self.changeval = changeval
        self.parentorder = parentorder
        self.executed = False  # set True once the order has been filled
        self.nextOrder = nextorder
class Trader(object):
_orderbookLock = threading.Lock()
_executeLock = threading.Lock()
_stopTradeLock = threading.Lock()
stopTrade = False
    def __init__(self,api_key = None, oauth2_credentials = None, orderbook = None, logname = "traderlog.txt"):
        """Create a trader bound to a single Coinbase account.

        Exactly one of api_key / oauth2_credentials must be supplied;
        the log file at "logname" is truncated on construction.
        """
        if (api_key is None) and (oauth2_credentials is None):
            raise ValueError("api_key and oauth2_credentials cannot be None")
        if (api_key is not None) and (oauth2_credentials is not None):
            raise ValueError("User Provided both api_key and oauth2_credentials, select one") # I might want to instead just use one or the other
        # Take a process-wide unique trader id under a lock.
        with _traderIdLock:
            global TRADER_ID
            self.traderid = TRADER_ID
            TRADER_ID+=1
        self.orderbook = [] if orderbook is None else orderbook
        # Truncate/create the log file so every run starts fresh.
        f = open(logname, 'w')
        f.close()
        self.logname = logname
        self.logwrite(str(datetime.now()) + '\n')
        self.account = CoinbaseAccount(oauth2_credentials,api_key)
    def logwrite(self, message):
        """
        Writes to the logfile with appended newline.
        """
        # NOTE(review): the file is opened before the lock is taken; the
        # write itself is serialized by _logwriteLock across threads.
        with open(self.logname, 'a') as log:
            with _logwriteLock:
                log.write(message + '\n')
    def logexecution(self, order, result):
        """
        Write the order and the CoinbaseTransfer result of its execution
        to the log file.
        """
        self.logorder(order)
        # Summarize the transfer: type/code/time, then amounts and fees.
        logstr = "Order Type: " + str(result.type) + " Code: " + str(result.code) + " Executed at: " + str(result.created_at)
        logstr = logstr + "\nBTC Amount: " + str(result.btc_amount) + " Total Price: " + str(result.total_amount) + " Fees: " + str(result.fees_bank+result.fees_coinbase)
        logstr = "Result of Order\n" + logstr
        self.logwrite(logstr)
def logorder(self, order, header = None):
"""
Writes the order the log file
"""
logstr = header + '\n' if isinstance(header,str) else ''
logstr = "OrderID: %s OrderType: %s Quantity: %s Price: %s" % (order.orderid, order.ordertype, order.qty, order.price)
logstr = logstr + " Change Value: " + str(order.changeval)
logstr = logstr + " Executed: " + str(order.executed) + '\n'
if order.parentorder is not None:
self.logorder(order.parentorder, "Parent Order:")
self.logwrite(logstr)
    def ExecuteOrder(self, order):
        """
        Executes an order based on its order.ordertype
        Returns None if the trade is valid but not yet active (i.e. limit not met)
        Returns a CoinbaseError or CoinbaseTransfer object if order attempted to execute
        Returns a (CoinbaseTransfer, CoinOrder) tuple when a chained order should be queued
        Returns False if the order should be Deleted or removed from the orderbook.
        """
        with self._executeLock:
            traderesult = None
            currentprice = -1
            # Quote the relevant side of the market for this order type.
            # Note: buy_price/sell_price return the TOTAL price for qty coins.
            if order.ordertype in [MARKET_BUY, LIMIT_BUY]:
                currentprice = self.account.buy_price(qty = order.qty)
                print "Current Buy Price: " + str(currentprice/order.qty)
            elif order.ordertype in [MARKET_SELL, LIMIT_SELL, STOP_LOSS, TRAIL_STOP_VALUE, TRAIL_STOP_PERCENT]:
                currentprice = self.account.sell_price(qty = order.qty)
                print "Current Sell Price: " + str(currentprice/order.qty)
            if order.ordertype == MARKET_BUY:
                traderesult = self.account.buy_btc(qty = order.qty)
            elif order.ordertype == MARKET_SELL:
                traderesult = self.account.sell_btc(qty = order.qty)
            elif order.ordertype == LIMIT_BUY:
                # Buy once the total price drops to the limit or below.
                if currentprice <= order.price:
                    traderesult = self.account.buy_btc(qty = order.qty)
            elif order.ordertype == LIMIT_SELL:
                # Sell once the total price rises to the limit or above.
                if currentprice >= order.price:
                    traderesult = self.account.sell_btc(qty = order.qty)
            elif order.ordertype == STOP_LOSS:
                # Falling through the stop converts into a market sell.
                if currentprice <= order.price:
                    traderesult = CoinOrder(ordertype = MARKET_SELL, qty = order.qty, parentorder = order)
            elif order.ordertype == TRAIL_STOP_VALUE:
                # Track the high-water mark in order.price; trigger a sell
                # once the price falls changeval below it.
                if currentprice > order.price:
                    order.price = currentprice
                elif currentprice <= (order.price - order.changeval):
                    traderesult = CoinOrder(ordertype = MARKET_SELL, qty = order.qty, parentorder = order)
            elif order.ordertype == TRAIL_STOP_PERCENT:
                # Same, but the trigger is a fractional drop from the peak.
                if currentprice > order.price:
                    order.price = currentprice
                elif currentprice <= (order.price * (1.0 - order.changeval) ):
                    traderesult = CoinOrder(ordertype = MARKET_SELL, qty = order.qty, parentorder = order)
            else:
                traderesult = False # deletes the order from the order book
            if isinstance(order.nextOrder, CoinOrder):
                if isinstance(traderesult, CoinbaseTransfer):
                    # Successful fill: hand the chained order back to trade().
                    traderesult = (traderesult, order.nextOrder)
                if isinstance(traderesult, CoinbaseError):
                    print "Triggered Order Lost due to error"
            return traderesult
    def trade(self, runtime = None, sleeptime = 60, startNewThread = False):
        """
        Call this function to execute trades in added to the order book. Returns True on success and Writes
        the specified log file.
        :param runtime: Number of seconds to trade should execute, infinity (None) is the default.
        :param sleeptime: Interval of time between checking orders (coinbase updates their prices once per 60 seconds)
        :param startNewThread: Optionally run trade in a new thread, orders can then be added while trade() runs (using the usual methods)
        """
        if startNewThread == True:
            # Re-enter this method on a daemon thread and return at once.
            newThread = threading.Thread(target=self.trade, args=[runtime, sleeptime, False])
            newThread.daemon = True
            newThread.start()
            return True
        with self._stopTradeLock:
            self.stopTrade = False
        # Record the starting position for later comparison in the log.
        initialBtcBal = self.account.balance
        initialUsdVal = self.account.sell_price(initialBtcBal)
        initialSellRate = initialUsdVal/initialBtcBal if initialBtcBal != 0 else 0
        self.logwrite("Initial BTC Balance: " + str(initialBtcBal) + " Initial USD Value: " + str(initialUsdVal) + " Price Per Coin: " + str(initialSellRate))
        # Main polling loop: runs until time expires, the book is empty,
        # or stoptrade() has flagged this trader id.
        while ( (runtime is None) or (runtime>0) ) and (len(self.orderbook) > 0):
            with self._stopTradeLock:
                if self.traderid in _stoppedTraders:
                    _stoppedTraders.remove(self.traderid)
                    return True
            sleep = True
            # Orders that should survive this pass are collected here and
            # swapped in as the new orderbook at the end of the pass.
            temporderbook = []
            with self._orderbookLock:
                constantorderbook = self.orderbook
            for order in constantorderbook:
                result = self.ExecuteOrder(order)
                if isinstance(result, tuple):
                    # Append next order and process the trade result
                    temporderbook.append(result[1])
                    result = result[0]
                if result is False:
                    # Invalid Order ID, discard order
                    pass
                elif isinstance(result, CoinbaseError):
                    # There is an error, check if its due to improper supply.
                    self.logorder(order, result.error[0])
                    if result.error[0] == "You must acknowledge that the price can vary by checking the box below.":
                        temporderbook.append(order) # Means the order failed due to low supply and not agreeing to the price varying.
                    elif len(self.orderbook) == 1:
                        print result.error
                        sleep = False # This is done to exit quickly if the last trade errors
                elif isinstance(result, CoinOrder):
                    # A stop order fired: queue its market sell immediately.
                    order.executed = True
                    temporderbook.append(result)
                    sleep = False # If a coinorder is returned it should be executed asap.
                    sleeptime = 1 # If I can't be executed after the first time, keep trying every second otherwise
                elif isinstance(result, CoinbaseTransfer):
                    # Trade executed
                    order.executed = True
                    self.logexecution(order, result)
                elif result is None:
                    # Order is still waiting for its trigger; keep it queued.
                    temporderbook.append(order)
            with self._orderbookLock:
                self.orderbook = temporderbook
            if sleep is True:
                if runtime is not None:
                    runtime = runtime - sleeptime
                if runtime is None or runtime > 0:
                    time.sleep(sleeptime)
        return True
    def stoptrade(self):
        """
        Call to stop trade() method from executing. Only needed for threading mode.
        """
        # trade() checks _stoppedTraders once per loop and exits (removing
        # the flag) when it finds its own trader id there.
        with self._stopTradeLock:
            _stoppedTraders.append(self.traderid)
    def _addOrder(self, ordertype, qty, price = 0, changeval = None, queueExecution = True):
        """
        Generic Order Adding Function. price is in price per share. User shouldn't call.
        ordertype: type of order, constant
        qty: qty in order
        price: price to execute
        changeval: change to observer for stoploss trades
        queueExecution: True or False to add to orderbook list, True if you want to
        execute with trade(). False if you just want the order object to be returned
        """
        # Orders store the TOTAL price; convert from per-coin price here.
        price = price * qty
        order = CoinOrder(ordertype = ordertype, qty = qty, price = price, changeval = changeval)
        if queueExecution is True:
            with self._orderbookLock:
                self.orderbook.append(order)
            self.logorder(order, "Added Order:")
        return order
def setMarketBuy(self, qty, queue = True):
"""
Buy qty bitcoins as soon as possible
"""
return self._addOrder(ordertype = MARKET_BUY, qty = qty, queueExecution = queue)
def setMarketSell(self, qty, queue = True):
"""
Sell qty bitcoins as soon as possible
"""
return self._addOrder(ordertype = MARKET_SELL, qty = qty, queueExecution = queue)
def setLimitBuy(self, qty, price, queue = True):
"""
Helps buy low, Buy at specified price or lower. Input price per share
"""
return self._addOrder(ordertype = LIMIT_BUY,qty = qty, price = price, queueExecution = queue)
def setLimitSell(self, qty, price, queue = True):
"""
Helps sell high, Sell at specified price or higher. Input execution price per share
"""
return self._addOrder(ordertype = LIMIT_SELL, qty = qty, price = price, queueExecution = queue)
def setStopLoss(self, qty, price, queue = True):
"""
If the price goes below a specified price, sell it all ASAP via market sell order. Input execution price per share
"""
return self._addOrder(ordertype = STOP_LOSS, qty = qty, price = price, queueExecution = queue)
def setTrailStopLossValue(self, qty, changeval, queue = True):
"""
Sell qty bitcoins when they drop "value" below their maximum per share value since purchase. Basically a Moving Limit sell at: maxPriceSeen - value
"""
return self._addOrder(ordertype = TRAIL_STOP_VALUE, qty = qty, price = 0, changeval = changeval*qty, queueExecution = queue)
def setTrailStopLossPercent(self, qty, changeval, maxprice = 0, queue = True):
"""
Sell qty bitcoins when they have a changepercent drop below their maximum value since purchase. Basically a Moving Limit sell at: maxPriceSeen * (1 - (changepercent/100) )
"""
return self._addOrder(ordertype = TRAIL_STOP_PERCENT, qty = qty, price = maxprice, changeval = changeval/100.0, queueExecution = queue)
def oneStartsAnother(self, initialOrder, triggerOrder, queue = True):
"""
Input two orders then once ExecuteOrder() executes the order (in trade) it Returns
that value to the trade() method which will then begin executing the next order specified.
This can be used to execute a buy which upon execution will trigger a sell order at a higher price.
These orders could be combined multiple times by having any order trigger another oneStartsAnother order.
These order should NOT be added to the queue (i.e. set queue = False upon order creation).
"""
initialOrder.nextOrder = triggerOrder
with self._orderbookLock:
if queue is True:
self.orderbook.append(initialOrder)
self.logorder(initialOrder, "Added Order:")
return initialOrder
def RemoveOrder(self, orderid):
"""
Accepts either the orderid (starts at 0 and increments to the number of total orders)
Or the CoinOrder object returned when the order was created.
"""
removedOrder = None
if isinstance(orderid, int):
temp = []
with self._orderbookLock:
constorderbook = self.orderbook
for order in self.orderbook:
if order.id != orderid:
temp.append(order)
else:
removedOrder = order
with self._orderbookLock:
self.orderbook = temp
if isinstance(orderid, CoinOrder):
try:
with self._orderbookLock:
self.orderbook.remove(order)
removedOrder = order
except:
pass
return removedOrder | mit |
40223201/w16b_test | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/compat.py | 603 | 3054 | """Python 2.x/3.x compatibility tools"""
import sys
__all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_',
'unicode_', 'raw_input_', 'as_bytes', 'as_unicode']
def geterror():
    """Return the exception instance currently being handled, if any."""
    _, value, _ = sys.exc_info()
    return value
try:
long_ = long
except NameError:
long_ = int
try:
xrange_ = xrange
except NameError:
xrange_ = range
def get_BytesIO():
    """Return a binary in-memory file class: cStringIO.StringIO on
    Python 2, io.BytesIO everywhere else."""
    try:
        import cStringIO
    except ImportError:
        import io
        return io.BytesIO
    return cStringIO.StringIO
def get_StringIO():
    """Return a text in-memory file class: cStringIO.StringIO on
    Python 2, io.StringIO everywhere else."""
    try:
        import cStringIO
    except ImportError:
        import io
        return io.StringIO
    return cStringIO.StringIO
def ord_(o):
    """Return the ordinal of a one-character string, or *o* unchanged when
    it is already an integer (iterating bytes yields ints on Python 3)."""
    try:
        code = ord(o)
    except TypeError:
        return o
    return code
try:
unichr_ = unichr
except NameError:
unichr_ = chr
try:
unicode_ = unicode
except NameError:
unicode_ = str
try:
bytes_ = bytes
except NameError:
bytes_ = str
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
if sys.platform == 'win32':
filesystem_errors = "replace"
elif sys.version_info >= (3, 0, 0):
filesystem_errors = "surrogateescape"
else:
filesystem_errors = "strict"
def filesystem_encode(u):
fsencoding = sys.getfilesystemencoding()
if (fsencoding.lower() == 'ascii') and sys.platform.startswith('linux'):
# Don't believe Linux systems claiming ASCII-only filesystems. In
# practice, arbitrary bytes are allowed, and most things expect UTF-8.
fsencoding = 'utf-8'
return u.encode(fsencoding, filesystem_errors)
# Represent escaped bytes and strings in a portable way.
#
# as_bytes: Allow a Python 3.x string to represent a bytes object.
# e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x
# as_bytes("a\x01\b") == "a\x01b" # Python 2.x
# as_unicode: Allow a Python "r" string to represent a unicode string.
# e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x
# as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x
try:
    unicode
    # Python 2: plain string literals are already bytes.
    def as_bytes(string):
        """ '<binary literal>' => '<binary literal>' """
        return string

    def as_unicode(rstring):
        """ r'<Unicode literal>' => u'<Unicode literal>' """
        return rstring.decode('unicode_escape', 'strict')
except NameError:
    # Python 3: encode escaped text down to bytes / decode escapes to str.
    def as_bytes(string):
        """ '<binary literal>' => b'<binary literal>' """
        return string.encode('latin-1', 'strict')

    def as_unicode(rstring):
        """ r'<Unicode literal>' => '<Unicode literal>' """
        # BUG FIX: the errors argument was misspelled 'stict'; a decode
        # error would have raised LookupError instead of UnicodeDecodeError.
        return rstring.encode('ascii', 'strict').decode('unicode_escape',
                                                        'strict')
# Include a next compatible function for Python versions < 2.6
try:
    next_ = next
except NameError:
    def next_(i, *args):
        # Emulate builtin next(iterator[, default]): fall back to the
        # default (first extra arg) only when the iterator is exhausted.
        try:
            return i.next()
        except StopIteration:
            if args:
                return args[0]
            raise
# itertools.imap is missing in 3.x; there the builtin map is already lazy.
try:
    # BUG FIX: "import itertools.imap as imap_" always raised ImportError
    # because imap is an attribute of itertools, not a submodule, so the
    # Python 2 lazy path was never taken.
    from itertools import imap as imap_
except ImportError:
    imap_ = map
| agpl-3.0 |
timopulkkinen/BubbleFish | third_party/libxml/src/check-relaxng-test-suite2.py | 343 | 10578 | #!/usr/bin/python
import sys
import time
import os
import string
import StringIO
sys.path.insert(0, "python")
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
debug = 0
quiet = 1
#
# the testsuite description
#
CONF=os.path.join(os.path.dirname(__file__), "test/relaxng/testsuite.xml")
LOG="check-relaxng-test-suite2.log"
log = open(LOG, "w")
nb_schemas_tests = 0
nb_schemas_success = 0
nb_schemas_failed = 0
nb_instances_tests = 0
nb_instances_success = 0
nb_instances_failed = 0
libxml2.lineNumbersDefault(1)
#
# Resolver callback
#
resources = {}
def resolver(URL, ID, ctxt):
    """Entity resolver callback: serve known URLs from the in-memory
    "resources" table, logging and refusing anything else."""
    global resources
    # dict.has_key() was removed in Python 3; "in" behaves identically
    # on Python 2 as well.
    if URL in resources:
        return(StringIO.StringIO(resources[URL]))
    log.write("Resolver failure: asked %s\n" % (URL))
    log.write("resources: %s\n" % (resources))
    return None
#
# Load the previous results
#
#results = {}
#previous = {}
#
#try:
# res = libxml2.parseFile(RES)
#except:
# log.write("Could not parse %s" % (RES))
#
# handle a valid instance
#
def handle_valid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
# mem = libxml2.debugMemory(1);
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nFailed to parse correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
# if mem != libxml2.debugMemory(1):
# print "validating instance %d line %d leaks" % (
# nb_instances_tests, node.lineNo())
if ret != 0:
log.write("\nFailed to validate correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle an invalid instance
#
def handle_invalid(node, schema):
    """Validate one <invalid> instance document against a compiled schema.

    Success here means libxml2 *rejected* the instance.  Updates the
    global instance pass/fail counters and logs any unexpected result.
    """
    global log
    global nb_instances_success
    global nb_instances_failed
    # Reassemble the instance document text from the test node.
    instance = node.prop("dtd")
    if instance == None:
        instance = ""
    child = node.children
    while child != None:
        if child.type != 'text':
            instance = instance + child.serialize()
        child = child.next
#    mem = libxml2.debugMemory(1);
    try:
        doc = libxml2.parseDoc(instance)
    except:
        doc = None
    if doc == None:
        # Instance isn't even well-formed: log it, but note that this
        # case is counted neither as success nor as failure.
        log.write("\nStrange: failed to parse incorrect instance:\n-----\n")
        log.write(instance)
        log.write("\n-----\n")
        return
    if debug:
        print "instance line %d" % (node.lineNo())
    try:
        ctxt = schema.relaxNGNewValidCtxt()
        ret = doc.relaxNGValidateDoc(ctxt)
        del ctxt
    except:
        # Treat validation-API errors as "instance rejected".
        ret = -1
    doc.freeDoc()
#    mem2 = libxml2.debugMemory(1)
#    if mem != mem2:
#        print "validating instance %d line %d leaks %d bytes" % (
#              nb_instances_tests, node.lineNo(), mem2 - mem)
    if ret == 0:
        # The schema accepted a document it was supposed to reject.
        log.write("\nFailed to detect validation problem in instance:\n-----\n")
        log.write(instance)
        log.write("\n-----\n")
        nb_instances_failed = nb_instances_failed + 1
    else:
        nb_instances_success = nb_instances_success + 1
#
# handle an incorrect test
#
def handle_correct(node):
    """Compile the <correct> schema contained in *node*.

    Returns the compiled RelaxNG schema object on success, or None
    (after logging and counting a schema failure) when it does not
    compile.  Updates the global schema pass/fail counters.
    """
    global log
    global nb_schemas_success
    global nb_schemas_failed
    # Rebuild the schema text from the node's non-text children.
    schema = ""
    child = node.children
    # PEP 8: compare against None with 'is'/'is not', not (in)equality.
    while child is not None:
        if child.type != 'text':
            schema = schema + child.serialize()
        child = child.next
    try:
        rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
        rngs = rngp.relaxNGParse()
    except:
        rngs = None
    if rngs is None:
        log.write("\nFailed to compile correct schema:\n-----\n")
        log.write(schema)
        log.write("\n-----\n")
        nb_schemas_failed = nb_schemas_failed + 1
    else:
        nb_schemas_success = nb_schemas_success + 1
    return rngs
def handle_incorrect(node):
    """Try to compile the deliberately broken <incorrect> schema in *node*.

    Success means libxml2 *rejected* the schema.  Always returns None so
    the caller skips instance validation for this testCase.  Updates the
    global schema pass/fail counters.
    """
    global log
    global nb_schemas_success
    global nb_schemas_failed
    # Rebuild the schema text from the node's non-text children.
    schema = ""
    child = node.children
    # PEP 8: compare against None with 'is'/'is not', not (in)equality.
    while child is not None:
        if child.type != 'text':
            schema = schema + child.serialize()
        child = child.next
    try:
        rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
        rngs = rngp.relaxNGParse()
    except:
        rngs = None
    if rngs is not None:
        # The parser accepted a schema it was supposed to reject.
        log.write("\nFailed to detect schema error in:\n-----\n")
        log.write(schema)
        log.write("\n-----\n")
        nb_schemas_failed = nb_schemas_failed + 1
    else:
#        log.write("\nSuccess detecting schema error in:\n-----\n")
#        log.write(schema)
#        log.write("\n-----\n")
        nb_schemas_success = nb_schemas_success + 1
    return None
#
# resource handling: keep a dictionary of URL->string mappings
#
def handle_resource(node, dir):
    """Record a <resource> element as an in-memory file for the resolver.

    The resource content is stored in the global ``resources`` dict under
    its name, prefixed with the pseudo-directory *dir* when given.
    (The parameter name shadows the builtin dir(); kept for interface
    compatibility with existing callers.)
    """
    global resources
    try:
        name = node.prop('name')
    except:
        name = None
    # PEP 8: identity comparison with None; also drop the stray ';'.
    if name is None or name == '':
        log.write("resource has no name")
        return
    if dir is not None:
#        name = libxml2.buildURI(name, dir)
        name = dir + '/' + name
    # Rebuild the resource content from the node's non-text children.
    res = ""
    child = node.children
    while child is not None:
        if child.type != 'text':
            res = res + child.serialize()
        child = child.next
    resources[name] = res
#
# dir handling: pseudo directory resources
#
def handle_dir(node, dir):
    """Recursively register the resources of a <dir> pseudo-directory.

    Nested <dir> children are walked with the accumulated path, then all
    <resource> children are stored via handle_resource().
    (The parameter name shadows the builtin dir(); kept for interface
    compatibility with existing callers.)
    """
    try:
        name = node.prop('name')
    except:
        name = None
    # PEP 8: identity comparison with None; also drop the stray ';'.
    if name is None or name == '':
        log.write("resource has no name")
        return
    if dir is not None:
#        name = libxml2.buildURI(name, dir)
        name = dir + '/' + name
    # Fix: the loop variable was previously also named 'dir', clobbering
    # the parameter inside the loop -- use a distinct name.
    subdirs = node.xpathEval('dir')
    for subdir in subdirs:
        handle_dir(subdir, name)
    res = node.xpathEval('resource')
    for r in res:
        handle_resource(r, name)
#
# handle a testCase element
#
def handle_testCase(node):
global nb_schemas_tests
global nb_instances_tests
global resources
sections = node.xpathEval('string(section)')
log.write("\n ======== test %d line %d section %s ==========\n" % (
nb_schemas_tests, node.lineNo(), sections))
resources = {}
if debug:
print "test %d line %d" % (nb_schemas_tests, node.lineNo())
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, None)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, None)
tsts = node.xpathEval('incorrect')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <incorrect> example" %(node.lineNo())
schema = handle_incorrect(tsts[0])
else:
tsts = node.xpathEval('correct')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <correct> example"% (node.lineNo())
schema = handle_correct(tsts[0])
else:
print "warning <testCase> line %d has no <correct> nor <incorrect> child" % (node.lineNo())
nb_schemas_tests = nb_schemas_tests + 1;
valids = node.xpathEval('valid')
invalids = node.xpathEval('invalid')
nb_instances_tests = nb_instances_tests + len(valids) + len(invalids)
if schema != None:
for valid in valids:
handle_valid(valid, schema)
for invalid in invalids:
handle_invalid(invalid, schema)
#
# handle a testSuite element
#
def handle_testSuite(node, level = 0):
    """Recursively run a <testSuite> element.

    *level* 0 is the top-level suite; nested suites (level >= 1) snapshot
    the global counters on entry so they can print a per-section summary
    of only their own results on exit.
    """
    global nb_schemas_tests, nb_schemas_success, nb_schemas_failed
    global nb_instances_tests, nb_instances_success, nb_instances_failed
    if level >= 1:
        # Snapshot counters so the deltas below cover only this suite.
        old_schemas_tests = nb_schemas_tests
        old_schemas_success = nb_schemas_success
        old_schemas_failed = nb_schemas_failed
        old_instances_tests = nb_instances_tests
        old_instances_success = nb_instances_success
        old_instances_failed = nb_instances_failed
    docs = node.xpathEval('documentation')
    authors = node.xpathEval('author')
    if docs != []:
        msg = ""
        for doc in docs:
            msg = msg + doc.content + " "
        if authors != []:
            msg = msg + "written by "
            for author in authors:
                msg = msg + author.content + " "
        if quiet == 0:
            print msg
    sections = node.xpathEval('section')
    if sections != [] and level <= 0:
        msg = ""
        for section in sections:
            msg = msg + section.content + " "
        if quiet == 0:
            print "Tests for section %s" % (msg)
    # Run direct test cases, then recurse into nested suites.
    for test in node.xpathEval('testCase'):
        handle_testCase(test)
    for test in node.xpathEval('testSuite'):
        handle_testSuite(test, level + 1)
    if level >= 1 and sections != []:
        msg = ""
        for section in sections:
            msg = msg + section.content + " "
        print "Result of tests for section %s" % (msg)
        if nb_schemas_tests != old_schemas_tests:
            print "found %d test schemas: %d success %d failures" % (
                  nb_schemas_tests - old_schemas_tests,
                  nb_schemas_success - old_schemas_success,
                  nb_schemas_failed - old_schemas_failed)
        if nb_instances_tests != old_instances_tests:
            print "found %d test instances: %d success %d failures" % (
                  nb_instances_tests - old_instances_tests,
                  nb_instances_success - old_instances_success,
                  nb_instances_failed - old_instances_failed)
#
# Parse the conf file (the testsuite description document)
#
libxml2.substituteEntitiesDefault(1);
testsuite = libxml2.parseFile(CONF)
#
# Error and warning callbacks
#
def callback(ctx, msg):
    """libxml2 error/warning handler: append messages to the log file.

    The second parameter was previously named 'str', shadowing the
    builtin; libxml2 invokes this handler positionally, so renaming it
    is safe for callers.
    """
    global log
    log.write("%s%s" % (ctx, msg))
# Route libxml2 diagnostics and entity loading through our handlers.
libxml2.registerErrorHandler(callback, "")
libxml2.setEntityLoader(resolver)
root = testsuite.getRootElement()
if root.name != 'testSuite':
    print "%s doesn't start with a testSuite element, aborting" % (CONF)
    sys.exit(1)
if quiet == 0:
    print "Running Relax NG testsuite"
# Run the whole suite, then print the global summary.
handle_testSuite(root)
if quiet == 0:
    print "\nTOTAL:\n"
if quiet == 0 or nb_schemas_failed != 0:
    print "found %d test schemas: %d success %d failures" % (
      nb_schemas_tests, nb_schemas_success, nb_schemas_failed)
if quiet == 0 or nb_instances_failed != 0:
    print "found %d test instances: %d success %d failures" % (
      nb_instances_tests, nb_instances_success, nb_instances_failed)
testsuite.freeDoc()
# Memory debug specific: release libxml2 caches, then report any leak
# detected by the allocation tracking enabled at startup.
libxml2.relaxNGCleanupTypes()
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    if quiet == 0:
        print "OK"
else:
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
| bsd-3-clause |
duramato/SickRage | lib/hachoir_parser/misc/pcf.py | 95 | 5994 | """
X11 Portable Compiled Font (pcf) parser.
Documents:
- Format for X11 pcf bitmap font files
http://fontforge.sourceforge.net/pcf-format.html
(file is based on the X11 sources)
Author: Victor Stinner
Creation date: 2007-03-20
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, Enum,
UInt8, UInt32, Bytes, RawBytes, NullBytes,
Bit, Bits, PaddingBits, CString)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from hachoir_core.tools import paddingSize
class TOC(FieldSet):
    """One pcf table-of-contents entry: table type, format, size, offset."""
    # Table "type" flag -> human-readable name.
    TYPE_NAME = {
        0x00000001: "Properties",
        0x00000002: "Accelerators",
        0x00000004: "Metrics",
        0x00000008: "Bitmaps",
        0x00000010: "Ink metrics",
        0x00000020: "BDF encodings",
        0x00000040: "SWidths",
        0x00000080: "Glyph names",
        0x00000100: "BDF accelerators",
    }
    # Known values of the table "format" field (currently unused here).
    FORMAT_NAME = {
        0x00000000: "Default",
        0x00000200: "Ink bounds",
        0x00000100: "Accelerator W ink bounds",
#        0x00000200: "Compressed metrics",
    }
    def createFields(self):
        # Four little-endian 32-bit fields, in file order.
        yield Enum(UInt32(self, "type"), self.TYPE_NAME)
        yield UInt32(self, "format")
        yield filesizeHandler(UInt32(self, "size"))
        yield UInt32(self, "offset")
    def createDescription(self):
        return "%s at %s (%s)" % (
            self["type"].display, self["offset"].value, self["size"].display)
class PropertiesFormat(FieldSet):
    """32-bit pcf table format word, decoded bit by bit."""
    static_size = 32
    endian = LITTLE_ENDIAN
    def createFields(self):
        yield Bits(self, "reserved[]", 2)
        # Byte/bit order flags for the table body that follows.
        yield Bit(self, "byte_big_endian")
        yield Bit(self, "bit_big_endian")
        yield Bits(self, "scan_unit", 2)
        yield textHandler(PaddingBits(self, "reserved[]", 26), hexadecimal)
class Property(FieldSet):
    """One entry of the pcf properties table.

    value_offset is an offset into the string pool when is_string is
    non-zero, otherwise an integer value.
    """
    def createFields(self):
        yield UInt32(self, "name_offset")
        yield UInt8(self, "is_string")
        yield UInt32(self, "value_offset")
    def createDescription(self):
        # FIXME: Use link or any better way to read name value
        # (index-2 skips the leading "format" and "nb_prop" siblings).
        name = self["../name[%s]" % (self.index-2)].value
        return "Property %s" % name
class GlyphNames(FieldSet):
    """pcf glyph-names table: per-glyph offsets into a C-string pool."""
    def __init__(self, parent, name, toc, description, size=None):
        FieldSet.__init__(self, parent, name, description, size=size)
        self.toc = toc
        # Table byte order is declared in its own format word.
        if self["format/byte_big_endian"].value:
            self.endian = BIG_ENDIAN
        else:
            self.endian = LITTLE_ENDIAN
    def createFields(self):
        yield PropertiesFormat(self, "format")
        yield UInt32(self, "count")
        offsets = []
        for index in xrange(self["count"].value):
            offset = UInt32(self, "offset[]")
            yield offset
            offsets.append(offset.value)
        yield UInt32(self, "total_str_length")
        # Offsets are relative to the start of the string pool, which
        # begins right after the header parsed so far.
        offsets.sort()
        offset0 = self.current_size // 8
        for offset in offsets:
            padding = self.seekByte(offset0+offset)
            if padding:
                yield padding
            yield CString(self, "name[]")
        padding = (self.size - self.current_size) // 8
        if padding:
            yield NullBytes(self, "end_padding", padding)
class Properties(GlyphNames):
    """pcf properties table: (name, value) entries plus a string pool."""
    def createFields(self):
        yield PropertiesFormat(self, "format")
        yield UInt32(self, "nb_prop")
        properties = []
        for index in xrange(self["nb_prop"].value):
            property = Property(self, "property[]")
            yield property
            properties.append(property)
        # Property records are padded to a 4-byte boundary.
        padding = paddingSize(self.current_size//8, 4)
        if padding:
            yield NullBytes(self, "padding", padding)
        yield UInt32(self, "total_str_length")
        # Walk the string pool in offset order; each property has a name
        # string, and a value string too when is_string is set.
        properties.sort(key=lambda entry: entry["name_offset"].value)
        offset0 = self.current_size // 8
        for property in properties:
            padding = self.seekByte(offset0+property["name_offset"].value)
            if padding:
                yield padding
            yield CString(self, "name[]", "Name of %s" % property.name)
            if property["is_string"].value:
                yield CString(self, "value[]", "Value of %s" % property.name)
        padding = (self.size - self.current_size) // 8
        if padding:
            yield NullBytes(self, "end_padding", padding)
class PcfFile(Parser):
    """Parser for X11 Portable Compiled Font (pcf) files.

    Reads the table of contents, then parses each table in file order;
    only the Properties (type 1) and Glyph names (type 128) tables are
    decoded, the rest are kept as raw bytes.
    """
    MAGIC = "\1fcp"
    PARSER_TAGS = {
        "id": "pcf",
        "category": "misc",
        "file_ext": ("pcf",),
        "magic": ((MAGIC, 0),),
        "min_size": 32, # FIXME
        "description": "X11 Portable Compiled Font (pcf)",
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        if self["signature"].value != self.MAGIC:
            return "Invalid signature"
        return True
    def createFields(self):
        # Fix: the signature really is "\1fcp" (see MAGIC above); the
        # field description previously claimed "\1pcf".
        yield Bytes(self, "signature", 4, r'File signature ("\1fcp")')
        yield UInt32(self, "nb_toc")
        entries = []
        for index in xrange(self["nb_toc"].value):
            entry = TOC(self, "toc[]")
            yield entry
            entries.append(entry)
        # Visit the tables in increasing file offset order.
        entries.sort(key=lambda entry: entry["offset"].value)
        for entry in entries:
            size = entry["size"].value
            padding = self.seekByte(entry["offset"].value)
            if padding:
                yield padding
            # Clamp a table that claims to extend past end of file.
            maxsize = (self.size-self.current_size)//8
            if maxsize < size:
                self.warning("Truncate content of %s to %s bytes (was %s)" % (entry.path, maxsize, size))
                size = maxsize
            if not size:
                continue
            if entry["type"].value == 1:
                yield Properties(self, "properties", entry, "Properties", size=size*8)
            elif entry["type"].value == 128:
                yield GlyphNames(self, "glyph_names", entry, "Glyph names", size=size*8)
            else:
                yield RawBytes(self, "data[]", size, "Content of %s" % entry.path)
| gpl-3.0 |
pgielda/linux-zynq | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags used by the syscall trace scripts.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
# Nanoseconds per second, for the nsecs_* helpers below.
NSECS_PER_SEC = 1000000000
def avg(total, n):
    # Mean of n samples.  NOTE(review): with int arguments this is floor
    # division under Python 2 but true division under Python 3 -- confirm
    # which behaviour callers expect before porting.
    return total / n
def nsecs(secs, nsecs):
    # Combine a (seconds, nanoseconds) pair into a single ns count.
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    # Sub-second remainder (in ns) of a nanosecond count.
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond count as a "sssss.nnnnnnnnn" string."""
    # Fix: the assignment used to end with a stray trailing comma, so the
    # function returned a 1-tuple instead of a string (only harmless for
    # callers doing '"%s" % nsecs_str(x)').  Also stop shadowing the
    # builtin 'str'.
    text = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
    return text
def add_stats(dict, key, value):
    """Fold *value* into dict[key] = (min, max, smoothed_avg, count).

    (The first parameter shadows the builtin 'dict'; the name is kept
    for interface compatibility with existing callers.)
    """
    # 'not in' replaces the deprecated dict.has_key() (removed in Py3).
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        # NOTE(review): this is pairwise smoothing with the previous
        # average, not a true mean of all samples -- kept as-is to
        # preserve existing behaviour.
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Clear the terminal using ANSI escape sequences."""
    # ESC[H homes the cursor, ESC[2J erases the whole display.
    home_then_wipe = "\x1b[H" "\x1b[2J"
    print(home_then_wipe)
# Optional syscall-name support: the 'audit' python bindings map a
# machine name to an audit machine id.  If anything here fails (module
# missing, unknown machine), we warn once and syscall_name() falls back
# to numeric ids.
audit_package_warned = False
try:
    import audit
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is only present in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    # uname()[4] is the machine hardware name, e.g. 'x86_64'.
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Return the syscall name for *id*, or the numeric id as a string.

    The bare except is deliberate: when the audit package is missing,
    'audit' and 'machine_id' are undefined and the lookup raises
    NameError, which falls back to str(id).
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Return the errno symbol (e.g. "ENOENT") for *nr*, sign-insensitive.

    Unknown values yield "Unknown <nr> errno".
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare except: only a missing errno code should
        # trigger the fallback message.
        return "Unknown %d errno" % nr
| gpl-2.0 |
adoosii/edx-platform | common/test/acceptance/tests/test_ora.py | 118 | 15617 | """
Tests for ORA (Open Response Assessment) through the LMS UI.
"""
import json
from unittest import skip
from bok_choy.promise import Promise, BrokenPromise
from ..pages.lms.peer_confirm import PeerConfirmPage
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.open_response import OpenResponsePage
from ..pages.lms.peer_grade import PeerGradePage
from ..pages.lms.peer_calibrate import PeerCalibratePage
from ..pages.lms.progress import ProgressPage
from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
from .helpers import load_data_str, UniqueCourseTest
class OpenResponseTest(UniqueCourseTest):
    """
    Tests that interact with ORA (Open Response Assessment) through the LMS UI.
    This base class sets up a course with open response problems and defines
    some helper functions used in the ORA tests.
    """
    # Grade response (dict) to return from the XQueue stub
    # in response to our unique submission text.
    # None means "no stubbed grade"; subclasses override this.
    XQUEUE_GRADE_RESPONSE = None
    def setUp(self):
        """
        Install a test course with ORA problems.
        Always start in the subsection with open response problems.
        """
        super(OpenResponseTest, self).setUp()
        # Create page objects
        self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)
        self.course_nav = CourseNavPage(self.browser)
        self.open_response = OpenResponsePage(self.browser)
        self.peer_grade = PeerGradePage(self.browser)
        self.peer_calibrate = PeerCalibratePage(self.browser)
        self.peer_confirm = PeerConfirmPage(self.browser)
        self.progress_page = ProgressPage(self.browser, self.course_id)
        # Configure the test course
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Create a unique name for the peer assessed problem. This will show up
        # in the list of peer problems, which is shared among tests running
        # in parallel; it needs to be unique so we can find it.
        # It's also import that the problem has "Peer" in the name; otherwise,
        # the ORA stub will ignore it.
        self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6])
        # One subsection containing a self-, AI- and peer-assessed problem,
        # plus the peer grading interface module.
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc(
                        'combinedopenended',
                        'Self-Assessed',
                        data=load_data_str('ora_self_problem.xml'),
                        metadata={
                            'graded': True,
                        },
                    ),
                    XBlockFixtureDesc(
                        'combinedopenended',
                        'AI-Assessed',
                        data=load_data_str('ora_ai_problem.xml'),
                        metadata={
                            'graded': True,
                        },
                    ),
                    XBlockFixtureDesc(
                        'combinedopenended',
                        self.peer_problem_name,
                        data=load_data_str('ora_peer_problem.xml'),
                        metadata={
                            'graded': True,
                        },
                    ),
                    # This is the interface a student can use to grade his/her peers
                    XBlockFixtureDesc('peergrading', 'Peer Module'),
                )
            )
        ).install()
        # Configure the XQueue stub's response for the text we will submit
        # The submission text is unique so we can associate each response with a particular test case.
        self.submission = "Test submission " + self.unique_id[0:4]
        if self.XQUEUE_GRADE_RESPONSE is not None:
            XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE).install()
        # Log in and navigate to the essay problems
        self.auth_page.visit()
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
    def submit_essay(self, expected_assessment_type, expected_prompt):
        """
        Submit an essay and verify that the problem uses
        the `expected_assessment_type` ("self", "ai", or "peer") and
        shows the `expected_prompt` (a string).
        """
        # Check the assessment type and prompt
        self.assertEqual(self.open_response.assessment_type, expected_assessment_type)
        self.assertIn(expected_prompt, self.open_response.prompt)
        # Enter a submission, which will trigger a pre-defined response from the XQueue stub.
        self.open_response.set_response(self.submission)
        # Save the response and expect some UI feedback
        self.open_response.save_response()
        self.assertEqual(
            self.open_response.alert_message,
            "Answer saved, but not yet submitted."
        )
        # Submit the response
        self.open_response.submit_response()
    def get_asynch_feedback(self, assessment_type):
        """
        Wait for and retrieve asynchronous feedback
        (e.g. from AI, instructor, or peer grading)
        `assessment_type` is either "ai" or "peer".
        """
        # Because the check function involves fairly complicated actions
        # (navigating through several screens), we give it more time to complete
        # than the default.
        return Promise(
            self._check_feedback_func(assessment_type),
            'Got feedback for {0} problem'.format(assessment_type),
            timeout=600, try_interval=5
        ).fulfill()
    def _check_feedback_func(self, assessment_type):
        """
        Navigate away from, then return to, the peer problem to
        receive updated feedback.
        The returned function will return a tuple `(is_success, rubric_feedback)`,
        `is_success` is True iff we have received feedback for the problem;
        `rubric_feedback` is a list of "correct" or "incorrect" strings.
        """
        if assessment_type == 'ai':
            section_name = 'AI-Assessed'
        elif assessment_type == 'peer':
            section_name = self.peer_problem_name
        else:
            raise ValueError('Assessment type not recognized. Must be either "ai" or "peer"')
        def _inner_check():
            # Bounce through another sequential to force a page refresh.
            self.course_nav.go_to_sequential('Self-Assessed')
            self.course_nav.go_to_sequential(section_name)
            try:
                feedback = self.open_response.rubric.feedback
            # Unsuccessful if the rubric hasn't loaded
            except BrokenPromise:
                return False, None
            # Successful if `feedback` is a non-empty list
            else:
                return bool(feedback), feedback
        return _inner_check
class SelfAssessmentTest(OpenResponseTest):
    """
    Test ORA self-assessment.
    """
    def test_self_assessment(self):
        """
        Given I am viewing a self-assessment problem
        When I submit an essay and complete a self-assessment rubric
        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Navigate to the self-assessment problem and submit an essay
        self.course_nav.go_to_sequential('Self-Assessed')
        self.submit_essay('self', 'Censorship in the Libraries')
        # Fill in the rubric and expect that we get feedback
        rubric = self.open_response.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        # Score 0 on the first category, 1 on the second.
        rubric.set_scores([0, 1])
        rubric.submit('self')
        self.assertEqual(rubric.feedback, ['incorrect', 'correct'])
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # The first score is self-assessment, which we've answered, so it's 1/2
        # The other scores are AI- and peer-assessment, which we haven't answered so those are 0/2
        self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)])
class AIAssessmentTest(OpenResponseTest):
    """
    Test ORA AI-assessment.
    """
    # Stubbed XQueue reply: machine grader ('BC') awards 1 point with
    # the rubric defined in ora_rubric.xml.
    XQUEUE_GRADE_RESPONSE = {
        'score': 1,
        'feedback': json.dumps({"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"}),
        'grader_type': 'BC',
        'success': True,
        'grader_id': 1,
        'submission_id': 1,
        'rubric_scores_complete': True,
        'rubric_xml': load_data_str('ora_rubric.xml')
    }
    @skip('Intermittently failing, see ORA-342')
    def test_ai_assessment(self):
        """
        Given I am viewing an AI-assessment problem that has a trained ML model
        When I submit an essay and wait for a response
        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Navigate to the AI-assessment problem and submit an essay
        self.course_nav.go_to_sequential('AI-Assessed')
        self.submit_essay('ai', 'Censorship in the Libraries')
        # Refresh the page to get the updated feedback
        # then verify that we get the feedback sent by our stub XQueue implementation
        self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we have answered, so it's 1/2
        # Third score is peer-assessment, which we haven't answered, so it's 0/2
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class InstructorAssessmentTest(OpenResponseTest):
    """
    Test an AI-assessment that has been graded by an instructor.
    This runs the same test as the AI-assessment test, except
    that the feedback comes from an instructor instead of the machine grader.
    From the student's perspective, it should look the same.
    """
    # Stubbed XQueue reply: instructor grader ('IN') awards 1 point.
    XQUEUE_GRADE_RESPONSE = {
        'score': 1,
        'feedback': json.dumps({"feedback": "Good job!"}),
        'grader_type': 'IN',
        'success': True,
        'grader_id': 1,
        'submission_id': 1,
        'rubric_scores_complete': True,
        'rubric_xml': load_data_str('ora_rubric.xml')
    }
    @skip('Intermittently failing, see ORA-342')
    def test_instructor_assessment(self):
        """
        Given an instructor has graded my submission
        When I view my submission
        Then I see a scored rubric
        And my progress page shows the problem score.
        """
        # Navigate to the AI-assessment problem and submit an essay
        # We have configured the stub to simulate that this essay will be staff-graded
        self.course_nav.go_to_sequential('AI-Assessed')
        self.submit_essay('ai', 'Censorship in the Libraries')
        # Refresh the page to get the updated feedback
        # then verify that we get the feedback sent by our stub XQueue implementation
        self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we have answered, so it's 1/2
        # Third score is peer-assessment, which we haven't answered, so it's 0/2
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class PeerAssessmentTest(OpenResponseTest):
    """
    Test ORA peer-assessment, including calibration and giving/receiving scores.
    """
    # Unlike other assessment types, peer assessment has multiple scores
    # (three peers, one entry per peer in each list below).
    XQUEUE_GRADE_RESPONSE = {
        'score': [2, 2, 2],
        'feedback': [json.dumps({"feedback": ""})] * 3,
        'grader_type': 'PE',
        'success': True,
        'grader_id': [1, 2, 3],
        'submission_id': 1,
        'rubric_scores_complete': [True, True, True],
        'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
    }
    def test_peer_calibrate_and_grade(self):
        """
        Given I am viewing a peer-assessment problem
        And the instructor has submitted enough example essays
        When I submit acceptable scores for enough calibration essays
        Then I am able to peer-grade other students' essays.
        Given I have submitted an essay for peer-assessment
        And I have peer-graded enough students essays
        And enough other students have scored my essay
        Then I can view the scores and written feedback
        And I see my score in the progress page.
        """
        # Initially, the student should NOT be able to grade peers,
        # because he/she hasn't submitted any essays.
        self.course_nav.go_to_sequential('Peer Module')
        self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message)
        # Submit an essay
        self.course_nav.go_to_sequential(self.peer_problem_name)
        self.submit_essay('peer', 'Censorship in the Libraries')
        # Need to reload the page to update the peer grading module
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
        self.course_nav.go_to_section('Test Section', 'Test Subsection')
        # Select the problem to calibrate
        self.course_nav.go_to_sequential('Peer Module')
        self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)
        self.peer_grade.select_problem(self.peer_problem_name)
        # Calibrate against the instructor's example essay first.
        self.peer_confirm.start(is_calibrating=True)
        rubric = self.peer_calibrate.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit('peer')
        self.peer_calibrate.continue_to_grading()
        # Grade a peer
        self.peer_confirm.start()
        rubric = self.peer_grade.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit()
        # Expect to receive essay feedback
        # We receive feedback from all three peers, each of which
        # provide 2 scores (one for each rubric item)
        # Written feedback is a dummy value sent by the XQueue stub.
        self.course_nav.go_to_sequential(self.peer_problem_name)
        self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we haven't answered, so it's 0/2
        # Third score is peer-assessment, which we have answered, so it's 2/2
        self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])
| agpl-3.0 |
StephenKing/ryu | ryu/controller/tunnels.py | 50 | 6952 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import ryu.exception as ryu_exc
from ryu.base import app_manager
from ryu.controller import event
class RemoteDPIDAlreadyExist(ryu_exc.RyuException):
    """Raised when a tunnel port is already bound to a different remote dpid."""
    message = ('port (%(dpid)s, %(port)s) has already '
               'remote dpid %(remote_dpid)s')
class TunnelKeyAlreadyExist(ryu_exc.RyuException):
    """Raised when a tunnel key is already assigned to another network."""
    message = 'tunnel key %(tunnel_key)s already exists'
class TunnelKeyNotFound(ryu_exc.RyuException):
    """Raised when a network has no tunnel key registered."""
    message = 'no tunnel key for network %(network_id)s'
class EventTunnelKeyBase(event.EventBase):
    """Base class for tunnel-key events carrying (network_id, tunnel_key)."""
    def __init__(self, network_id, tunnel_key):
        super(EventTunnelKeyBase, self).__init__()
        self.network_id = network_id
        self.tunnel_key = tunnel_key
class EventTunnelKeyAdd(EventTunnelKeyBase):
    """Event: a tunnel key was registered for network_id."""
    def __init__(self, network_id, tunnel_key):
        super(EventTunnelKeyAdd, self).__init__(network_id, tunnel_key)
class EventTunnelKeyDel(EventTunnelKeyBase):
    """Event: the tunnel key of network_id was deleted."""
    def __init__(self, network_id, tunnel_key):
        super(EventTunnelKeyDel, self).__init__(network_id, tunnel_key)
class EventTunnelPort(event.EventBase):
    """Event: a tunnel port was added (add_del=True) or deleted (False)."""
    def __init__(self, dpid, port_no, remote_dpid, add_del):
        super(EventTunnelPort, self).__init__()
        self.dpid = dpid
        self.port_no = port_no
        self.remote_dpid = remote_dpid
        # True on port addition, False on deletion (see DPIDs below).
        self.add_del = add_del
class TunnelKeys(dict):
    """network id(uuid) <-> tunnel key(32bit unsigned int)

    A dict of network_id -> tunnel_key that notifies observers (via the
    callback passed to __init__) on every addition and deletion.
    """

    def __init__(self, f):
        super(TunnelKeys, self).__init__()
        self.send_event = f  # callback used to publish tunnel key events

    def get_key(self, network_id):
        """Return the key for network_id or raise TunnelKeyNotFound."""
        try:
            return self[network_id]
        except KeyError:
            raise TunnelKeyNotFound(network_id=network_id)

    def _set_key(self, network_id, tunnel_key):
        # Store the mapping and notify observers.
        self[network_id] = tunnel_key
        self.send_event(EventTunnelKeyAdd(network_id, tunnel_key))

    def register_key(self, network_id, tunnel_key):
        """Associate a new network with a new key; both must be unused."""
        if network_id in self:
            raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
        if tunnel_key in self.values():
            raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
        self._set_key(network_id, tunnel_key)

    def update_key(self, network_id, tunnel_key):
        """Set, or confirm, the key of a network; an existing key may not change."""
        if network_id not in self and tunnel_key in self.values():
            # Fix: pass tunnel_key= (not key=) so the exception's message
            # template '%(tunnel_key)s' is filled in, consistent with
            # register_key() above.
            raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
        key = self.get(network_id)
        if key is None:
            self._set_key(network_id, tunnel_key)
            return
        if key != tunnel_key:
            raise ryu_exc.NetworkAlreadyExist(network_id=network_id)

    def delete_key(self, network_id):
        """Remove the mapping for network_id, notifying observers first."""
        try:
            tunnel_key = self[network_id]
            self.send_event(EventTunnelKeyDel(network_id, tunnel_key))
            del self[network_id]
        except KeyError:
            raise ryu_exc.NetworkNotFound(network_id=network_id)
class DPIDs(object):
    """dpid -> port_no -> remote_dpid

    Tracks which local port of each switch tunnels to which remote
    switch, publishing EventTunnelPort on additions and deletions.
    """

    def __init__(self, f):
        super(DPIDs, self).__init__()
        # Missing dpids materialize as empty port dicts on first access.
        self.dpids = collections.defaultdict(dict)
        self.send_event = f

    def list_ports(self, dpid):
        """Return the {port_no: remote_dpid} dict for dpid."""
        return self.dpids[dpid]

    def _add_remote_dpid(self, dpid, port_no, remote_dpid):
        # Store the mapping and notify observers (add_del=True).
        self.dpids[dpid][port_no] = remote_dpid
        self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, True))

    def add_remote_dpid(self, dpid, port_no, remote_dpid):
        """Register a new tunnel port; the port must not exist yet."""
        if port_no in self.dpids[dpid]:
            raise ryu_exc.PortAlreadyExist(dpid=dpid, port=port_no,
                                           network_id=None)
        self._add_remote_dpid(dpid, port_no, remote_dpid)

    def update_remote_dpid(self, dpid, port_no, remote_dpid):
        """Set, or confirm, the remote dpid of a port; it may not change."""
        remote_dpid_ = self.dpids[dpid].get(port_no)
        if remote_dpid_ is None:
            self._add_remote_dpid(dpid, port_no, remote_dpid)
        elif remote_dpid_ != remote_dpid:
            # Fix: raise the module-local RemoteDPIDAlreadyExist defined
            # at the top of this file (which this raise previously looked
            # up on ryu_exc instead, leaving the local class unused).
            raise RemoteDPIDAlreadyExist(dpid=dpid, port=port_no,
                                         remote_dpid=remote_dpid)

    def get_remote_dpid(self, dpid, port_no):
        """Return the remote dpid of (dpid, port_no) or raise PortNotFound."""
        try:
            return self.dpids[dpid][port_no]
        except KeyError:
            raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)

    def delete_port(self, dpid, port_no):
        """Forget a tunnel port, notifying observers (add_del=False) first."""
        try:
            remote_dpid = self.dpids[dpid][port_no]
            self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, False))
            del self.dpids[dpid][port_no]
        except KeyError:
            raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)

    def get_port(self, dpid, remote_dpid):
        """Return the single local port on dpid that tunnels to remote_dpid."""
        try:
            dp = self.dpids[dpid]
        except KeyError:
            # NOTE: unreachable with a defaultdict; kept for safety.
            raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
        res = [port_no for (port_no, remote_dpid_) in dp.items()
               if remote_dpid_ == remote_dpid]
        # At most one tunnel port per (dpid, remote_dpid) pair.
        assert len(res) <= 1
        if len(res) == 0:
            raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
        return res[0]
class Tunnels(app_manager.RyuApp):
    """Ryu application tracking tunnel keys and tunnel ports.

    Thin facade over TunnelKeys and DPIDs; both publish their events
    through this app's observer mechanism.
    """
    def __init__(self):
        super(Tunnels, self).__init__()
        self.name = 'tunnels'
        self.tunnel_keys = TunnelKeys(self.send_event_to_observers)
        self.dpids = DPIDs(self.send_event_to_observers)
    def get_key(self, network_id):
        return self.tunnel_keys.get_key(network_id)
    def register_key(self, network_id, tunnel_key):
        return self.tunnel_keys.register_key(network_id, tunnel_key)
    def update_key(self, network_id, tunnel_key):
        self.tunnel_keys.update_key(network_id, tunnel_key)
    def delete_key(self, network_id):
        self.tunnel_keys.delete_key(network_id)
    def list_ports(self, dpid):
        # Only the port numbers, not the remote dpids.
        return self.dpids.list_ports(dpid).keys()
    def register_port(self, dpid, port_no, remote_dpid):
        self.dpids.add_remote_dpid(dpid, port_no, remote_dpid)
    def update_port(self, dpid, port_no, remote_dpid):
        self.dpids.update_remote_dpid(dpid, port_no, remote_dpid)
    def get_remote_dpid(self, dpid, port_no):
        return self.dpids.get_remote_dpid(dpid, port_no)
    def delete_port(self, dpid, port_no):
        self.dpids.delete_port(dpid, port_no)
    #
    # methods for gre tunnel
    #
    def get_port(self, dpid, remote_dpid):
        return self.dpids.get_port(dpid, remote_dpid)
| apache-2.0 |
campenberger/boto | boto/s3/prefix.py | 237 | 1661 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Prefix(object):
    """A common key prefix reported by an S3 bucket listing.

    When keys are listed with a delimiter, S3 groups keys that share a
    common prefix and returns each group as a ``<Prefix>`` element
    instead of individual keys.  Instances are populated by boto's SAX
    parsing machinery via ``startElement``/``endElement``.
    """

    def __init__(self, bucket=None, name=None):
        self.bucket = bucket  # owning Bucket, if any
        self.name = name      # the common-prefix string itself

    def startElement(self, name, attrs, connection):
        # SAX hook: <Prefix> elements contain no nested elements to
        # delegate to, so never hand off parsing.
        return None

    def endElement(self, name, value, connection):
        # SAX hook: capture element text.  The <Prefix> tag supplies the
        # prefix string; any other tag is stored verbatim as an attribute
        # of the same name.
        if name != 'Prefix':
            setattr(self, name, value)
        else:
            self.name = value

    @property
    def provider(self):
        """Storage provider of the owning bucket's connection, or None."""
        bucket = self.bucket
        if not (bucket and bucket.connection):
            return None
        return bucket.connection.provider
| mit |
pgraziano/ursula | plugins/callbacks/timestamp.py | 20 | 3585 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import time
from ansible.plugins.callback import CallbackBase
def secs_to_str(seconds):
    """Format a duration in (possibly fractional) seconds as ``H:MM:SS.mmm``.

    Bug fix: the original implementation used the bare ``reduce`` builtin
    (http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds),
    which is not a builtin on Python 3 (it moved to ``functools``), so it
    raised NameError there.  This version performs the same successive
    divmod arithmetic explicitly and works on both Python 2 and 3.
    """
    # Work in milliseconds, then peel off ms, seconds and minutes in turn.
    total_secs, millis = divmod(seconds * 1000, 1000)
    total_mins, secs = divmod(total_secs, 60)
    hours, mins = divmod(total_mins, 60)
    # %d truncates any float components, matching the original formatting.
    return "%d:%02d:%02d.%03d" % (hours, mins, secs, millis)
def fill_str(string, fchar="*"):
    """Pad *string* with a run of *fchar* into a banner line ending in a
    trailing space.

    An empty string yields a full 79-character filler.  Otherwise a
    single space is inserted after the text and the remaining width is
    filled, with a minimum of 3 filler characters even if that overruns
    the nominal 80-column width.
    """
    if string:
        string = "%s " % string
        width = max(79 - len(string), 3)  # never fewer than 3 fill chars
    else:
        width = 79
    return "%s%s " % (string, fchar * width)
class CallbackModule(CallbackBase):
    """Ansible callback plugin that prints a timestamp banner at each
    play/task boundary and, at the end of the run, reports the 25
    slowest tasks with their elapsed wall-clock times."""

    def __init__(self, *args, **kwargs):
        self.count = 0        # total number of tasks started
        self.stats = {}       # task name -> start time, later elapsed seconds
        self.current = None   # name of the task currently executing
        # t0 = start of the whole run; tn = time of the previous banner.
        self.tn = self.t0 = time.time()
        super(CallbackModule, self).__init__(*args, **kwargs)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.timestamp()
        if self.current is not None:
            # Record the running time of the last executed task
            self.stats[self.current] = time.time() - self.stats[self.current]
        # Record the start time of the current task
        self.current = task.get_name()
        self.stats[self.current] = time.time()
        self.count += 1

    def v2_playbook_on_setup(self):
        self.timestamp()

    def v2_playbook_on_play_start(self, play):
        self.timestamp()
        self._display.display(fill_str("", fchar="="))

    def v2_playbook_on_stats(self, play):
        # End-of-run summary: totals plus the slowest-task leaderboard.
        self.timestamp()
        self._display.display(fill_str("", fchar="="))
        self._display.display("Total tasks: %d" % self.count)
        self._display.display(fill_str("", fchar="="))
        self._display.display("Slowest 25 Tasks")
        self._display.display(fill_str("", fchar="="))
        # Record the timing of the very last task
        if self.current is not None:
            self.stats[self.current] = time.time() - self.stats[self.current]
        # Sort the tasks by their running time
        results = sorted(
            self.stats.items(),
            key=lambda value: value[1],
            reverse=True,
        )
        # Print the timings
        for name, elapsed in results[:25]:
            name = '{0} '.format(name)
            elapsed = ' {0:.02f}s'.format(elapsed)
            self._display.display("{0:-<70}{1:->9}".format(name, elapsed))

    def timestamp(self):
        # Banner line: current wall-clock time, delta since the previous
        # banner, and total elapsed time since the run began.
        time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
        time_elapsed = secs_to_str(time.time() - self.tn)
        time_total_elapsed = secs_to_str(time.time() - self.t0)
        self._display.display(
            fill_str(
                '%s (%s) %s' % (time_current,
                                time_elapsed,
                                time_total_elapsed)
            )
        )
        self.tn = time.time()
| mit |
haoqili/MozSecWorld | vendor-local/lib/python/PIL/ImagePath.py | 40 | 1342 | #
# The Python Imaging Library
# $Id$
#
# path interface
#
# History:
# 1996-11-04 fl Created
# 2002-04-14 fl Added documentation stub class
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image
##
# Path wrapper.
class Path:
    """Documentation stub for the path type.

    This pure-Python class only documents the interface; the ``Path``
    name is rebound to the C implementation (``Image.core.path``) at
    the bottom of this module, so these bodies are never executed.
    """

    ##
    # Creates a path object.
    #
    # @param xy Sequence. The sequence can contain 2-tuples [(x, y), ...]
    #     or a flat list of numbers [x, y, ...].

    def __init__(self, xy):
        pass

    ##
    # Compacts the path, by removing points that are close to each
    # other. This method modifies the path in place.

    def compact(self, distance=2):
        pass

    ##
    # Gets the bounding box.

    def getbbox(self):
        pass

    ##
    # Maps the path through a function.

    def map(self, function):
        pass

    ##
    # Converts the path to Python list.
    #
    # @param flat By default, this function returns a list of 2-tuples
    #     [(x, y), ...]. If this argument is true, it returns a flat
    #     list [x, y, ...] instead.
    # @return A list of coordinates.

    def tolist(self, flat=0):
        pass

    ##
    # Transforms the path.

    def transform(self, matrix):
        pass
# Replace the documentation stub above with the real C implementation
# provided by the core imaging module.
Path = Image.core.path
| bsd-3-clause |
vsfly/ucore_lab | related_info/ostep/ostep4-paging-linear-translate.py | 54 | 6658 | #! /usr/bin/env python
import sys
from optparse import OptionParser
import random
import math
def mustbepowerof2(bits, size, msg):
    # Sanity check: size must equal 2**bits (i.e. size is a power of two
    # with exponent `bits`).  Prints `msg` and exits the program otherwise.
    if math.pow(2,bits) != size:
        print 'Error in argument: %s' % msg
        sys.exit(1)
def mustbemultipleof(bignum, num, msg):
    # Sanity check: `bignum` must be an exact multiple of `num` (float vs
    # integer division results agree).  Prints `msg` and exits otherwise.
    if (int(float(bignum)/float(num)) != (int(bignum) / int(num))):
        print 'Error in argument: %s' % msg
        sys.exit(1)
def convert(size):
    """Convert a human-readable size string into an integer byte count.

    Accepts a plain integer string ('16') or one with a case-insensitive
    k/m/g suffix ('64k', '32M', '1g') meaning KiB/MiB/GiB.
    """
    multipliers = {
        'k': 1024,
        'm': 1024 * 1024,
        'g': 1024 * 1024 * 1024,
    }
    suffix = size[-1].lower()
    if suffix in multipliers:
        return int(size[:-1]) * multipliers[suffix]
    # No recognized suffix: interpret the whole string as a byte count.
    return int(size)
#
# main program: parse flags, validate the requested geometry, then emit a
# page table and a virtual-address trace for the linear-translation homework.
# (Python 2 script: uses print statements and optparse.)
#
parser = OptionParser()
parser.add_option('-A', '--addresses', default='-1',
                  help='a set of comma-separated pages to access; -1 means randomly generate',
                  action='store', type='string', dest='addresses')
parser.add_option('-s', '--seed', default=0, help='the random seed', action='store', type='int', dest='seed')
parser.add_option('-a', '--asize', default='16k', help='address space size (e.g., 16, 64k, 32m, 1g)', action='store', type='string', dest='asize')
parser.add_option('-p', '--physmem', default='64k', help='physical memory size (e.g., 16, 64k, 32m, 1g)', action='store', type='string', dest='psize')
parser.add_option('-P', '--pagesize', default='4k', help='page size (e.g., 4k, 8k, whatever)', action='store', type='string', dest='pagesize')
parser.add_option('-n', '--numaddrs', default=5, help='number of virtual addresses to generate', action='store', type='int', dest='num')
parser.add_option('-u', '--used', default=50, help='percent of virtual address space that is used', action='store', type='int', dest='used')
parser.add_option('-v', help='verbose mode', action='store_true', default=False, dest='verbose')
parser.add_option('-c', help='compute answers for me', action='store_true', default=False, dest='solve')
(options, args) = parser.parse_args()

# Echo the configuration so runs are reproducible from the output alone.
print 'ARG seed', options.seed
print 'ARG address space size', options.asize
print 'ARG phys mem size', options.psize
print 'ARG page size', options.pagesize
print 'ARG verbose', options.verbose
print 'ARG addresses', options.addresses
print ''

# Seed the PRNG so the same seed reproduces the same table and trace.
random.seed(options.seed)

# Convert the human-readable sizes ('16k', '32m', ...) into byte counts.
asize = convert(options.asize)
psize = convert(options.psize)
pagesize = convert(options.pagesize)
addresses = str(options.addresses)

# Basic sanity checks on the requested geometry.
if psize <= 1:
    print 'Error: must specify a non-zero physical memory size.'
    exit(1)

if asize < 1:
    print 'Error: must specify a non-zero address-space size.'
    exit(1)

# This simulation requires physical memory strictly larger than the VA space.
if psize <= asize:
    print 'Error: physical memory size must be GREATER than address space size (for this simulation)'
    exit(1)

if psize >= convert('1g') or asize >= convert('1g'):
    print 'Error: must use smaller sizes (less than 1 GB) for this simulation.'
    exit(1)

mustbemultipleof(asize, pagesize, 'address space must be a multiple of the pagesize')
mustbemultipleof(psize, pagesize, 'physical memory must be a multiple of the pagesize')
# print some useful info, like the darn page table
# (Python 2: the divisions below are integer divisions.)
pages = psize / pagesize;

import array  # late import kept as-is
# used[i] == 1 once physical frame i has been handed out; pt maps VPN -> PFN.
used = array.array('i')
pt = array.array('i')
for i in range(0,pages):
    used.insert(i,0)

vpages = asize / pagesize

# now, assign some pages of the VA
vabits = int(math.log(float(asize))/math.log(2.0))
mustbepowerof2(vabits, asize, 'address space must be a power of 2')
pagebits = int(math.log(float(pagesize))/math.log(2.0))
mustbepowerof2(pagebits, pagesize, 'page size must be a power of 2')
vpnbits = vabits - pagebits
# pagemask selects the offset bits; vpnmask selects the VPN bits (32-bit).
pagemask = (1 << pagebits) - 1
# import ctypes
# vpnmask = ctypes.c_uint32(~pagemask).value
vpnmask = 0xFFFFFFFF & ~pagemask
#if vpnmask2 != vpnmask:
#    print 'ERROR'
#    exit(1)
# print 'va:%d page:%d vpn:%d -- %08x %08x' % (vabits, pagebits, vpnbits, vpnmask, pagemask)

print ''
print 'The format of the page table is simple:'
print 'The high-order (left-most) bit is the VALID bit.'
print ' If the bit is 1, the rest of the entry is the PFN.'
print ' If the bit is 0, the page is not valid.'
print 'Use verbose mode (-v) if you want to print the VPN # by'
print 'each entry of the page table.'
print ''

# Build and print the page table: each VPN is valid with probability
# `options.used` percent; valid entries draw a random unused frame.
print 'Page Table (from entry 0 down to the max size)'
for v in range(0,vpages):
    done = 0
    while done == 0:
        if ((random.random() * 100.0) > (100.0 - float(options.used))):
            # Retry until an unused physical frame is found.
            u = int(pages * random.random())
            if used[u] == 0:
                done = 1
                # print '%8d - %d' % (v, u)
                if options.verbose == True:
                    print ' [%8d] ' % v,
                else:
                    print ' ',
                # Top bit set = valid; low bits hold the PFN.
                print '0x%08x' % (0x80000000 | u)
                pt.insert(v,u)
        else:
            # print '%8d - not valid' % v
            if options.verbose == True:
                print ' [%8d] ' % v,
            else:
                print ' ',
            print '0x%08x' % 0
            # -1 marks an invalid entry in the internal table.
            pt.insert(v,-1)
            done = 1
print ''
#
# now, need to generate virtual address trace
#
addrList = []
if addresses == '-1':
    # need to generate addresses
    for i in range(0, options.num):
        n = int(asize * random.random())
        addrList.append(n)
else:
    # User supplied an explicit comma-separated list of addresses.
    addrList = addresses.split(',')

print 'Virtual Address Trace'
for vStr in addrList:
    # vaddr = int(asize * random.random())
    vaddr = int(vStr)
    if options.solve == False:
        # Homework mode: leave the translation for the student.
        print ' VA 0x%08x (decimal: %8d) --> PA or invalid address?' % (vaddr, vaddr)
    else:
        paddr = 0
        # split vaddr into VPN | offset
        vpn = (vaddr & vpnmask) >> pagebits
        if pt[vpn] < 0:
            print ' VA 0x%08x (decimal: %8d) --> Invalid (VPN %d not valid)' % (vaddr, vaddr, vpn)
        else:
            # Translate: swap the VPN for its PFN, keep the page offset.
            pfn = pt[vpn]
            offset = vaddr & pagemask
            paddr = (pfn << pagebits) | offset
            print ' VA 0x%08x (decimal: %8d) --> %08x (decimal %8d) [VPN %d]' % (vaddr, vaddr, paddr, paddr, vpn)
print ''

if options.solve == False:
    print 'For each virtual address, write down the physical address it translates to'
    print 'OR write down that it is an out-of-bounds address (e.g., segfault).'
    print ''
roadmapper/ansible | test/lib/ansible_test/_internal/cloud/fallaxy.py | 7 | 5719 | """Fallaxy (ansible-galaxy) plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import uuid
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
find_executable,
display,
)
from ..docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
class FallaxyProvider(CloudProvider):
    """Fallaxy plugin.

    Sets up Fallaxy (ansible-galaxy) stub server for tests.

    Its source resides at: https://github.com/ansible/fallaxy-test-container
    """

    DOCKER_SIMULATOR_NAME = 'fallaxy-stub'

    def __init__(self, args):
        """
        :type args: TestConfig
        """
        super(FallaxyProvider, self).__init__(args)

        # Allow the container image to be overridden from the environment.
        if os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
            self.image = os.environ.get('ANSIBLE_FALLAXY_CONTAINER')
        else:
            self.image = 'quay.io/ansible/fallaxy-test-container:1.0.0'

        self.container_name = ''

    def filter(self, targets, exclude):
        """Filter out the tests with the necessary config and res unavailable.

        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        docker_cmd = 'docker'
        docker = find_executable(docker_cmd, required=False)

        # Docker is available, so nothing needs to be excluded.
        if docker:
            return

        # No docker: exclude (and warn about) every target on this platform.
        skip = 'cloud/%s/' % self.platform
        skipped = [target.name for target in targets if skip in target.aliases]

        if skipped:
            exclude.append(skip)
            display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
                            % (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))

    def setup(self):
        """Setup cloud resource before delegation and reg cleanup callback."""
        super(FallaxyProvider, self).setup()

        if self._use_static_config():
            self._setup_static()
        else:
            self._setup_dynamic()

    def get_docker_run_options(self):
        """Get additional options needed when delegating tests to a container.

        :rtype: list[str]
        """
        # Only link against the simulator container when we manage it.
        return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []

    def cleanup(self):
        """Clean up the resource and temporary configs files after tests."""
        if self.container_name:
            docker_rm(self.args, self.container_name)

        super(FallaxyProvider, self).cleanup()

    def _setup_dynamic(self):
        # Start (or reuse) the simulator container and record its
        # host/port/token in the cloud config for the tests to consume.
        container_id = get_docker_container_id()

        if container_id:
            display.info('Running in docker container: %s' % container_id, verbosity=1)

        self.container_name = self.DOCKER_SIMULATOR_NAME

        results = docker_inspect(self.args, self.container_name)

        # A stopped leftover container is useless; remove it so a fresh
        # one can be started.
        if results and not results[0].get('State', {}).get('Running'):
            docker_rm(self.args, self.container_name)
            results = []

        display.info('%s Fallaxy simulator docker container.'
                     % ('Using the existing' if results else 'Starting a new'),
                     verbosity=1)

        fallaxy_port = 8080
        # Fresh random token, shared with the container via FALLAXY_TOKEN.
        fallaxy_token = str(uuid.uuid4()).replace('-', '')

        if not results:
            if self.args.docker or container_id:
                publish_ports = []
            else:
                # publish the simulator ports when not running inside docker
                publish_ports = [
                    '-p', ':'.join((str(fallaxy_port),) * 2),
                ]

            # Only pull when using the default image, not a local override.
            if not os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
                docker_pull(self.args, self.image)

            docker_run(
                self.args,
                self.image,
                ['-d', '--name', self.container_name, '-e', 'FALLAXY_TOKEN=%s' % fallaxy_token] + publish_ports,
            )

        # Pick the hostname tests should use to reach the simulator.
        if self.args.docker:
            fallaxy_host = self.DOCKER_SIMULATOR_NAME
        elif container_id:
            fallaxy_host = self._get_simulator_address()
            display.info('Found Fallaxy simulator container address: %s' % fallaxy_host, verbosity=1)
        else:
            fallaxy_host = 'localhost'

        self._set_cloud_config('FALLAXY_HOST', fallaxy_host)
        self._set_cloud_config('FALLAXY_PORT', str(fallaxy_port))
        self._set_cloud_config('FALLAXY_TOKEN', fallaxy_token)

    def _get_simulator_address(self):
        # Resolve the simulator container's IP on the docker network.
        results = docker_inspect(self.args, self.container_name)
        ipaddress = results[0]['NetworkSettings']['IPAddress']
        return ipaddress

    def _setup_static(self):
        # Static (pre-provisioned) configuration is not supported.
        raise NotImplementedError()
class FallaxyEnvironment(CloudEnvironment):
    """Fallaxy environment plugin.

    Updates integration test environment after delegation.
    """
    def get_environment_config(self):
        """Expose the simulator's host/port/token (recorded during setup)
        to the tests, both as Ansible variables and environment variables.

        :rtype: CloudEnvironmentConfig
        """
        fallaxy_token = self._get_cloud_config('FALLAXY_TOKEN')
        fallaxy_host = self._get_cloud_config('FALLAXY_HOST')
        fallaxy_port = self._get_cloud_config('FALLAXY_PORT')

        return CloudEnvironmentConfig(
            ansible_vars=dict(
                fallaxy_token=fallaxy_token,
                fallaxy_galaxy_server='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
                fallaxy_ah_server='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
            ),
            env_vars=dict(
                FALLAXY_TOKEN=fallaxy_token,
                FALLAXY_GALAXY_SERVER='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
                FALLAXY_AH_SERVER='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
            ),
        )
| gpl-3.0 |
wbond/certbuilder | dev/_task.py | 7 | 4196 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import ast
import _ast
import os
import sys
from . import package_root, task_keyword_args
from ._import import _import_from
# The native byte-string type differs between Python 2 (str) and 3 (bytes).
if sys.version_info < (3,):
    byte_cls = str
else:
    byte_cls = bytes
def _list_tasks():
    """
    Fetches a list of all valid tasks that may be run, and the args they
    accept. Does not actually import the task module to prevent errors if a
    user does not have the dependencies installed for every task.

    :return:
        A list of 2-element tuples:
         0: a unicode string of the task name
         1: a list of dicts containing the parameter definitions
    """

    out = []
    dev_path = os.path.join(package_root, 'dev')
    for fname in sorted(os.listdir(dev_path)):
        # Skip hidden/private modules and anything that is not Python source.
        if fname.startswith('.') or fname.startswith('_'):
            continue
        if not fname.endswith('.py'):
            continue
        name = fname[:-3]
        args = ()

        full_path = os.path.join(package_root, 'dev', fname)

        with open(full_path, 'rb') as f:
            full_code = f.read()
            if sys.version_info >= (3,):
                full_code = full_code.decode('utf-8')

        # Parse the module's AST looking for a top-level `run_args = ...`
        # assignment, rather than importing the module (which could fail).
        task_node = ast.parse(full_code, filename=full_path)
        for node in ast.iter_child_nodes(task_node):
            if isinstance(node, _ast.Assign):
                if len(node.targets) == 1 \
                        and isinstance(node.targets[0], _ast.Name) \
                        and node.targets[0].id == 'run_args':
                    # literal_eval safely evaluates the constant expression.
                    args = ast.literal_eval(node.value)
                    break
        out.append((name, args))

    return out
def show_usage():
    """
    Prints to stderr the valid options for invoking tasks, then exits
    with status 1.
    """

    valid_tasks = []
    for task in _list_tasks():
        # Render each task as "name {required_arg} [optional_arg] ...".
        usage = task[0]
        for run_arg in task[1]:
            usage += ' '
            name = run_arg.get('name', '')
            if run_arg.get('required', False):
                usage += '{%s}' % name
            else:
                usage += '[%s]' % name
        valid_tasks.append(usage)

    out = 'Usage: run.py'
    # Global keyword args come before the task name.
    for karg in task_keyword_args:
        out += ' [%s=%s]' % (karg['name'], karg['placeholder'])
    out += ' (%s)' % ' | '.join(valid_tasks)
    print(out, file=sys.stderr)
    sys.exit(1)
def _get_arg(num):
    """
    :return:
        A unicode string of the requested command line arg, or None when
        fewer than *num* positional arguments were supplied
    """
    if len(sys.argv) <= num:
        return None
    value = sys.argv[num]
    # On Python 2, sys.argv entries are byte strings; decode to unicode.
    if isinstance(value, byte_cls):
        value = value.decode('utf-8')
    return value
def run_task():
    """
    Parses the command line args, invoking the requested task.

    Exits the process with status 0 when the task's run() returns a
    truthy value, 1 otherwise (or after printing usage on bad input).
    """

    arg_num = 1
    task = None
    args = []
    kwargs = {}

    # We look for the task name, processing any global task keyword args
    # by setting the appropriate env var
    while True:
        val = _get_arg(arg_num)
        if val is None:
            break

        next_arg = False
        for karg in task_keyword_args:
            if val.startswith(karg['name'] + '='):
                os.environ[karg['env_var']] = val[len(karg['name']) + 1:]
                next_arg = True
                break

        if next_arg:
            arg_num += 1
            continue

        task = val
        break

    if task is None:
        show_usage()

    # Import lazily so unrelated tasks' dependencies are never required.
    task_mod = _import_from('dev.%s' % task, package_root, allow_error=True)
    if task_mod is None:
        show_usage()

    run_args = task_mod.__dict__.get('run_args', [])
    max_args = arg_num + 1 + len(run_args)
    # Too many positional args for this task is a usage error.
    if len(sys.argv) > max_args:
        show_usage()

    # Map the remaining positional args onto the task's declared params.
    for i, run_arg in enumerate(run_args):
        val = _get_arg(arg_num + 1 + i)
        if val is None:
            if run_arg.get('required', False):
                show_usage()
            break

        if run_arg.get('cast') == 'int' and val.isdigit():
            val = int(val)
        kwarg = run_arg.get('kwarg')
        if kwarg:
            kwargs[kwarg] = val
        else:
            args.append(val)

    run = task_mod.__dict__.get('run')

    result = run(*args, **kwargs)
    # Truthy result -> exit 0, falsy -> exit 1.
    sys.exit(int(not result))
| mit |
Lujeni/ansible | test/units/modules/network/nos/nos_module.py | 52 | 2511 | # (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
# Directory holding the fixture files used by these tests.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Cache of already-loaded fixtures, keyed by absolute file path.
fixture_data = {}


def load_fixture(name):
    # Load (and memoize) a fixture file; JSON content is parsed, anything
    # else is returned as the raw string contents.
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as file_desc:
        data = file_desc.read()

    try:
        data = json.loads(data)
    except Exception:
        # Not JSON; keep the raw text.
        pass

    fixture_data[path] = data
    return data
class TestNosModule(ModuleTestCase):
    """Common harness for NOS network-module unit tests: runs the module
    under test and checks its changed/failed state and the commands it
    would send to the device."""

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        # `defaults` is accepted for signature compatibility with the other
        # network-module test bases; it is unused here.
        self.load_fixtures(commands)

        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)

        if commands is not None:
            if sort:
                # Order-insensitive comparison of the generated commands.
                self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
            else:
                self.assertEqual(commands, result['commands'], result['commands'])

        return result

    def failed(self):
        # Run the module, expecting it to exit via fail_json.
        with self.assertRaises(AnsibleFailJson) as exc:
            self.module.main()

        result = exc.exception.args[0]
        self.assertTrue(result['failed'], result)

        return result

    def changed(self, changed=False):
        # Run the module, expecting it to exit via exit_json, and verify
        # the reported changed flag.
        with self.assertRaises(AnsibleExitJson) as exc:
            self.module.main()

        result = exc.exception.args[0]
        self.assertEqual(result['changed'], changed, result)

        return result

    def load_fixtures(self, commands=None):
        # Hook for subclasses to stub out device communication.
        pass
ageron/tensorflow | tensorflow/python/data/kernel_tests/multi_device_iterator_test.py | 5 | 14657 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.MultiDeviceIterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
@parameterized.parameters(0, 1, 42,)
@test_util.run_v1_only("b/121264236")
def testInitOnly(self, num_inits):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
for _ in range(num_inits):
self.evaluate(multi_device_iterator.initializer)
@test_util.run_v1_only("b/121264236")
def testBasic(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testOneOnSameDevice(self):
with ops.device("/cpu:0"):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:0", "/cpu:1"])
config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testRepeatDevices(self):
with ops.device("/cpu:0"):
dataset = dataset_ops.Dataset.range(20)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2", "/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 20, 4):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
self.assertEqual(i + 2, self.evaluate(elem_on_3))
self.assertEqual(i + 3, self.evaluate(elem_on_4))
with self.assertRaises(errors.OutOfRangeError):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
self.evaluate(elem_on_3)
self.evaluate(elem_on_4)
@test_util.run_v1_only("b/121264236")
def testNotFullyDivisible(self):
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
elem_on_1 = multi_device_iterator.get_next("/cpu:1")
self.assertEqual(8, self.evaluate(elem_on_1))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testGetNextAsOptional(self):
if context.executing_eagerly():
return
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
elem_on_1_has_value_t = elem_on_1.has_value()
elem_on_1_t = elem_on_1.get_value()
elem_on_2_has_value_t = elem_on_2.has_value()
elem_on_2_t = elem_on_2.get_value()
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config) as sess:
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(i, elem_on_1_value)
elem_on_2_has_value, elem_on_2_value = sess.run(
[elem_on_2_has_value_t, elem_on_2_t])
self.assertTrue(elem_on_2_has_value)
self.assertEqual(i + 1, elem_on_2_value)
elem_on_1_has_value, elem_on_1_value = sess.run(
[elem_on_1_has_value_t, elem_on_1_t])
self.assertTrue(elem_on_1_has_value)
self.assertEqual(8, elem_on_1_value)
self.assertFalse(self.evaluate(elem_on_1_has_value_t))
self.assertFalse(self.evaluate(elem_on_2_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_1_t)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_on_2_t)
@test_util.run_v1_only("b/121264236")
def testUneven(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], max_buffer_size=4)
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1 = multi_device_iterator.get_next("/cpu:1")
self.assertEqual(i, self.evaluate(elem_on_1))
for i in range(0, 10, 2):
elem_on_2 = multi_device_iterator.get_next("/cpu:2")
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@test_util.run_v1_only("b/121264236")
def testMultipleInitializationsGraph(self):
if context.executing_eagerly():
return
with ops.device("/cpu:0"):
epoch = array_ops.placeholder(dtypes.int64, shape=[])
dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
init_op = multi_device_iterator.initializer
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config) as sess:
for i in range(1000):
sess.run(init_op, feed_dict={epoch: i})
self.assertEqual([(i, 0), (i, 1)], self.evaluate([elem_on_1,
elem_on_2]))
@test_util.run_v1_only("b/121264236")
def testMultipleInitializationsEager(self):
if not context.executing_eagerly():
return
with ops.device("/cpu:0"):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
for _ in range(5):
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@test_util.run_v1_only("b/121264236")
def testBasicGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/gpu:0"])
config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
  @test_util.run_v1_only("b/121264236")
  def testUnevenGpu(self):
    # Same even/odd split as testBasicGpu, but the devices are drained at
    # different rates: all CPU elements first, then all GPU elements.
    # max_buffer_size=4 lets the iterator buffer ahead for the idle device.
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/gpu:0"], max_buffer_size=4)
    config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      # The CPU device receives the even elements.
      for i in range(0, 10, 2):
        elem_on_1 = multi_device_iterator.get_next("/cpu:1")
        self.assertEqual(i, self.evaluate(elem_on_1))
      # The GPU device receives the odd elements.
      for i in range(0, 10, 2):
        elem_on_2 = multi_device_iterator.get_next("/gpu:0")
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @test_util.run_v1_only("b/121264236")
  def testGetNextAsOptionalGpu(self):
    if not test_util.is_gpu_available() or context.executing_eagerly():
      self.skipTest("No GPU available")
    # 9 elements split over two devices: /cpu:1 gets 0,2,4,6,8 (five values),
    # /gpu:0 gets 1,3,5,7 (four values) -- deliberately uneven.
    dataset = dataset_ops.Dataset.range(9)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/gpu:0"])
    elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
    elem_on_1_has_value_t = elem_on_1.has_value()
    elem_on_1_t = elem_on_1.get_value()
    elem_on_2_has_value_t = elem_on_2.has_value()
    elem_on_2_t = elem_on_2.get_value()
    config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
    with self.test_session(config=config) as sess:
      self.evaluate(multi_device_iterator.initializer)
      # Both devices produce values for the first 8 elements.
      for i in range(0, 8, 2):
        elem_on_1_has_value, elem_on_1_value = sess.run(
            [elem_on_1_has_value_t, elem_on_1_t])
        self.assertTrue(elem_on_1_has_value)
        self.assertEqual(i, elem_on_1_value)
        elem_on_2_has_value, elem_on_2_value = sess.run(
            [elem_on_2_has_value_t, elem_on_2_t])
        self.assertTrue(elem_on_2_has_value)
        self.assertEqual(i + 1, elem_on_2_value)
      # Only the CPU device has one element (8) left.
      elem_on_1_has_value, elem_on_1_value = sess.run(
          [elem_on_1_has_value_t, elem_on_1_t])
      self.assertTrue(elem_on_1_has_value)
      self.assertEqual(8, elem_on_1_value)
      # After exhaustion the optionals report no value instead of raising
      # OutOfRangeError...
      self.assertFalse(self.evaluate(elem_on_1_has_value_t))
      self.assertFalse(self.evaluate(elem_on_2_has_value_t))
      # ...and asking an empty optional for its value is an error.
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_1_t)
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_2_t)
  @test_util.run_v1_only("b/121264236")
  def testOptimization(self):
    # Checks that dataset graph optimizations are applied when the dataset is
    # consumed through a MultiDeviceIterator: with noop_elimination enabled,
    # skip(0) must be removed, making MemoryCacheImpl the next transformation
    # (assert_next fails at runtime otherwise).
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
    dataset = dataset.skip(0)  # this should be optimized away
    dataset = dataset.cache()
    options = dataset_ops.Options()
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      # The optimized dataset must still yield the same even/odd split.
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
if __name__ == "__main__":
  # Run with eager execution enabled and enough virtual devices for the
  # device lists used by the tests above (3 CPUs, 1 GPU).
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
  test.main()
| apache-2.0 |
madjar/cython | tests/run/decorators_py_T593.py | 30 | 1447 | # mode: run
# ticket: 593
# tag: property, decorator
"""
>>> am_i_buggy
False
"""
def testme(func):
    # Decorator used below to probe evaluation order (ticket T593): at the
    # time a decorator is applied, the decorated name must NOT yet be bound
    # in the module namespace.  Returns True if 'am_i_buggy' was already
    # defined (the buggy behaviour), False otherwise.
    try:
        am_i_buggy
        return True
    except NameError:
        return False
@testme
def am_i_buggy():
    # After decoration this name is bound to testme's return value (a bool),
    # not to this function; the module doctest expects it to be False.
    pass
def called_deco(a,b,c):
    # Decorator factory: records its own call as (1, b, c) in list 'a' and
    # returns a decorator that records (2, b, c) when applied to a function.
    # Used below to check the call/apply ordering of stacked decorators.
    a.append( (1,b,c) )
    def count(f):
        a.append( (2,b,c) )
        return f
    return count
L = []
# Stacked decorator factories: the factories themselves are called
# top-to-bottom (recording 1-tuples) while the decorators they return are
# applied bottom-to-top (recording 2-tuples) -- hence the order asserted
# in the doctest below.
@called_deco(L, 5, c=6)
@called_deco(L, c=3, b=4)
@called_deco(L, 1, 2)
def wrapped_func(x):
    """
    >>> L
    [(1, 5, 6), (1, 4, 3), (1, 1, 2), (2, 1, 2), (2, 4, 3), (2, 5, 6)]
    >>> wrapped_func(99)
    99
    >>> L
    [(1, 5, 6), (1, 4, 3), (1, 1, 2), (2, 1, 2), (2, 4, 3), (2, 5, 6)]
    """
    return x
def class_in_closure(x):
    """
    >>> C1, c0 = class_in_closure(5)
    >>> C1().smeth1()
    (5, ())
    >>> C1.smeth1(1,2)
    (5, (1, 2))
    >>> C1.smeth1()
    (5, ())
    >>> c0.smeth0()
    1
    >>> c0.__class__.smeth0()
    1
    """
    # Regression fixture: @staticmethod on classes defined inside a closure,
    # where the static method captures the closure variable 'x'.
    class ClosureClass1(object):
        @staticmethod
        def smeth1(*args):
            return x, args
    # Second class whose static method does NOT touch the closure variable.
    class ClosureClass0(object):
        @staticmethod
        def smeth0():
            return 1
    return ClosureClass1, ClosureClass0()
def class_not_in_closure():
    """
    >>> c = class_not_in_closure()
    >>> c.smeth0()
    1
    >>> c.__class__.smeth0()
    1
    """
    # Control case for class_in_closure: the same @staticmethod pattern, but
    # the enclosing function has no closure variable to capture.
    class ClosureClass0(object):
        @staticmethod
        def smeth0():
            return 1
    return ClosureClass0()
| apache-2.0 |
yujikato/DIRAC | src/DIRAC/Core/Utilities/MJF.py | 2 | 6755 | """ The MJF utility calculates the amount of wall clock time
left for a given batch system slot or VM. This is essential for the
'Filling Mode' where several jobs may be executed in the same slot.
Machine Job/Features are used following HSF-TN-2016-02 if available.
Otherwise values are filled in using the batch system and CS
information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ssl
import time
from six.moves.urllib.request import urlopen
import DIRAC
from DIRAC import gLogger, gConfig
__RCSID__ = "$Id$"
class MJF(object):
  """Machine/Job Features methods.

  Implements HSF-TN-2016-02: MJF values are key files published either on a
  locally mounted path or behind an HTTP(S) server, with the base location
  given by the $MACHINEFEATURES and $JOBFEATURES environment variables.
  Values are cached in the /LocalSite configuration section and used to work
  out how much wall clock time is left for the current slot.
  """

  # MJF keys handled per namespace (the keys of this dict are also the names
  # of the environment variables pointing at the value sources)
  mjfKeys = {'MACHINEFEATURES': ['total_cpu', 'hs06', 'shutdowntime', 'grace_secs'],
             'JOBFEATURES': ['allocated_cpu', 'hs06_job', 'shutdowntime_job', 'grace_secs_job',
                             'jobstart_secs', 'job_id', 'wall_limit_secs', 'cpu_limit_secs',
                             'max_rss_bytes', 'max_swap_bytes', 'scratch_limit_bytes']}

  #############################################################################
  def __init__(self):
    """Standard constructor.

    Raises Exception if the CA certificates location cannot be found, since
    it is needed to verify HTTPS MJF servers.
    """
    self.log = gLogger.getSubLogger('MJF')

    capath = DIRAC.Core.Security.Locations.getCAsLocation()
    if not capath:
      raise Exception('Unable to find CA files location! Not in /etc/grid-security/certificates/ etc.')

    # Used by urllib when talking to HTTPS web servers
    self.context = ssl.create_default_context(capath=capath)

  def updateConfig(self, pilotStartTime=None):
    """Populate /LocalSite/MACHINEFEATURES and /LocalSite/JOBFEATURES with MJF values

    This is run early in the job to update the configuration file that subsequent DIRAC
    scripts read when they start.
    """
    if pilotStartTime:
      gConfig.setOptionValue('/LocalSite/JOBFEATURES/jobstart_secs', str(pilotStartTime))

    for mORj in ['MACHINEFEATURES', 'JOBFEATURES']:
      for key in self.mjfKeys[mORj]:
        value = self.__fetchMachineJobFeature(mORj, key)

        # Missing keys are simply not cached; many are optional at sites
        if value is not None:
          gConfig.setOptionValue('/LocalSite/%s/%s' % (mORj, key), value)

  def getMachineFeature(self, key):
    """Returns MACHINEFEATURES/key value saved in /LocalSite configuration by
    updateConfigFile() unless MACHINEFEATURES/shutdowntime when we try to fetch
    from the source URL itself again in case it changes.
    """
    if key == 'shutdowntime':
      value = self.__fetchMachineJobFeature('MACHINEFEATURES', 'shutdowntime')
      # If unable to fetch shutdowntime, go back to any value in /LocalSite
      # in case HTTP(S) server is down
      if value is not None:
        return value

    return gConfig.getValue('/LocalSite/MACHINEFEATURES/' + key, None)

  def getIntMachineFeature(self, key):
    """Returns MACHINEFEATURES/key as an int, or None if absent or not an int.
    """
    value = self.getMachineFeature(key)
    try:
      return int(value)
    except (TypeError, ValueError):
      # TypeError covers value being None (key not published by the site);
      # ValueError covers a present but non-numeric value
      return None

  def getJobFeature(self, key):
    """Returns JOBFEATURES/key value saved in /LocalSite configuration by
    updateConfigFile() unless JOBFEATURES/shutdowntime_job when we try to fetch
    from the source URL itself again in case it changes.
    """
    if key == 'shutdowntime_job':
      value = self.__fetchMachineJobFeature('JOBFEATURES', 'shutdowntime_job')
      # If unable to fetch shutdowntime_job, go back to any value in /LocalSite
      # in case HTTP(S) server is down
      if value is not None:
        return value

    return gConfig.getValue('/LocalSite/JOBFEATURES/' + key, None)

  def getIntJobFeature(self, key):
    """Returns JOBFEATURES/key as an int, or None if absent or not an int.
    """
    value = self.getJobFeature(key)
    try:
      return int(value)
    except (TypeError, ValueError):
      # see getIntMachineFeature: None -> TypeError, bad string -> ValueError
      return None

  def __fetchMachineJobFeature(self, mORj, key):
    """Returns raw MJF value for a given key, perhaps by HTTP(S), perhaps from a local file

    mORj must be MACHINEFEATURES or JOBFEATURES.
    If the value cannot be found, then return None. There are many legitimate ways for
    a site not to provide some MJF values so we don't log errors, failures etc.
    """
    if mORj != 'MACHINEFEATURES' and mORj != 'JOBFEATURES':
      raise Exception('Must request MACHINEFEATURES or JOBFEATURES')

    if mORj not in os.environ:
      return None

    url = os.environ[mORj] + '/' + key

    # Simple if a file
    if url[0] == '/':
      try:
        with open(url, 'r') as fd:
          return fd.read().strip()
      except Exception:
        return None

    # Otherwise make sure it's an HTTP(S) URL
    if not url.startswith('http://') and not url.startswith('https://'):
      return None

    # We could have used urlopen() for local files too, but we also
    # need to check HTTP return code in case we get an HTML error page
    # instead of a true key value.
    try:
      mjfUrl = urlopen(url=url, context=self.context)
      # HTTP return codes other than 2xx mean failure
      if int(mjfUrl.getcode() / 100) != 2:
        return None
      return mjfUrl.read().strip()
    except Exception:
      return None
    finally:
      try:
        mjfUrl.close()
      except UnboundLocalError:
        # urlopen() itself failed, nothing to close
        pass

  def getWallClockSecondsLeft(self):
    """Returns the number of seconds until either the wall clock limit
    or the shutdowntime(_job) is reached, or None if no limit is known.
    """
    now = int(time.time())
    secondsLeft = None
    jobstartSecs = self.getIntJobFeature('jobstart_secs')
    wallLimitSecs = self.getIntJobFeature('wall_limit_secs')
    shutdowntimeJob = self.getIntJobFeature('shutdowntime_job')
    shutdowntime = self.getIntMachineFeature('shutdowntime')

    # look for local shutdown file
    try:
      with open('/var/run/shutdown_time', 'r') as fd:
        shutdowntimeLocal = int(fd.read().strip())
    except (IOError, ValueError):
      shutdowntimeLocal = None

    if jobstartSecs is not None and wallLimitSecs is not None:
      secondsLeft = jobstartSecs + wallLimitSecs - now

    # The most imminent of the advertised shutdown times wins
    for shutdown in (shutdowntimeJob, shutdowntime, shutdowntimeLocal):
      if shutdown is not None:
        if secondsLeft is None or shutdown - now < secondsLeft:
          secondsLeft = shutdown - now

    # Wall Clock time left or None if unknown
    return secondsLeft
| gpl-3.0 |
qzhan15/mydpdk | test/test/autotest_runner.py | 12 | 14886 | # BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The main logic behind running autotests in parallel
import StringIO
import csv
import multiprocessing
import pexpect
import re
import subprocess
import sys
import time
# wait for prompt
def wait_prompt(child):
    """Nudge the spawned test app and wait for its interactive prompt.

    Sends an empty line to the pexpect child and waits up to 120 seconds
    for the "RTE>>" prompt. Returns True only if the prompt was seen;
    timeouts, EOF and any communication error yield False.
    """
    try:
        child.sendline()
        result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
                              timeout=120)
    except Exception:
        # narrowed from a bare except so that e.g. KeyboardInterrupt
        # still propagates instead of being reported as "no prompt"
        return False
    # expect() returns the index of the matched pattern; 0 is the prompt
    return result == 0
# run a test group
# each result tuple in results list consists of:
#   result value (0 or -1)
#   result string
#   test name
#   total test run time (double)
#   raw test log
#   test report (if not available, should be None)
#
# this function needs to be outside AutotestRunner class
# because otherwise Pool won't work (or rather it will require
# quite a bit of effort to make it work).
def run_test_group(cmdline, test_group):
    """Spawn the test binary with 'cmdline' and run every test in
    'test_group' against it, returning the list of result tuples described
    above.  The first tuple always describes the app startup itself."""
    results = []
    child = None
    start_time = time.time()
    startuplog = None
    # run test app
    try:
        # prepare logging of init
        startuplog = StringIO.StringIO()
        print >>startuplog, "\n%s %s\n" % ("=" * 20, test_group["Prefix"])
        print >>startuplog, "\ncmdline=%s" % cmdline
        child = pexpect.spawn(cmdline, logfile=startuplog)
        # wait for target to boot
        if not wait_prompt(child):
            child.close()
            results.append((-1,
                            "Fail [No prompt]",
                            "Start %s" % test_group["Prefix"],
                            time.time() - start_time,
                            startuplog.getvalue(),
                            None))
            # mark all tests as failed
            for test in test_group["Tests"]:
                results.append((-1, "Fail [No prompt]", test["Name"],
                                time.time() - start_time, "", None))
            # exit test
            return results
    except:
        # NOTE(review): if the spawn itself raised before startuplog was
        # created, startuplog is None and .getvalue() below would raise
        # AttributeError -- presumably never hit in practice; verify.
        results.append((-1,
                        "Fail [Can't run]",
                        "Start %s" % test_group["Prefix"],
                        time.time() - start_time,
                        startuplog.getvalue(),
                        None))
        # mark all tests as failed
        for t in test_group["Tests"]:
            results.append((-1, "Fail [Can't run]", t["Name"],
                            time.time() - start_time, "", None))
        # exit test
        return results
    # startup was successful
    results.append((0, "Success", "Start %s" % test_group["Prefix"],
                    time.time() - start_time, startuplog.getvalue(), None))
    # parse the binary for available test commands
    binary = cmdline.split()[0]
    stripped = 'not stripped' not in subprocess.check_output(['file', binary])
    if not stripped:
        # for unstripped binaries, discover registered test commands from
        # the symbol table so unavailable tests can be skipped up front
        symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
        avail_cmds = re.findall('test_register_(\w+)', symbols)
    # run all tests in test group
    for test in test_group["Tests"]:
        # create log buffer for each test
        # in multiprocessing environment, the logging would be
        # interleaved and will create a mess, hence the buffering
        logfile = StringIO.StringIO()
        child.logfile = logfile
        result = ()
        # make a note when the test started
        start_time = time.time()
        try:
            # print test name to log buffer
            print >>logfile, "\n%s %s\n" % ("-" * 20, test["Name"])
            # run test function associated with the test
            if stripped or test["Command"] in avail_cmds:
                result = test["Func"](child, test["Command"])
            else:
                result = (0, "Skipped [Not Available]")
            # make a note when the test was finished
            end_time = time.time()
            # append test data to the result tuple
            result += (test["Name"], end_time - start_time,
                       logfile.getvalue())
            # call report function, if any defined, and supply it with
            # target and complete log for test run
            if test["Report"]:
                # NOTE(review): BUG -- 'self' and 'log' are undefined in this
                # module-level function (the code was moved out of the class),
                # so any test that defines a "Report" raises NameError here
                # and is recorded as "Fail [Crash]" by the except below.
                # Needs the target and logfile.getvalue() passed explicitly.
                report = test["Report"](self.target, log)
                # append report to results tuple
                result += (report,)
            else:
                # report is None
                result += (None,)
        except:
            # make a note when the test crashed
            end_time = time.time()
            # mark test as failed
            result = (-1, "Fail [Crash]", test["Name"],
                      end_time - start_time, logfile.getvalue(), None)
        finally:
            # append the results to the results list
            results.append(result)
        # regardless of whether test has crashed, try quitting it
        try:
            child.sendline("quit")
            child.close()
        # if the test crashed, just do nothing instead
        except:
            # nop
            pass
    # return test results
    return results
# class representing an instance of autotests run
class AutotestRunner:
    """Schedules DPDK autotest groups, runs parallel groups through a
    multiprocessing pool and non-parallel groups synchronously, and writes
    results to stdout, a per-target .log file and a per-target .csv file.

    NOTE(review): the list-valued class attributes below are class-level
    mutable objects shared by every instance; this is only safe because a
    single runner is created per process -- verify before reuse.
    """
    cmdline = ""                    # base command line of the test binary
    parallel_test_groups = []       # groups that may run concurrently
    non_parallel_test_groups = []   # groups that must run alone
    logfile = None                  # open handle of "<target>.log"
    csvwriter = None                # csv.writer over "<target>.csv"
    target = ""                     # build target name, used in file names
    start = None                    # time.time() when run_all_tests started
    n_tests = 0                     # number of tests run (excluding "start")
    fails = 0                       # number of failed tests
    log_buffers = []                # raw per-test logs, flushed at the end
    blacklist = []                  # test ids to exclude
    whitelist = []                  # if non-empty, only these test ids run
    def __init__(self, cmdline, target, blacklist, whitelist):
        self.cmdline = cmdline
        self.target = target
        self.blacklist = blacklist
        self.whitelist = whitelist
        # log file filename
        logfile = "%s.log" % target
        csvfile = "%s.csv" % target
        self.logfile = open(logfile, "w")
        # NOTE(review): the csv file handle is kept alive only through the
        # csv.writer; it is never explicitly closed
        csvfile = open(csvfile, "w")
        self.csvwriter = csv.writer(csvfile)
        # prepare results table
        self.csvwriter.writerow(["test_name", "test_result", "result_str"])
    # set up cmdline string
    def __get_cmdline(self, test):
        """Builds the app command line for one test group, adding per-group
        memory limits and a unique --file-prefix so groups can coexist."""
        cmdline = self.cmdline
        # append memory limitations for each test
        # otherwise tests won't run in parallel
        if "i686" not in self.target:
            cmdline += " --socket-mem=%s" % test["Memory"]
        else:
            # affinitize startup so that tests don't fail on i686
            cmdline = "taskset 1 " + cmdline
            cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))
        # set group prefix for autotest group
        # otherwise they won't run in parallel
        cmdline += " --file-prefix=%s" % test["Prefix"]
        return cmdline
    def add_parallel_test_group(self, test_group):
        # register a group eligible for concurrent execution
        self.parallel_test_groups.append(test_group)
    def add_non_parallel_test_group(self, test_group):
        # register a group that must run on its own
        self.non_parallel_test_groups.append(test_group)
    def __process_results(self, results):
        """Consumes one group's result tuples: prints progress, updates the
        fail counter, buffers logs, writes .rst reports and CSV rows."""
        # this iterates over individual test results
        for i, result in enumerate(results):
            # increase total number of tests that were run
            # do not include "start" test
            if i > 0:
                self.n_tests += 1
            # unpack result tuple
            test_result, result_str, test_name, \
                test_time, log, report = result
            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)
            # print results, test run time and total time since start
            result = ("%s:" % test_name).ljust(30)
            result += result_str.ljust(29)
            result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
            # don't print out total time every line, it's the same anyway
            # NOTE(review): this file is Python 2 (see `print >>` in
            # run_test_group), so multi-argument print(...) emits a tuple
            # representation rather than space-joined fields
            if i == len(results) - 1:
                print(result,
                      "[%02dm %02ds]" % (total_time / 60, total_time % 60))
            else:
                print(result)
            # if test failed and it wasn't a "start" test
            if test_result < 0 and not i == 0:
                self.fails += 1
            # collect logs
            self.log_buffers.append(log)
            # create report if it exists
            if report:
                try:
                    f = open("%s_%s_report.rst" %
                             (self.target, test_name), "w")
                except IOError:
                    print("Report for %s could not be created!" % test_name)
                else:
                    with f:
                        f.write(report)
            # write test result to CSV file
            if i != 0:
                self.csvwriter.writerow([test_name, test_result, result_str])
    # this function iterates over test groups and removes each
    # test that is not in whitelist/blacklist
    def __filter_groups(self, test_groups):
        """Applies the black/white lists in place and drops groups that end
        up empty; returns the filtered list."""
        groups_to_remove = []
        # filter out tests from parallel test groups
        for i, test_group in enumerate(test_groups):
            # iterate over a copy so that we could safely delete individual
            # tests
            for test in test_group["Tests"][:]:
                test_id = test["Command"]
                # dump tests are specified in full e.g. "Dump_mempool"
                if "_autotest" in test_id:
                    test_id = test_id[:-len("_autotest")]
                # filter out blacklisted/whitelisted tests
                if self.blacklist and test_id in self.blacklist:
                    test_group["Tests"].remove(test)
                    continue
                if self.whitelist and test_id not in self.whitelist:
                    test_group["Tests"].remove(test)
                    continue
            # modify or remove original group
            if len(test_group["Tests"]) > 0:
                test_groups[i] = test_group
            else:
                # remember which groups should be deleted
                # put the numbers backwards so that we start
                # deleting from the end, not from the beginning
                groups_to_remove.insert(0, i)
        # remove test groups that need to be removed
        for i in groups_to_remove:
            del test_groups[i]
        return test_groups
    # iterate over test groups and run tests associated with them
    def run_all_tests(self):
        """Runs every registered group and returns the number of failures
        (0 means full success)."""
        # filter groups
        self.parallel_test_groups = \
            self.__filter_groups(self.parallel_test_groups)
        self.non_parallel_test_groups = \
            self.__filter_groups(self.non_parallel_test_groups)
        # create a pool of worker threads
        # NOTE(review): processes=1 serialises the "parallel" groups too
        pool = multiprocessing.Pool(processes=1)
        results = []
        # whatever happens, try to save as much logs as possible
        try:
            # create table header
            print("")
            print("Test name".ljust(30), "Test result".ljust(29),
                  "Test".center(9), "Total".center(9))
            print("=" * 80)
            # make a note of tests start time
            self.start = time.time()
            # assign worker threads to run test groups
            for test_group in self.parallel_test_groups:
                result = pool.apply_async(run_test_group,
                                          [self.__get_cmdline(test_group),
                                           test_group])
                results.append(result)
            # iterate while we have group execution results to get
            while len(results) > 0:
                # iterate over a copy to be able to safely delete results
                # this iterates over a list of group results
                for group_result in results[:]:
                    # if the thread hasn't finished yet, continue
                    if not group_result.ready():
                        continue
                    res = group_result.get()
                    self.__process_results(res)
                    # remove result from results list once we're done with it
                    results.remove(group_result)
            # run non_parallel tests. they are run one by one, synchronously
            for test_group in self.non_parallel_test_groups:
                group_result = run_test_group(
                    self.__get_cmdline(test_group), test_group)
                self.__process_results(group_result)
            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)
            # print out summary
            print("=" * 80)
            print("Total run time: %02dm %02ds" % (total_time / 60,
                                                   total_time % 60))
            if self.fails != 0:
                print("Number of failed tests: %s" % str(self.fails))
            # write summary to logfile
            self.logfile.write("Summary\n")
            self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
            self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
            self.logfile.write("Failed tests: ".ljust(
                15) + "%i\n" % self.fails)
        except:
            # any unexpected error marks the whole run as failed but still
            # lets the buffered logs below be written out
            print("Exception occurred")
            print(sys.exc_info())
            self.fails = 1
        # drop logs from all executions to a logfile
        for buf in self.log_buffers:
            self.logfile.write(buf.replace("\r", ""))
        return self.fails
| gpl-2.0 |
jimsimon/sky_engine | sky/tools/webkitpy/common/checkout/diff_test_data.py | 11 | 3643 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: Store this as a .patch file in some new fixtures directory or similar.
DIFF_TEST_DATA = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h
index f5d5e74..3b6aa92 100644
--- a/WebCore/rendering/style/StyleFlexibleBoxData.h
+++ b/WebCore/rendering/style/StyleFlexibleBoxData.h
@@ -47,7 +47,6 @@ public:
unsigned align : 3; // EBoxAlignment
unsigned pack: 3; // EBoxAlignment
- unsigned orient: 1; // EBoxOrient
unsigned lines : 1; // EBoxLines
private:
diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp
index ce21720..324929e 100644
--- a/WebCore/rendering/style/StyleRareInheritedData.cpp
+++ b/WebCore/rendering/style/StyleRareInheritedData.cpp
@@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData()
, textSizeAdjust(RenderStyle::initialTextSizeAdjust())
, resize(RenderStyle::initialResize())
, userSelect(RenderStyle::initialUserSelect())
+ , boxOrient(RenderStyle::initialBoxOrient())
{
}
@@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o)
, textSizeAdjust(o.textSizeAdjust)
, resize(o.resize)
, userSelect(o.userSelect)
+ , boxOrient(o.boxOrient)
{
}
@@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const
&& khtmlLineBreak == o.khtmlLineBreak
&& textSizeAdjust == o.textSizeAdjust
&& resize == o.resize
- && userSelect == o.userSelect;
+ && userSelect == o.userSelect
+ && boxOrient == o.boxOrient;
}
bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const
diff --git a/tests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/tests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
new file mode 100644
index 0000000..6db26bd
--- /dev/null
+++ b/tests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
@@ -0,0 +1 @@
+61a373ee739673a9dcd7bac62b9f182e
\ No newline at end of file
'''
| bsd-3-clause |
frosenberg/kubernetes | examples/cluster-dns/images/frontend/client.py | 468 | 1227 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import socket
from urlparse import urlparse
def CheckServiceAddress(address):
  # Resolve the hostname part of the service URL through DNS and print the
  # resulting IP address -- verifies that cluster DNS can see the service.
  hostname = urlparse(address).hostname
  service_address = socket.gethostbyname(hostname)
  print service_address
def GetServerResponse(address):
  # Issue an HTTP GET against the service and print the response object and
  # its body -- verifies the service is actually reachable, not just resolvable.
  print 'Send request to:', address
  response = requests.get(address)
  print response
  print response.content
def Main():
  # Takes a single positional argument: the service URL to probe.
  parser = argparse.ArgumentParser()
  parser.add_argument('address')
  args = parser.parse_args()
  # First check DNS resolution, then the HTTP round trip.
  CheckServiceAddress(args.address)
  GetServerResponse(args.address)
if __name__ == "__main__":
  Main()
| apache-2.0 |
mlassnig/pilot | SiteMover.py | 3 | 121268 | # Base class of site movers
# All site movers inherit from this class
import os
import commands
import re
import time
from urllib import urlopen, urlencode
from urllib2 import Request, urlopen
from futil import *
from pUtil import tolog, readpar, dumpOrderedItems, getDirectAccessDic, getSiteInformation
from PilotErrors import PilotErrors
from timed_command import timed_command
from configSiteMover import config_sm
from FileHandling import getExtension, getTracingReportFilename, writeJSON
# Module-level shorthands for site-mover defaults taken from configSiteMover;
# also used below as SiteMover class attribute defaults.
PERMISSIONS_DIR = config_sm.PERMISSIONS_DIR
PERMISSIONS_FILE = config_sm.PERMISSIONS_FILE
CMD_CHECKSUM = config_sm.COMMAND_MD5
ARCH_DEFAULT = config_sm.ARCH_DEFAULT
class SiteMover(object):
    """
    File movers move files between a storage element (of different kinds) and a local directory
    get_data: SE->local
    put_data: local->SE
    getMover: static function returning a SiteMover
    It further provides functions useful for child classes (AAASiteMover):
    put_data_retfail -- facilitate return in case of failure
    mkdirWperm -- create recursively dirs setting appropriate permissions
    getLocalFileInfo -- get size and checksum of a local file
    This is the Default SiteMover, the SE has to be locally accessible for all the WNs
    and all commands like cp, mkdir, md5checksum have to be available on files in the SE
    E.g. NFS exported file system
    """
    # private registry of instantiated movers -- presumably used by
    # getMover() further down in the file; verify there
    __childDict = {}
    # copy tool identifier and checksum algorithm for this mover
    copyCommand = "cp"
    checksum_command = "adler32"
    # capability flags: which local commands this mover can rely on
    has_mkdir = True
    has_df = True
    has_getsize = True
    has_md5sum = True
    has_chmod = True
    # default permissions/architecture settings from configSiteMover
    permissions_DIR = PERMISSIONS_DIR
    permissions_FILE = PERMISSIONS_FILE
    arch_type = ARCH_DEFAULT
    # default transfer timeout: 5 hours (seconds)
    timeout = 5*3600
    useTracingService = True
    # cache of dataset content lookups, keyed per dataset
    filesInRucioDataset = {}
    # ATLAS conditions-project tags and production file-type prefixes
    CONDPROJ = ['oflcond', 'comcond', 'cmccond', 'tbcond', 'tbmccond', 'testcond']
    PRODFTYPE = ['AOD', 'CBNT', 'ESD', 'EVNT', 'HIST', 'HITS', 'RDO', 'TAG', 'log', 'NTUP']
    def __init__(self, setup_path='', *args, **kwrds):
        # Extra *args/**kwrds are accepted (and ignored) so all mover
        # subclasses can be constructed with a uniform signature.
        self._setup = setup_path
    def init_data(self, job):
        # Per-job initialisation hook for subclasses; the default mover
        # needs none.
        pass
    def get_timeout(self):
        # Transfer timeout in seconds (class attribute, default 5 hours).
        return self.timeout
    def getChecksumCommand(self):
        """ return the checksum command to be used with this site mover """
        return self.checksum_command
    def getID(self):
        """ return the current copy command """
        return self.copyCommand
    def getSetup(self):
        """ Return the setup string (pacman setup os setup script) for the copy command used by the mover """
        return self._setup
    def mountNSF4AndGetPFN(self, error, gpfn):
        """ Get and check PNFS mount point, return the pfn """
        # error: PilotErrors instance used to map failures to exit codes
        # gpfn: full SRM URL of the file
        # Returns (exit code, error diagnostics, local pfn under the mount).
        ec = 0
        pilotErrorDiag = ""
        src_loc_pfn = ""
        try:
            # split the SRM URL into the SE host name and the in-SE path
            if 'SFN' in gpfn:
                seName = gpfn.replace("srm://", "").split(':8446/srm/managerv2?SFN=')[0]
                src_loc_pfn = gpfn.split(':8446/srm/managerv2?SFN=')[1]
            else:
                seName = gpfn.replace("srm://", "").split('/')[0]
                src_loc_pfn = gpfn.split('%s' % (seName))[1]
#            seName = gpfn.replace("srm://", "").split(':8446/srm/managerv2?SFN=')[0]
#            src_loc_pfn = gpfn.split(':8446/srm/managerv2?SFN=')[1]
        except Exception, e:
            pilotErrorDiag = "Exception caught: %s" % (e)
            tolog("!!WARNING!!1887!! %s" % (pilotErrorDiag))
            ec = error.ERR_STAGEINFAILED
            return ec, pilotErrorDiag, src_loc_pfn
        # find where the SE is NFS4-mounted on this worker node
        _cmd_str = 'mount -l -t nfs4|grep %s' % (seName)
        timeout = 3600
        try:
            s, telapsed, cout, cerr = timed_command(_cmd_str, timeout)
        except Exception, e:
            tolog("!!WARNING!!1887!! timed_command() threw an exception: %s" % str(e))
            s = 1
            o = str(e)
            telapsed = timeout
        else:
            o = cout + cerr
        tolog("Elapsed time: %d" % (telapsed))
        if s == 0:
            try:
                # mount output is expected as '<dev> on <mount point> ...';
                # take the mount point (third field)
                pnfsMountPoint = o.split()[2]
            except Exception, e:
                pilotErrorDiag = "Exception caught: %s" % (e)
                tolog("!!WARNING!!1887!! %s" % (pilotErrorDiag))
                ec = error.ERR_STAGEINFAILED
            else:
                if os.path.ismount("%s" % (pnfsMountPoint)):
                    tolog("PNFS Server: %s, mount point: %s" % (seName, pnfsMountPoint))
                    src_loc_pfn = '%s%s' % (pnfsMountPoint, src_loc_pfn)
                else:
                    tolog("!!WARNING!!1887!! %s is no mount point" % (pnfsMountPoint))
                    pilotErrorDiag = "PNFS system error: %s" % (o)
                    ec = error.ERR_GETFAILEDTOMOUNTNFS4
        else:
            tolog("!!WARNING!!1887!! Command failed: %s" % (_cmd_str))
            if is_timeout(s):
                pilotErrorDiag = "Mount command was timed out after %d seconds" % (telapsed)
                ec = error.ERR_GETTIMEOUT
            else:
                pilotErrorDiag = "PNFS system error: %s" % (o)
                ec = error.ERR_GETPNFSSYSTEMERROR
        return ec, pilotErrorDiag, src_loc_pfn
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
    """
    Move a file from the local SE (where it was put from DDM) to the working directory.
    gpfn: full source URL (e.g. method://[host[:port]/full-dir-path/filename - a SRM URL is OK)
    path: destination absolute path (in a local file system). It is assumed to be there. get_data returns an error if the path is missing
    The local file is assumed to have a relative path that is the same of the relative path in the 'gpfn'
    loc_...: variables used to access the file in the locally exported file system
    Returns (exit code, pilot error diagnostics); 0 on success.
    """
    error = PilotErrors()
    pilotErrorDiag = ""
    # Get input parameters from pdict
    timeout = pdict.get('timeout', 5*3600)
    experiment = pdict.get('experiment', "ATLAS")
    # get the Rucio tracing report
    report = self.getStubTracingReport(pdict['report'], 'sm', lfn, guid)
    # get the site information object
    si = getSiteInformation(experiment)
    src_loc_pfn = ''
    if si.isTier3():
        # Tier 3: the source URL is already a local path
        src_loc_pfn = gpfn
    else:
        if 'dpm' in gpfn:
            # Get and Check PNFS mount point
            ec, pilotErrorDiag, src_loc_pfn = self.mountNSF4AndGetPFN(error, gpfn)
            if ec != 0:
                return ec, pilotErrorDiag
        else:
            # remove any host and SFN info from PFN path
            src_loc_pfn = self.extractPathFromPFN(gpfn)
    src_loc_filename = lfn
    # source vars: gpfn, loc_pfn, loc_host, loc_dirname, loc_filename
    # dest vars: path
    if fchecksum != 0 and fchecksum != "":
        csumtype = SiteMover.getChecksumType(fchecksum)
    else:
        csumtype = "default"
    # fill in missing size/checksum from the local copy itself
    if fsize == 0 or fchecksum == 0:
        ec, pilotErrorDiag, fsize, fchecksum = SiteMover.getLocalFileInfo(src_loc_pfn, csumtype=csumtype)
        if ec != 0:
            self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
            return ec, pilotErrorDiag
    dest_file = os.path.join(path, src_loc_filename)
    # execute the copy command
    #PN
    _cmd_str = "cp %s %s" % (src_loc_pfn, dest_file)
    # if ".lib." in src_loc_pfn:
    # _cmd_str = _cmd_str.replace('XXX', '')
    tolog("Executing command: %s" % (_cmd_str))
    report['transferStart'] = time.time()
    try:
        s, telapsed, cout, cerr = timed_command(_cmd_str, timeout)
    except Exception, e:
        tolog("!!WARNING!!2999!! timed_command() threw an exception: %s" % str(e))
        s = 1
        o = str(e)
        telapsed = timeout
    else:
        o = cout + cerr
    tolog("Elapsed time: %d" % (telapsed))
    report['validateStart'] = time.time()
    # error code handling
    if s != 0:
        tolog("!!WARNING!!2990!! Command failed: %s" % (_cmd_str))
        check_syserr(s, o)
        if is_timeout(s):
            pilotErrorDiag = "cp get was timed out after %d seconds" % (telapsed)
            ec = error.ERR_GETTIMEOUT
        else:
            o = o.replace('\n', ' ')
            if o.find("No such file or directory") >= 0:
                # missing DBRelease files get a dedicated error code
                if src_loc_pfn.find("DBRelease") >= 0:
                    pilotErrorDiag = "DBRelease file missing: %s" % (src_loc_pfn)
                    ec = error.ERR_MISSDBREL
                else:
                    pilotErrorDiag = "No such file or directory: %s" % (src_loc_pfn)
                    ec = error.ERR_NOSUCHFILE
            else:
                pilotErrorDiag = "cp failed with output: ec = %d, output = %s" % (s, o)
                ec = error.ERR_STAGEINFAILED
        tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
        self.prepareReport('COPY_FAIL', report)
        return ec, pilotErrorDiag
    # get remote file size and checksum
    ec, pilotErrorDiag, dstfsize, dstfchecksum = SiteMover.getLocalFileInfo(dest_file, csumtype=csumtype)
    if ec != 0:
        self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
        return ec, pilotErrorDiag
    # compare remote and local file size
    if dstfsize != fsize:
        pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
                         (os.path.basename(gpfn), str(dstfsize), str(fsize))
        tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
        self.prepareReport('FS_MISMATCH', report)
        return error.ERR_GETWRONGSIZE, pilotErrorDiag
    # compare remote and local file checksum
    if dstfchecksum != fchecksum and not self.isDummyChecksum(fchecksum):
        pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
                         (csumtype, os.path.basename(gpfn), dstfchecksum, fchecksum)
        tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
        if csumtype == "adler32":
            self.prepareReport('AD_MISMATCH', report)
            return error.ERR_GETADMISMATCH, pilotErrorDiag
        else:
            self.prepareReport('MD5_MISMATCH', report)
            return error.ERR_GETMD5MISMATCH, pilotErrorDiag
    self.prepareReport('DONE', report)
    return 0, pilotErrorDiag
def getRSE(surl=None):
""" Return the Rucio site name (RSE ... Rucio Storage Element) using the SURL """
sitename = None
if surl:
try:
from dq2.info import TiersOfATLAS
except:
# Note: do not print the exception since it sometimes can not be converted to a string (as seen at Taiwan)
tolog("!!WARNING!!1119!! TiersOfATLAS could not be imported from dq2.info")
else:
sites = TiersOfATLAS.getAllDestinationSites()
for site in sites:
if TiersOfATLAS.isSURLFromSiteOrCloud(surl, site):
sitename = site
break
return sitename
getRSE = staticmethod(getRSE)
def getDefaultRSE(self):
    """ Return the Rucio site name using the schedconfig.se info """
    # A preliminary SURL built from the default space token and SE is
    # enough for getRSE() to resolve the site name
    default_token, se = SiteMover.extractSE(readpar('se'))
    tolog("default_token=%s, se=%s" % (default_token, se))
    # Preliminary storage path (seprodpath, falling back to sepath)
    storage_path = readpar('seprodpath')
    if storage_path == "":
        storage_path = readpar('sepath')
    # The path may be a complex structure (brackets and commas): expand it
    # into a list and pick the entry matching the default space token
    path_candidates = self.getDirList(storage_path)
    tolog("destinationList=%s" % str(path_candidates))
    matched_path = self.getMatchingDestinationPath(default_token, path_candidates)
    tolog("destination=%s" % matched_path)
    # Assemble the SURL and translate it to the Rucio site name
    preliminary_surl = se + matched_path
    tolog("surl=%s" % preliminary_surl)
    return SiteMover.getRSE(surl=preliminary_surl)
def getTiersOfATLASAlternativeName(self, endpoint):
    """ Return the alternativeName from TiersOfATLAS for a given edpoint """
    try:
        from dq2.info import TiersOfATLAS
    except:
        # Note: do not print the exception since it sometimes can not be converted to a string (as seen at Taiwan)
        tolog("!!WARNING!!1119!! TiersOfATLAS could not be imported from dq2.info")
        return ""
    # Now get the alternativeName
    tolog("endpoint=%s" % endpoint)
    try:
        return TiersOfATLAS.getSiteProperty(endpoint, 'alternateName')[0]
    except:
        tolog("!!WARNING!!5656!! TiersOfATLAS.getSiteProperty() failed to find alternativeName for %s" % (endpoint))
        return ""
def getTiersOfATLASSE(self, endpoint):
    """ Return the se from TiersOfATLAS

    Looks up the 'srm' entry for the given endpoint in the TiersOfATLAS
    cache; returns "" if the import or the lookup fails.
    """
    se = ""
    try:
        from dq2.info import TiersOfATLAS
    except:
        # Note: do not print the exception since it sometimes can not be converted to a string (as seen at Taiwan)
        tolog("!!WARNING!!1119!! TiersOfATLAS could not be imported from dq2.info")
    else:
        # Get the sites list
        sites = TiersOfATLAS.ToACache.sites
        # Get the se info
        try:
            se = sites[endpoint]['srm']
        except Exception, e:
            tolog("!!WARNING!!1120!! No such endpoint in TiersOfATLAS: %s" % (e))
        else:
            tolog("Endpoint %s corresponds to se=%s (TiersOfATLAS)" % (endpoint, se))
    return se
def getGroupDiskPath(self, endpoint=""):
    """ Get the seprodpath from TiersOfATLAS instead of schedconfig if destination is a groupdisk """
    # A group disk is identified by a 'dst:' substring in the token
    # descriptor (which in this case is the same as the endpoint name)
    if "dst:" not in endpoint:
        tolog("!!WARNING!!2233!! Not a groupdisk endpoint: %s" % (endpoint))
        return ""
    # Drop the dst: prefix and resolve the SE from TiersOfATLAS
    endpoint = endpoint[len('dst:'):]
    se = self.getTiersOfATLASSE(endpoint)
    if se == "":
        tolog("!!WARNING!!3999!! Group disk verification failed, space token will be reset to default value")
        return ""
    # Extract the storage path from the srm info and make sure it ends in /rucio
    sepath = SiteMover.extractSEPath(se)
    if not sepath.endswith('/rucio'):
        sepath += '/rucio'
    return sepath
def verifyGroupSpaceToken(self, token):
    """ Make sure that space token is valid in case group disk is requested

    Returns the (possibly reset) token, or None when no token was given.
    """
    # In case a groupdisk space token is requested, make sure that the site's alternativeName is the same as the endpoints' alternativeName
    # They will have different alternativeNames if the job originates from a different cloud
    # Note: ATLAS specific
    if not token:
        return None
    if token.startswith("dst:"):
        # Found a groupdisk space token
        _token = token[len('dst:'):]
        tolog("token=%s"%_token)
        tolog("sitename=%s"%self.getDefaultRSE())
        # Get the corresponding alternative name and compare it to the alternative name of the site
        alternativeName_token = self.getTiersOfATLASAlternativeName(_token)
        tolog("alternativeName_token = %s" % (alternativeName_token))
        alternativeName_site = self.getTiersOfATLASAlternativeName(self.getDefaultRSE())
        tolog("alternativeName_site = %s" % (alternativeName_site))
        # Only proceed ith getting the groupdisk path if the alternativeName's are the same
        if alternativeName_token == alternativeName_site:
            tolog("Verified groupdisk token (same alternativeName for site and endpoint)")
        else:
            # Different clouds: fall back to the default space token from schedconfig
            tolog("!!WARNING!!3999!! Alternative names are not the same for site and requested endpoint, will reset GROUPDISK")
            default_token, _se = SiteMover.extractSE(readpar('se'))
            tolog("Requested space token %s reset to %s" % (_token, default_token))
            token = default_token
    return token
def put_data_retfail(fail, errortext, surl=""):
"""
Provides the return value for put_data when there is a failure.
Used to enforce the number of parameters returned
"""
return fail, errortext, surl, 0, 0, ''
put_data_retfail = staticmethod(put_data_retfail)
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
    """
    Move the file from the current local directory to a storage element
    Parameters are:
    source: full path of the file in the local directory
    destination: destination SE, method://[hostname[:port]]/full-dir-path/ (NB: no file name)
    fsize: file size of the source file (evaluated if 0)
    fchecksum: checksum of the source file (evaluated if 0)
    pdict: to allow additional parameters that may make sense for specific movers
    Assume that the SE is locally mounted and its local path is the same as the remote path
    if both fsize and fchecksum (for the source) are given and !=0 these are assumed without reevaluating them
    returns: exitcode, gpfn, fsize, fchecksum
    """
    error = PilotErrors()
    pilotErrorDiag = ""
    # Get input parameters from pdict
    DN = pdict.get('DN', '')
    lfn = pdict.get('lfn', '')
    guid = pdict.get('guid', '')
    token = pdict.get('token', '')
    scope = pdict.get('scope', '')
    dsname = pdict.get('dsname', '')
    timeout = pdict.get('timeout', 5*3600)
    analyJob = pdict.get('analJob', False)
    testLevel = pdict.get('testLevel', '0')
    extradirs = pdict.get('extradirs', '')
    experiment = pdict.get('experiment', 'ATLAS')
    prodSourceLabel = pdict.get('prodSourceLabel', '')
    # get the Rucio tracing report
    report = self.getStubTracingReport(pdict['report'], 'sm', lfn, guid)
    # get the checksum type
    if fchecksum != 0 and fchecksum != "":
        csumtype = SiteMover.getChecksumType(fchecksum)
    else:
        csumtype = "default"
    # evaluate missing size/checksum from the local source file
    if fsize == 0 or fchecksum == 0:
        ec, pilotErrorDiag, fsize, fchecksum = SiteMover.getLocalFileInfo(source, csumtype="adler32")
        if ec != 0:
            self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
            return SiteMover.put_data_retfail(ec, pilotErrorDiag)
    # now that the file size is known, add it to the tracing report
    report['filesize'] = fsize
    # get the site information object
    si = getSiteInformation(experiment)
    # are we on a tier 3?
    if si.isTier3():
        dst_loc_se = SiteMover.getTier3Path(dsname, DN)
        dst_prefix = ""
        tolog("Writing output on a Tier 3 site to: %s" % (dst_loc_se))
    else:
        # split the destination SE into a prefix and a local path
        dst_se = destination
        if dst_se.find('SFN') != -1: # srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv1?SFN=/pnfs/usatlas.bnl.gov/
            s = dst_se.split('SFN=')
            dst_loc_se = s[1]
            dst_prefix = s[0] + 'SFN='
        else:
            _sentries = dst_se.split('/', 3)
            try:
                dst_prefix = _sentries[0] + '//' + _sentries[2] # 'method://host:port' is it always a ftp server? can it be srm? something else?
                dst_loc_se = '/'+ _sentries[3]
            except Exception, e:
                pilotErrorDiag = "Could not figure out destination path from dst_se (%s): %s" % (dst_se, str(e))
                tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
                self.prepareReport('DEST_PATH_UNDEF', report)
                return SiteMover.put_data_retfail(error.ERR_STAGEOUTFAILED, pilotErrorDiag)
    # VCH added check for Tier3 sites because the ds name is added to the path in SiteMove.getTier3Path()
    if si.isTier3():
        dst_loc_sedir = os.path.join(dst_loc_se, extradirs)
    else:
        dst_loc_sedir = os.path.join(dst_loc_se, os.path.join(extradirs, dsname))
    filename = os.path.basename(source)
    ec, pilotErrorDiag, tracer_error, dst_loc_pfn, lfcdir, surl = si.getProperPaths(error, analyJob, token, prodSourceLabel, dsname, filename, scope=scope, sitemover=self) # quick workaround
    if ec != 0:
        self.prepareReport(tracer_error, report)
        return self.put_data_retfail(ec, pilotErrorDiag)
    #dst_loc_pfn = os.path.join(dst_loc_sedir, filename)
    dst_gpfn = dst_prefix + dst_loc_pfn
    try:
        SiteMover.mkdirWperm(os.path.dirname(dst_loc_pfn))
        #SiteMover.mkdirWperm(dst_loc_sedir)
    except Exception, e:
        # best effort: the cp below will fail and report the real problem
        tolog("!!WARNING!!2999!! Could not create dir: %s, %s" % (dst_loc_sedir, str(e)))
    if testLevel == "1":
        source = "thisisjustatest"
    # execute the copy command
    _cmd_str = "cp %s %s" % (source, dst_loc_pfn)
    tolog("Executing command: %s" % (_cmd_str))
    report['transferStart'] = time.time()
    try:
        s, telapsed, cout, cerr = timed_command(_cmd_str, timeout)
    except Exception, e:
        tolog("!!WARNING!!2999!! timed_command() threw an exception: %s" % str(e))
        s = 1
        o = str(e)
        telapsed = timeout
    else:
        o = cout + cerr
    tolog("Elapsed time: %d" % (telapsed))
    report['validateStart'] = time.time()
    if s != 0:
        tolog("!!WARNING!!2990!! Command failed: %s" % (_cmd_str))
        check_syserr(s, o)
        if is_timeout(s):
            pilotErrorDiag = "cp put was timed out after %d seconds" % (telapsed)
            ec = error.ERR_PUTTIMEOUT
        else:
            o = o.replace('\n', ' ')
            pilotErrorDiag = "cp failed with output: ec = %d, output = %s" % (s, o)
            ec = error.ERR_STAGEOUTFAILED
        self.prepareReport('COPY_FAIL', report)
        return SiteMover.put_data_retfail(ec, pilotErrorDiag, surl=dst_loc_pfn)
        # NOTE(review): the tolog below is unreachable (placed after the return);
        # compare with get_data() where the warning is logged before returning
        tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
    # get remote file size and checksum
    ec, pilotErrorDiag, dstfsize, dstfchecksum = SiteMover.getLocalFileInfo(dst_loc_pfn, csumtype="adler32")
    if ec != 0:
        self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
        return SiteMover.put_data_retfail(ec, pilotErrorDiag, surl=dst_loc_pfn)
    # compare remote and local file size
    if dstfsize != fsize:
        pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
                         (os.path.basename(dst_gpfn), str(dstfsize), str(fsize))
        tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
        self.prepareReport('FS_MISMATCH', report)
        return SiteMover.put_data_retfail(error.ERR_PUTWRONGSIZE, pilotErrorDiag, surl=dst_loc_pfn)
    # compare remote and local checksums
    if dstfchecksum != fchecksum:
        pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
                         (csumtype, os.path.basename(dst_gpfn), dstfchecksum, fchecksum)
        tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
        if csumtype == "adler32":
            self.prepareReport('AD_MISMATCH', report)
            return SiteMover.put_data_retfail(error.ERR_PUTADMISMATCH, pilotErrorDiag, surl=dst_loc_pfn)
        else:
            self.prepareReport('MD5_MISMATCH', report)
            return SiteMover.put_data_retfail(error.ERR_PUTMD5MISMATCH, pilotErrorDiag, surl=dst_loc_pfn)
    self.prepareReport('DONE', report)
    return 0, pilotErrorDiag, str(dst_gpfn), fsize, fchecksum, ARCH_DEFAULT # Eddie added str, unicode protection
def getLCGPaths(self, destination, dsname, filename, lfcpath):
    """ return the proper paths for lcg-cp/cr file transfer and registration

    Returns (destination, lfcdir): the SE destination path extended with the
    dataset-relative part of the LFC path, and the LFC directory to create.
    """
    # return full lfc file path (beginning lfcpath might need to be replaced)
    native_lfc_path = self.to_native_lfn(dsname, filename)
    # /grid/atlas/dq2/testpanda/testpanda.destDB.b7cd4b56-1b5e-465a-a5d7-38d5e2609724_sub01000457/
    #58f836d5-ff4b-441a-979b-c37094257b72_0.job.log.tgz
    # tolog("Native_lfc_path: %s" % (native_lfc_path))
    # replace the default path /grid/atlas/rucio with lfcpath if different
    # (to_native_lfn returns a path begining with /grid/atlas/rucio)
    default_lfcpath = '/grid/atlas/rucio' # to_native_lfn always returns this at the beginning of the string
    if default_lfcpath != lfcpath:
        final_lfc_path = native_lfc_path.replace(default_lfcpath, lfcpath)
    else:
        final_lfc_path = native_lfc_path
    stripped_lfcpath = os.path.dirname(native_lfc_path[len(default_lfcpath):]) # the rest (to be added to the 'destination' variable)
    # /testpanda/testpanda.destDB.b7cd4b56-1b5e-465a-a5d7-38d5e2609724_sub01000457/58f836d5-ff4b-441a-979b-c37094257b72_0.job.log.tgz
    # tolog("stripped_lfcpath: %s" % (stripped_lfcpath))
    # full file path for disk
    if stripped_lfcpath[0] == "/":
        # os.path.join ignores the left argument when the right one is absolute
        stripped_lfcpath = stripped_lfcpath[1:]
    destination = os.path.join(destination, stripped_lfcpath)
    # /pnfs/tier2.hep.manchester.ac.uk/data/atlas/dq2/testpanda/testpanda.destDB.fcaf8da5-ffb6-4a63-9963-f31e768b82ef_sub01000345
    # tolog("Updated SE destination: %s" % (destination))
    # name of dir to be created in LFC
    lfcdir = os.path.dirname(final_lfc_path)
    # /grid/atlas/dq2/testpanda/testpanda.destDB.dfb45803-1251-43bb-8e7a-6ad2b6f205be_sub01000492
    # tolog("LFC dir: %s" % (lfcdir))
    return destination, lfcdir
def getPreDestination(self, analyJob, token, prodSourceLabel, alt=False):
    """ get the pre destination

    Resolves the storage path for the given job type / space token.
    analyJob: True for analysis jobs (use sepath rather than seprodpath)
    token: space token; a 'dst:' prefix denotes a GROUPDISK endpoint
    alt: passed through to readpar() for the alternative schedconfig
    Returns the destination path ("" if none could be resolved).
    """
    destination = ""
    # Special case for GROUPDISK
    # In this case, (e.g.) token = 'dst:AGLT2_PERF-MUONS'
    # Pilot should then consult TiersOfATLAS and get it from the corresponding srm entry
    if token != None and "dst:" in token:
        # if the job comes from a different cloud than the sites' cloud, destination will be set to "" and the
        # default space token will be used instead (the transfer to groupdisk will be handled by DDM not pilot)
        destination = self.getGroupDiskPath(endpoint=token)
        if destination != "":
            if destination.endswith('//rucio'):
                destination = destination.replace('//rucio','/rucio')
            tolog("GROUPDISK token requested (%s), destination=%s" % (token, destination))
            return destination
        else:
            # Reset the space token to the default value
            default_token, _se = SiteMover.extractSE(readpar('se'))
            tolog("Requested space token %s reset to %s" % (token, default_token))
            token = default_token
    if not analyJob:
        # process the destination path with getDirList since it can have a complex structure
        # as well as be a list of destination paths matching a corresponding space token
        if prodSourceLabel == 'ddm' and readpar('seprodpath', alt=alt) == '':
            sepath = readpar('sepath', alt=alt)
        else:
            sepath = readpar('seprodpath', alt=alt)
        destinationList = self.getDirList(sepath)
        # decide which destination path to use depending on the space token for the current file
        if token:
            # find the proper path
            destination = self.getMatchingDestinationPath(token, destinationList, alt=alt)
            if destination == "":
                # fall back to sepath when seprodpath did not yield a match
                tolog("!!WARNING!!2990!! seprodpath not properly defined: seprodpath = %s, destinationList = %s, using sepath instead" %\
                      (sepath, str(destinationList)))
                sepath = readpar('sepath', alt=alt)
                destinationList = self.getDirList(sepath)
                destination = self.getMatchingDestinationPath(token, destinationList, alt=alt)
                if destination == "":
                    tolog("!!WARNING!!2990!! sepath not properly defined: sepath = %s, destinationList = %s" %\
                          (sepath, str(destinationList)))
        else:
            # space tokens are not used
            destination = destinationList[0]
    else:
        # analysis jobs always use sepath
        sepath = readpar('sepath', alt=alt)
        destinationList = self.getDirList(sepath)
        # decide which destination path to use depending on the space token for the current file
        if token:
            # find the proper path
            destination = self.getMatchingDestinationPath(token, destinationList, alt=alt)
            if destination == "":
                tolog("!!WARNING!!2990!! sepath not properly defined: sepath = %s, destinationList = %s" %\
                      (sepath, str(destinationList)))
        else:
            # space tokens are not used
            destination = destinationList[0]
    return destination
def getUserLFCDir(destination, lfcpath, dsname):
    """ Get the LFC dir path for a user job

    Derives a 'user/<username>/<field3>' prefix from the dataset name and
    appends it to both the SE destination and the LFC path.
    Returns (ec, pilotErrorDiag, destination, lfcdir); ec is non-zero when
    the dataset name does not have the expected 'a.b.c...' format.
    """
    ec = 0
    pilotErrorDiag = ""
    lfcdir = ""
    # old pat = re.compile('([^\.]+\.[^\.]+)\..*')
    # pat = re.compile('([^\.]+\.[^\.]+\.[^\.]+[^\.]+)\..*')
    pat = re.compile('([^\.]+\.[^\.]+\.[^\.]+)\..*')
    mat = pat.match(dsname)
    if mat:
        # old prefixdir = mat.group(1) # 'user.pnilsson'
        subdirs = mat.group(1).split('.') # 'user.pnilsson.0915151927'
        _user = subdirs[0]      # 'user'
        _username = subdirs[1]  # 'pnilsson'
        _field3 = subdirs[2]    # '0915151927'
        prefixdir = os.path.join(_user, _username, _field3)
        destination = os.path.join(destination, prefixdir)
        if lfcpath != "":
            lfcdir = os.path.join(lfcpath, prefixdir, dsname)
            tolog("LFC dir: %s" % (lfcdir))
        tolog("SE destination: %s" % (destination))
    else:
        error = PilotErrors()
        ec = error.ERR_STAGEOUTFAILED
        pilotErrorDiag = "put_data encountered an unexpected dataset name format: %s" % (dsname)
        tolog('!!WARNING!!2990!! %s' % (pilotErrorDiag))
    # BUG FIX: previously returned a hard-coded 0, so the ERR_STAGEOUTFAILED
    # set on the error path above was silently lost by the caller
    return ec, pilotErrorDiag, destination, str(lfcdir) # Eddie added str, unicode protection
getUserLFCDir = staticmethod(getUserLFCDir)
def getFinalLCGPaths(self, analyJob, destination, dsname, filename, lfcpath, token, prodSourceLabel, scope="", alt=False):
    """
    set up paths differently for analysis and production jobs
    use conventional LFC paths or production jobs
    use special convention for analysis jobs (Aug-Sep 2011)
    Returns (ec, pilotErrorDiag, dst_gpfn, lfcdir).
    """
    dst_gpfn = ""
    lfcdir = ""
    # Rucio-style paths are used when the destination already contains /rucio
    # and a scope is provided
    if "/rucio" in destination and scope != "":
        useRucio = True
    else:
        useRucio = False
    if analyJob: # for analysis jobs
        ec, pilotErrorDiag, destination, lfcdir = self.getUserLFCDir(destination, lfcpath, dsname)
        if ec != 0:
            return ec, pilotErrorDiag, dst_gpfn, lfcdir
        dst_gpfn = os.path.join(destination, os.path.join(dsname, filename))
    else:
        # get the proper paths
        destination, lfcdir = self.getLCGPaths(destination, dsname, filename, lfcpath)
        dst_gpfn = os.path.join(destination, filename)
        # /pnfs/tier2.hep.manchester.ac.uk/data/atlas/dq2/testpanda/testpanda.destDB.dfb45803-1251-43bb-8e7a-6ad2b6f205be_sub01000492
    # overwrite the dst_gpfn if path contains /rucio
    if useRucio:
        dst_gpfn = self.getPathFromScope(scope, filename)
        # correct for a potentially missing sepath
        sepath = self.getPreDestination(analyJob, token, prodSourceLabel, alt=alt)
        if not sepath in dst_gpfn:
            dst_gpfn = os.path.join(sepath, dst_gpfn)
        # correct for possible double rucio substring
        if "rucio/rucio" in dst_gpfn:
            dst_gpfn = dst_gpfn.replace('rucio/rucio', 'rucio')
    return 0, "", dst_gpfn, lfcdir
def check_space_df(self, dst_loc_se):
    """ Run df to check space availability """
    # returns the available space in GB, or -1 on failure
    status, output = commands.getstatusoutput('df %s' % (dst_loc_se))
    if status != 0:
        check_syserr(status, output)
        tolog("WARNING: Error in running df: %s" % str(output))
        return -1
    # scan the df output for the 'available' column (kB -> GB)
    for line in output.strip().split('\n'):
        match = re.search('\s\s*([0-9]*)\s\s*([0-9]*)\s\s*([0-9]*)\%\s', line)
        if match is not None:
            return int(match.group(2))/1048576
    return -1
def getStubTracingReport(self, initial_report, protocol, filename, guid):
    """ Return the first part of the tracing report

    initial_report: existing report dictionary to extend (a fresh one is
                    created when it is not a dict)
    protocol: transfer protocol label, filename: current file, guid: file guid
    """
    # BUG FIX: the original wrapped 'report = initial_report' in try/except,
    # but a plain assignment can never raise, so the intended fallback to {}
    # was dead code; check the type explicitly instead
    if isinstance(initial_report, dict):
        report = initial_report
    else:
        report = {}
    # set the proper protocol
    report['protocol'] = protocol
    # mark the catalog (or relative?) start
    report['catStart'] = time.time()
    # the current file
    report['filename'] = filename
    # guid (Rucio stores guids without dashes)
    report['guid'] = guid.replace('-','')
    return report
def sendTrace(self, report):
    """ Go straight to the tracing server and post the instrumentation dictionary

    Best effort: failures are logged and ignored.
    """
    if not self.useTracingService:
        tolog("Experiment is not using Tracing service. skip sending tracing report")
        return
    url = 'https://rucio-lb-prod.cern.ch/traces/'
    tolog("Tracing server: %s" % (url))
    tolog("Sending tracing report: %s" % str(report))
    try:
        # take care of the encoding
        #data = urlencode({'API':'0_3_0', 'operation':'addReport', 'report':report})
        from json import dumps
        # escape double quotes so the JSON survives the shell -d "%s" quoting
        data = dumps(report).replace('"','\\"')
        from SiteInformation import SiteInformation
        si = SiteInformation()
        sslCertificate = si.getSSLCertificate()
        # create the command
        cmd = 'curl --connect-timeout 20 --max-time 120 --cacert %s -v -k -d "%s" %s' % (sslCertificate, data, url)
        tolog("Executing command: %s" % (cmd))
        s,o = commands.getstatusoutput(cmd)
        if s != 0:
            raise Exception(str(o))
    except:
        # if something fails, log it but ignore
        from sys import exc_info
        tolog('!!WARNING!!2999!! tracing failed: %s' % str(exc_info()))
    else:
        tolog("Tracing report sent")
def prepareReport(self, state, report):
    """ Prepare the Rucio tracing report. Set the client exit state and finish

    state: client state string, or a dict of key/values to merge into report
    report: tracing report dictionary (must contain 'timeStart')
    """
    # idiom fix: dict.has_key() is deprecated (and removed in Python 3);
    # 'in' and isinstance() behave the same on Python 2.6+
    if 'timeStart' in report:
        # Handle the client state which might be a string or a dictionary
        if isinstance(state, str):
            report['clientState'] = state
        elif isinstance(state, dict):
            for key in state.keys():
                report[key] = state[key]
        else:
            tolog("!!WARNING!!3332!! Do not know how to handle this tracing state: %s" % str(state))
        # Store the tracing report to file
        filename = getTracingReportFilename()
        status = writeJSON(filename, report)
        if status:
            tolog("Wrote tracing report to file %s (cwd=%s)" % (filename, os.getcwd()))
        else:
            tolog("!!WARNING!!3333!! Failed to write tracing report to file")
        # Send the report
        #try:
        # self.sendTrace(report)
        #except Exception, e:
        # tolog("!!WARNING!!3334!! Failed to send tracing report: %s" % (e))
    else:
        tolog("!!WARNING!!3331!! No timeStart found in tracing report, cannot send")
def sendReport(self, report):
    """ Send Rucio tracing report. Set the client exit state and finish

    report: tracing report dictionary (must contain 'timeStart')
    """
    # idiom fix: dict.has_key() is deprecated (removed in Python 3);
    # 'in' behaves identically on Python 2
    if 'timeStart' in report:
        # finish instrumentation
        report['timeEnd'] = time.time()
        # send report
        tolog("Sending tracing report: %s" % str(report))
        self.sendTrace(report)
    else:
        tolog("!!WARNING!!21211! Tracing report does not have a timeStart entry: %s" % str(report))
@classmethod
def getSURLDictionaryFilename(self, directory, jobId):
    """ return the name of the SURL dictionary file """
    # one dictionary file per job, extension depends on json availability
    basename = "surlDictionary-%s.%s" % (jobId, getExtension())
    return os.path.join(directory, basename)
@classmethod
def getSURLDictionary(self, directory, jobId):
    """ get the SURL dictionary from file

    Deserializes the guid->surl mapping written by putSURLDictionary();
    returns an empty dict when the file is missing or unreadable.
    """
    surlDictionary = {}
    # open the dictionary for reading
    filename = self.getSURLDictionaryFilename(directory, jobId)
    if not os.path.exists(filename):
        tolog("SURL dictionary does not exist: %s (will be created)" % (filename))
        return surlDictionary
    try:
        fp = open(filename, "r")
    except OSError, e:
        tolog("!!WARNING!!1800!! Failed to open SURL dictionary for reading: %s" % str(e))
    else:
        # get the dictionary
        importedLoad = False
        if filename.endswith('json'):
            # json may be unavailable on very old python versions
            try:
                from json import load
            except Exception, e:
                tolog("!!WARNING!!1800!! Could not import load function from json module (too old python version?): %s" % str(e))
            else:
                importedLoad = True
        else:
            from pickle import load
            importedLoad = True
        if importedLoad:
            # load the dictionary from file
            try:
                # load the dictionary from file
                surlDictionary = load(fp)
            except:
                tolog("!!WARNING!!1800!! JobState could not deserialize file: %s" % (filename))
            else:
                tolog("Deserialized surl dictionary with %d keys: filename=%s" % (len(surlDictionary.keys()), filename))
                #tolog("surlDictionary=%s" % str(surlDictionary))
        fp.close()
    return surlDictionary
@classmethod
def putSURLDictionary(self, surlDictionary, directory, jobId):
    """ store the updated SURL dictionary

    Serializes the guid->surl mapping (json or pickle depending on the
    filename extension). Returns True on success.
    """
    status = False
    # open the dictionary for writing
    filename = self.getSURLDictionaryFilename(directory, jobId)
    try:
        fp = open(filename, "w")
    except OSError, e:
        tolog("!!WARNING!!1800!! Could not open SURL dictionary for writing: %s" % str(e))
    else:
        # write the dictionary
        if filename.endswith('json'):
            from json import dump
        else:
            from pickle import dump
        try:
            # write the dictionary to file
            dump(surlDictionary, fp)
        except Exception, e:
            tolog("!!WARNING!!1800!! Could not encode data to SURL dictionary file: %s, %s" % (filename, str(e)))
        else:
            status = True
        fp.close()
    return status
@classmethod
def updateSURLDictionary(self, guid, surl, directory, jobId):
    """ add the guid and surl to the surl dictionary """
    tolog("Adding GUID (%s) and SURL (%s) to dictionary" % (guid, surl))
    # (re-)read the current dictionary; it is empty on the first call
    surlDictionary = self.getSURLDictionary(directory, jobId)
    surlDictionary[guid] = surl
    # persist the updated mapping
    if not self.putSURLDictionary(surlDictionary, directory, jobId):
        tolog("!!FAILED!!1800!! SURL dictionary could not be updated (later LFC registration will not work)")
        return False
    tolog("Successfully updated SURL dictionary (which currectly has %d key(s))" % len(surlDictionary.keys()))
    return True
def getFileInfoFromRucio(self, scope, dataset, guid):
""" Get the file size and checksum from Rucio """
filesize = ""
checksum = ""
tolog("scope=%s"%scope)
tolog("dataset=%s"%dataset)
tolog("guid=%s"%guid)
pre = scope + ":"
if dataset.startswith(pre):
dataset = dataset.replace(pre, "")
try:
from rucio.client import Client
client = Client()
replica_list = [i for i in client.list_files(scope, dataset)]
except Exception, e:
tolog("!!WARNING!!2233!! Exception caught: %s" % (e))
else:
# Extract the info for the correct guid
tolog("Rucio returned a replica list with %d entries" % (len(replica_list)))
for i in range(0, len(replica_list)):
# replica = {u'adler32': u'9849e8ae', u'name': u'EVNT.01580095._002901.pool.root.1', u'bytes': 469906, u'scope': u'mc12_13TeV', u'guid': u'F88E0A836696344981358463A641A486', u'events': None}
# Is it the replica we are looking for?
if not "-" in replica_list[i]['guid']:
# Convert the guid (guids in Rucio might not have dashes)
guid = guid.replace('-', '')
if guid == replica_list[i]['guid']:
checksum = replica_list[i]['adler32']
filesize = str(replica_list[i]['bytes'])
events = replica_list[i]['events']
if events != None:
tolog("File %s has checksum %s, size %s and %d events" % (replica_list[i]['name'], checksum, filesize, str(replica_list[i]['events'])))
else:
tolog("File %s has checksum %s and size %s (no recorded events)" % (replica_list[i]['name'], checksum, filesize))
break
return filesize, checksum
def verifyPaths(self, paths):
    """ Verify existence of paths """
    # remember the last path that does not exist ("" when all exist)
    missing = ""
    for candidate in paths:
        if not os.path.exists(candidate):
            missing = candidate
    return (missing == "", missing)
def reportFileCorruption(surl):
    """ Report a corrupted file to the consistency server

    Strips the port and SRM webservice parts from the SURL and declares
    the replica suspicious via the Rucio client. Best effort: failures
    are logged and ignored.
    """
    # clean up the SURL before reporting it
    surl = re.sub(':[0-9]+/', '/', surl)
    surl = re.sub('/srm/v2/server\?SFN=', '', surl)
    surl = re.sub('/srm/managerv1\?SFN=', '', surl)
    surl = re.sub('/srm/managerv2\?SFN=', '', surl)
    tolog("Cleaned up SURL: %s" % (surl))
    try:
        from rucio.client import Client
        client = Client()
        # BUG FIX: the original called c.declare_suspicious_file_replicas(),
        # but the client object is named 'client' ('c' was undefined, so the
        # report always fell into the except branch)
        client.declare_suspicious_file_replicas(pfns=[surl], reason='Corrupted File')
    except:
        tolog("!!WARNING!!2111!! Failed to report corrupted file to consistency server")
    else:
        tolog("Reported corrupted file to consistency server: %s" % (surl))
reportFileCorruption = staticmethod(reportFileCorruption)
def getMover(cls, *args, **kwrds):
    """
    Creates and provides exactly one instance for each required subclass of SiteMover.
    Implements the Singleton pattern. Method is incomplete.
    """
    if issubclass(cls, SiteMover):
        return cls(*args, **kwrds)
    # non-subclasses are logged and implicitly return None
    log.error("Wrong Factory invocation, %s is not subclass of SiteMover" % cls.__name__)
getMover = classmethod(getMover)
# Utility Functions
def mkdirWperm(newdir):
    """
    - if the dir already exists, silently completes
    - if a regular file is in the way, raise an exception
    - parent directory does not exist, make it as well
    Permissions are set as they should be.
    PERMISSIONS_DIR is loaded from config.config_sm and it is currently 0775 (group write)
    """
    tolog("Creating dir %s" % newdir)
    if os.path.isdir(newdir):
        pass
    elif os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired dir, '%s', already exists." % (newdir))
    else:
        head, tail = os.path.split(newdir)
        if head and not os.path.isdir(head):
            # recursively create missing parent directories first
            SiteMover.mkdirWperm(head)
        if tail:
            try:
                os.mkdir(newdir)
            except OSError, e:
                # tolerate a race where another process created the dir first
                if not os.path.isdir(newdir):
                    raise e
            # Desired dir permission
            # remember to use octal 0xxx
            # is the desired permission 0775 (group writes) or 0755 (only user writes)?
            os.chmod(newdir, PERMISSIONS_DIR)
mkdirWperm = staticmethod(mkdirWperm)
    def verifyLocalFile(fsize, fchecksum, fname, extra_path=''):
        """
        Checks if local copy is correct
        returns 0 if OK

        Compares the catalog values (fsize, fchecksum) with the local file
        fname (optionally located under extra_path). Returns a tuple of
        (exit code, pilotErrorDiag); exit code 0 also covers the case where
        verification is skipped because of zero/dummy catalog values.
        """
        error = PilotErrors()
        pilotErrorDiag = ""
        # nothing to verify against when the catalog has no usable values
        if fsize == 0 or SiteMover.isDummyChecksum(fchecksum):
            return 0, pilotErrorDiag
        # set checksum type
        if fchecksum != 0 and fchecksum != "":
            csumtype = SiteMover.getChecksumType(fchecksum)
        else:
            csumtype = "default"
        # evaluate and compare (size, checksum)
        dest_file = fname
        if extra_path:
            dest_file = os.path.join(extra_path, dest_file)
        ec, pilotErrorDiag, dstfsize, dstfchecksum = SiteMover.getLocalFileInfo(dest_file, csumtype=csumtype)
        if ec != 0:
            return ec, pilotErrorDiag
        # size check is skipped when the catalog size is 0/'0'
        if fsize != 0 and fsize != '0' and dstfsize != fsize:
            return error.ERR_GETWRONGSIZE, pilotErrorDiag
        # WARNING: note that currenty only HUSiteMover is using verifyLocalFile(), and only in get_data()
        if fchecksum != 0 and dstfchecksum != fchecksum and not SiteMover.isDummyChecksum(fchecksum):
            # report the error code matching the checksum algorithm used
            if csumtype == "adler32":
                return error.ERR_GETADMISMATCH, pilotErrorDiag
            else:
                return error.ERR_GETMD5MISMATCH, pilotErrorDiag
        return 0, pilotErrorDiag
    verifyLocalFile = staticmethod(verifyLocalFile)
def getLocalFileSize(self, filename):
""" Get the file size of a local file (return a string) """
filesize = ""
if os.path.exists(filename):
try:
filesize = os.path.getsize(filename)
except Exception, e:
tolog("!!WARNING!!1232!! Failed to get file size: %s" % (e))
else:
# convert to string
filesize = str(filesize)
else:
tolog("!!WARNING!!1233!! Local file does not exist: %s" % (filename))
return filesize
    def getLocalFileInfo(fname, csumtype="default", date=None):
        """ Return exit code (0 if OK), file size and checksum of a local file, as well as as date string if requested """
        # note that date is mutable
        # NOTE(review): the 'date' rebinding below is a local rebinding and
        # is NOT visible to the caller despite the mutability comment -
        # confirm whether any caller relies on it
        error = PilotErrors()
        pilotErrorDiag = ""
        tolog("getLocalFileInfo")
        # does the file exist?
        if not os.path.isfile(fname):
            # a missing DBRelease file gets its own dedicated error code
            if fname.find("DBRelease") >= 0 and os.path.exists(os.path.dirname(fname)):
                pilotErrorDiag = "DBRelease file missing: %s" % (fname)
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                return error.ERR_MISSDBREL, pilotErrorDiag, 0, 0
            else:
                pilotErrorDiag = "No such file or directory: %s" % (fname)
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                return error.ERR_MISSINGLOCALFILE, pilotErrorDiag, 0, 0
        # get the modification time if needed and store it in the mutable object
        if date:
            tolog("calling getModTime")
            date = SiteMover.getModTime(os.path.dirname(fname), os.path.basename(fname))
        # get the file size
        try:
            tolog("Executing getsize() for file: %s" % (fname))
            fsize = str(os.path.getsize(fname))
        except OSError, e:
            pilotErrorDiag = "Could not get file size: %s" % str(e)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            return error.ERR_FAILEDSIZELOCAL, pilotErrorDiag, 0, 0
        else:
            # an empty file is treated as an error
            if fsize == "0":
                pilotErrorDiag = "Encountered zero file size for file %s" % (fname)
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                return error.ERR_ZEROFILESIZE, pilotErrorDiag, 0, 0
            else:
                tolog("Got file size: %s" % (fsize))
        # get the checksum
        if csumtype == "adler32":
            tolog("Executing adler32() for file: %s" % (fname))
            from movers import base
            m = base.BaseSiteMover()
            fchecksum, _ = m.calc_file_checksum(fname)
            #fchecksum = SiteMover.adler32(fname)
            # '00000001' is the adler32 start value, i.e. nothing was read
            if fchecksum == '00000001': # "%08x" % 1L
                pilotErrorDiag = "Adler32 failed (returned 1)"
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                return error.ERR_FAILEDADLOCAL, pilotErrorDiag, fsize, 0
            else:
                tolog("Got adler32 checksum: %s" % (fchecksum))
        else:
            # fall back to the configured external checksum command
            _cmd = '%s %s' % (CMD_CHECKSUM, fname)
            tolog("Executing command: %s" % (_cmd))
            try:
                s, o = commands.getstatusoutput(_cmd)
            except Exception, e:
                s = -1
                o = str(e)
                tolog("!!WARNING!!2999!! Exception caught in getstatusoutput: %s" % (o))
            if s != 0:
                o = o.replace('\n', ' ')
                check_syserr(s, o)
                pilotErrorDiag = "Error running checksum command (%s): %s" % (CMD_CHECKSUM, o)
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                return error.ERR_FAILEDMD5LOCAL, pilotErrorDiag, fsize, 0
            # first whitespace-separated token of the output is the checksum
            fchecksum = o.split()[0]
            tolog("Got checksum: %s" % (fchecksum))
        return 0, pilotErrorDiag, fsize, fchecksum
    getLocalFileInfo = staticmethod(getLocalFileInfo)
def dumpExtendedProxy(setupstr=''):
""" run voms-proxy-info -all """
tmp = setupstr.strip()
if tmp != "" and not tmp.endswith(';'):
tmp += ";"
if os.environ.has_key('X509_USER_PROXY') and tmp.find('export X509_USER_PROXY=') == -1:
tmp += "export X509_USER_PROXY=%s;" % (os.environ['X509_USER_PROXY'])
tmp = tmp.replace(";;",";")
cmd = "%svoms-proxy-info -all --file $X509_USER_PROXY" % (tmp)
tolog("Executing command: %s" % (cmd))
exitcode, output = commands.getstatusoutput(cmd)
tolog("Output: %d, %s" % (exitcode, output))
dumpExtendedProxy = staticmethod(dumpExtendedProxy)
# Code taken from:
# http://isscvs.cern.ch/cgi-bin/viewcvs-all.cgi/dq2.filecatalog.lfc/lib/dq2/filecatalog/lfc/lfcconventions.py?revision=1.4&root=atlas-dq2&view=markup&pathrev=dq2-filecatalog-lfc-0-7-0-branch
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
"""
suffixes_to_drop = ['_dis','_sub','_tid']
#try:
# suffixes_to_drop.extend(LFCFileCatalogConfigurator().getTagSuffixesList())
#except:
# pass
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
__strip_tag = staticmethod(__strip_tag)
# Code taken from same source as above
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
"""
suffixes_to_drop = ['_dis','_sub','_frag']
#try:
# suffixes_to_drop.extend(LFCFileCatalogConfigurator().getPandaSuffixesList())
#except:
# pass
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
__strip_dsn = staticmethod(__strip_dsn)
# old to_native_lfn taken from:
# http://atlas-sw.cern.ch/cgi-bin/viewcvs-atlas.cgi/offline/DataManagement/DQ2/dq2.filecatalog.lfc/lib/dq2/filecatalog/lfc/lfcconventions.py?view=log
# Code taken from same source as strip functions above
    def to_native_lfn(dsn, lfn, prefix='rucio/'):
        """
        Return LFN with LFC hierarchical namespace.

        Builds a '/grid/atlas/<prefix>/...' path whose layout depends on the
        number of dot-separated fields in the dataset name (dsn); panda
        _dis/_sub/_tid suffixes are stripped via the helper functions above.
        """
        bpath = '/grid/atlas/'
        # add prefix
        bpath += prefix
        # drop a trailing slash so the '%s/...' joins below stay clean
        if bpath[-1] == '/': bpath = bpath[:-1]
        # check how many dots in dsn
        fields = dsn.split('.')
        nfields = len(fields)
        if nfields == 1:
            # no project info at all: file under 'other'
            stripped_dsn = SiteMover.__strip_dsn(dsn)
            return '%s/other/%s/%s' % (bpath, stripped_dsn, lfn)
        elif nfields == 2:
            project = fields[0]
            stripped_dsn = SiteMover.__strip_dsn(dsn)
            return '%s/%s/%s/%s' % (bpath, project, stripped_dsn, lfn)
        elif nfields < 5 or re.match('user*|group*',fields[0]):
            # short or user/group datasets: use the first three fields
            project = fields[0]
            f2 = fields[1]
            f3 = fields[2]
            stripped_dsn = SiteMover.__strip_dsn(dsn)
            return '%s/%s/%s/%s/%s/%s' % (bpath, project, f2, f3, stripped_dsn, lfn)
        else:
            # full production dataset name: project/datatype/tag layout
            project = fields[0]
            dataset_type = fields[4]
            if nfields == 5:
                tag='other'
            else:
                tag = SiteMover.__strip_tag(fields[-1])
            stripped_dsn = SiteMover.__strip_dsn(dsn)
            return '%s/%s/%s/%s/%s/%s' % (bpath, project, dataset_type, tag, stripped_dsn, lfn)
    to_native_lfn = staticmethod(to_native_lfn)
def calc_adler32(file_name):
""" calculate the checksum for a file with the zlib.adler32 algorithm """
val = 1
blockSize = 32 * 1024 * 1024
with open(file_name) as fp:
while True:
data = fp.read(blockSize)
if not data:
break
val = zlib.adler32(data, val)
if val < 0:
val += 2 ** 32
return hex(val)[2:10].zfill(8).lower()
calc_adler32 = staticmethod(calc_adler32)
def adler32(filename):
""" calculate the checksum for a file with the zlib.adler32 algorithm """
# note: a failed file open will return '1'
import zlib
# default adler32 starting value
sum1 = 1L
try:
f = open(filename, 'rb')
except Exception, e:
tolog("!!WARNING!!2999!! Could not open file: %s" % (filename))
else:
try:
for line in f:
sum1 = zlib.adler32(line, sum1)
except Exception, e:
tolog("!!WARNING!!2777!! Exception caught in zlib.adler32: %s" % (e))
f.close()
# correct for bug 32 bit zlib
if sum1 < 0:
sum1 = sum1 + 2**32
# convert to hex
sum2 = "%08x" % sum1
return str(sum2)
adler32 = staticmethod(adler32)
def doFileVerifications():
""" Should the get operation perform any file size/checksum verifications? """
# not for storm sites
# also used to skip input file size checks when mv site mover is used (from Mover)
_copytool = readpar('copytool')
_copytoolin = readpar('copytoolin')
if _copytoolin == "storm" or _copytoolin == "mv" or (_copytoolin == "" and (_copytool == "storm" or _copytool == "mv")):
doVerification = False
else:
doVerification = True
return doVerification
doFileVerifications = staticmethod(doFileVerifications)
def extractPathFromPFN(gpfn):
""" Remove any host and SFN info from PFN path """
# gpfn = srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2?SFN=/castor/ads.rl.ac.uk/prod/atlas/...
# -> path = /castor/ads.rl.ac.uk/prod/atlas/...
# gpfn = srm://srm-atlas.gridpp.rl.ac.uk/castor/ads.rl.ac.uk/prod/atlas/...
# -> path = /castor/ads.rl.ac.uk/prod/atlas/...
if "SFN" in gpfn:
src_loc_pfn = gpfn.split('SFN=')[1]
else:
src_loc_pfn = '/' + gpfn.split('/', 3)[3] # 0:method, 2:host+port, 3:abs-path
return src_loc_pfn
extractPathFromPFN = staticmethod(extractPathFromPFN)
def getChecksumType(csum, format="long"):
""" return the checksum type given only the checksum value """
# format = "long" returns either "adler32" or "md5sum"
# format = "short" returns either "AD" or "MD"
# force string conversion in case None or 0 should be sent
csum = str(csum)
if len(csum) == 8:
if format == "long":
csumtype = "adler32"
else:
csumtype = "AD"
elif len(csum) == 32:
if format == "long":
csumtype = "md5sum"
else:
csumtype = "MD"
else:
csumtype = CMD_CHECKSUM
return csumtype
getChecksumType = staticmethod(getChecksumType)
def isDummyChecksum(fchecksum):
""" ignore dummy checksum values, e.g. used in the M4/5 cosmics tests """
# Also skipping checksum values of "0" from Aug 12, 2008 (v 24g)
dummy = False
try:
if fchecksum == "0":
tolog("!!WARNING!!2999!! Came across a dummy checksum value (%s). Skipping checksum test" % (fchecksum))
dummy = True
elif fchecksum == "00000000000000000000000000000000":
tolog("!!WARNING!!2999!! Came across a dummy md5sum value (%s). Skipping md5sum test" % (fchecksum))
dummy = True
elif fchecksum == "00000000":
tolog("!!WARNING!!2999!! Came across a dummy adler32 value (%s). Skipping adler32 test" % (fchecksum))
dummy = True
except:
pass
return dummy
isDummyChecksum = staticmethod(isDummyChecksum)
    def addFileInfo(lfn, checksum, csumtype='MD', fsize=None):
        """ add MD checksum to lfn (change to AD later)

        Looks the file up in the LFC by lfn and stores checksum (of type
        csumtype) and the file size (fsize, or the size already recorded in
        the catalog when fsize is None) on the entry.
        Returns (status, pilotErrorDiag): 0 on success, -1 for import/API
        failures, 1 when the lfn is not found, or the lfc return code when
        the update itself fails.
        """
        pilotErrorDiag = ""
        tolog("Checksum type to be set: %s" % (csumtype))
        try:
            import lfc
        except Exception, e:
            pilotErrorDiag = "addFileInfo() could not import lfc module: %s" % str(e)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            return -1, pilotErrorDiag
        os.environ['LFC_HOST'] = readpar('lfchost')
        try:
            stat = lfc.lfc_filestatg()
            # Eddie
            # Make sure lfn is not unicode due to a bug in LFC libs that returns the following error:
            # 'lfc_statg', argument 1 of type 'char const *'
            rc = lfc.lfc_statg(str(lfn), "", stat)
        except Exception, e:
            pilotErrorDiag = "lfc function failed in addFileInfo(): %s" % str(e)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            return -1, pilotErrorDiag
        if rc != 0:
            err_num = lfc.cvar.serrno
            errstr = lfc.sstrerror(err_num)
            pilotErrorDiag = "lfc_statg failed for lfn: %s : rc = %d, err_num = %d, errstr = %s" % (lfn, rc, err_num, errstr)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            return 1, pilotErrorDiag
        # fall back to the catalog file size when none was supplied
        if fsize:
            filesize = long(fsize)
        else:
            filesize = stat.filesize
        rc = lfc.lfc_setfsizeg(stat.guid, filesize, csumtype, checksum)
        if rc != 0:
            err_num = lfc.cvar.serrno
            errstr = lfc.sstrerror(err_num)
            pilotErrorDiag = "lfc_setfsizeg failed for lfn: %s : rc = %d, err_num = %d, errstr = %s" % (lfn, rc, err_num, errstr)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            return rc, pilotErrorDiag
        tolog("Successfully set checksum (%s of type %s) and file size %s for %s" % (checksum, csumtype, str(filesize), lfn))
        return rc, pilotErrorDiag
    addFileInfo = staticmethod(addFileInfo)
    def getProperSE(token, alt=False):
        """ get the proper endpoint

        Picks the SE endpoint matching the given space token from the
        schedconfig 'seopt'/'setokens' lists; falls back to the first entry
        of the default 'se' field when no match can be made. The returned SE
        has any unwanted stage-in info filtered away.
        """
        se = ""
        useDefaultSE = True
        # do we have several endpoints to chose from?
        _seopt = readpar('seopt', alt=alt)
        if _seopt != '':
            # Find the right endpoint, corrsponding to the given space token
            setokens = readpar('setokens', alt=alt).split(",")
            seopt = _seopt.split(",")
            tolog("seopt: %s" % str(seopt))
            tolog("token: %s" % (token))
            tolog("setokens: %s" % str(setokens))
            if len(seopt) != len(setokens):
                tolog("!!WARNING!!2999!! seopt does not have the same length as setokens: %s, %s" % (str(seopt), str(setokens)))
            else:
                # create the dictionary
                sedict = dict(zip(setokens, seopt))
                # lookup the endpoint corresponding to token
                if token in setokens:
                    try:
                        se = sedict[token]
                    except Exception, e:
                        tolog("!!WARNING!!2999!! Could not find a matching endpoint to token %s: %s, %s" %\
                              (token, str(sedict), str(e)))
                    else:
                        useDefaultSE = False
                else:
                    tolog("Token %s is not among valid tokens: %s" % (token, str(setokens)))
        if useDefaultSE:
            # Stick with the default se
            # Maybe be a comma list but take first always
            # (Remember that se can be a list where the first is used for output but any can be used for input)
            tolog("Will use default SE endpoint")
            se = readpar('se', alt=alt).split(",")[0]
            _dummytoken, se = SiteMover.extractSE(se)
        # remove any unwanted stage-in info (present at CERN for atlasdatatape)
        se = SiteMover.filterSE(se)
        tolog("Using SE: %s" % (se))
        return se
    getProperSE = staticmethod(getProperSE)
def filterSE(se):
""" Filter away any unwanted stage-in info from the file path """
# "/castor/cern.ch/grid/atlas/((tzero/prod1/perm)|(tzero/prod2/perm)|(t0/perm)|(DAQ)|(conditions)|(atlasdatatape))"
# -> /castor/cern.ch/grid/atlas/atlasdatatape
match = re.findall('\(\S+\)', se)
if match and "atlasdatatape" in se:
filtered_path = match[0]
tolog("Found unwanted stage-in info in SE path, will filter it away: %s" % (filtered_path))
se = se.replace(filtered_path, "atlasdatatape")
return se
filterSE = staticmethod(filterSE)
def extractHostname(se):
""" extract the hostname from the given se info """
# e.g. token:ATLASPRODDISK:srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv2?SFN= -> dcsrm.usatlas.bnl.gov
hostname_pattern = re.compile('.+://([^/:]+):\d+/')
if se != "":
_hostname = re.findall(hostname_pattern, se)
if _hostname != []:
hostname = _hostname[0]
else:
tolog("!!WARNING!!2999!! Hostname could not be extracted from se=%s (reg exp pattern too weak?)" % (se))
hostname = ""
else:
tolog("!!WARNING!!2999!! SE not defined, hostname can not be extracted")
hostname = ""
return hostname
extractHostname = staticmethod(extractHostname)
def getSEMatchFromSEOpt(srm):
""" Extract the full SE path from seopt that matches srm """
# seopt = "token:ATLASDATADISK:srm://f-dpm001.grid.sinica.edu.tw:8446/srm/managerv2?SFN=,token:ATLASDATATAPE:srm://srm2.grid.sinica.edu.tw:8443/srm/managerv2?SFN=,token:ATLASMCTAPE:srm://srm2.grid.sinica.edu.tw:8443/srm/managerv2?SFN="
# srm = srm://srm2.grid.sinica.edu.tw -> sematch = srm://srm2.grid.sinica.edu.tw:8443/srm/managerv2?SFN=
sematch = ""
seopt = readpar('seopt')
if seopt != "":
seopt_list = seopt.split(",")
for _seopt in seopt_list:
token, path = SiteMover.extractSE(_seopt)
if srm in path:
sematch = path
break
return sematch
getSEMatchFromSEOpt = staticmethod(getSEMatchFromSEOpt)
def extractSEPath(se):
""" Extract the sepath from the se info """
# se='token:ATLASGROUPDISK:srm://head01.aglt2.org:8443/srm/managerv2?SFN=/pnfs/aglt2.org/atlasgroupdisk/perf-muons/'
# -> '/pnfs/aglt2.org/atlasgroupdisk/perf-muons/'
sepath = ""
pattern = re.compile(r"SFN=(.+)")
found = re.findall(pattern, se)
if len(found) > 0:
sepath = found[0]
return sepath
extractSEPath = staticmethod(extractSEPath)
def extractSE(fullSE):
""" extract the 'se' info from the schedconfig.se field """
# examples:
# fullSE = token:ATLASMCDISK:srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2/something/somethingelse/mcdisk/
# return: ATLASMCDISK, srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2/something/somethingelse/mcdisk/
# fullSE = srm://srm-atlas.gridpp.rl.ac.uk:8443/path
# return: None, srm://srm-atlas.gridpp.rl.ac.uk:8443/path
token = None
path = None
# does the fullSE contain a space token?
if fullSE[:len("token")] == "token":
# splitFullSE = ['token', 'ATLASMCDISK', 'srm', '//srm-atlas.gridpp.rl.ac.uk', '8443/srm/managerv2/something/somethingelse/mcdisk/']
splitFullSE = fullSE.split(":")
# token = ATLASMCDISK
token = splitFullSE[1]
# remainingFullSE = ['srm', '//srm-atlas.gridpp.rl.ac.uk', '8443/srm/managerv2/something/somethingelse/mcdisk/']
remainingFullSE = splitFullSE[2:]
from string import join
# path = srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2/something/somethingelse/mcdisk/
path = join(remainingFullSE, ":")
else:
path = fullSE
return token, path
extractSE = staticmethod(extractSE)
    def getSEFromToken(token):
        """ Match an SE to a space token descriptor

        Scans the comma-separated schedconfig 'seopt' entries and returns
        the SE (with port and SRM version info stripped) whose space token
        equals 'token'; "" when seopt is empty or nothing matches.
        """
        stripped_se = ""
        seopt = readpar("seopt")
        if seopt != "":
            seopt_list = seopt.split(",")
            # match an SE to the space token
            for se in seopt_list:
                _token, _se = SiteMover.extractSE(se)
                if _token == token:
                    # remove the post and version strings
                    stripped_se = SiteMover.stripPortAndVersion(_se)
        return stripped_se
    getSEFromToken = staticmethod(getSEFromToken)
def getTokenFromPath(path):
""" return the space token from an SRMv2 end point path """
# example:
# path = srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2/something/somethingelse/mcdisk/
# return: ATLASMCDISK
# remove any trailing / (..mcdisk/ -> ../mcdisk)
if path[-1] == '/':
path = path[:-1]
# ATLASMCDISK
return "ATLAS" + os.path.basename(path).upper()
getTokenFromPath = staticmethod(getTokenFromPath)
def stripListSEs(listSEs):
""" remove any space token info from the SE list """
# ['token:BLAH:srm://path1', 'srm://path2']
# return ['srm://path1', 'srm://path2']
strippedListSEs = []
for se in listSEs:
if se[:len('token')] == "token":
# extract the token (not needed) and the path (without the token)
token, sepath = SiteMover.extractSE(se)
# remove port and SRM version number as well since it is not needed for replica comparison
# (replica.sfn does not contain port and version number)
sepath = SiteMover.stripPortAndVersion(sepath)
strippedListSEs += [sepath]
else:
strippedListSEs += [se]
return strippedListSEs
stripListSEs = staticmethod(stripListSEs)
def stripPortAndVersion(path):
""" remove port and SFN (e.g. ':8443/srm/managerv2?SFN=' from path (if any) """
# 'srm://gridka-dcache.fzk.de:8443/srm/managerv2?SFN=/pnfs/gridka.de/atlas/disk-only/mc/simone_test'
# => 'srm://gridka-dcache.fzk.de/pnfs/gridka.de/atlas/disk-only/mc/simone_test'
pattern = re.compile(r"(\:\d+/[0-9a-zA-Z/?]+SFN=)")
found = re.findall(pattern, path)
if len(found) > 0:
return path.replace(found[0], '')
else:
return path
stripPortAndVersion = staticmethod(stripPortAndVersion)
def stripProtocolServer(fullpath):
""" strip protocol and server info """
# srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2/something/somethingelse/mcdisk/
# -> srm://srm-atlas.gridpp.rl.ac.uk/something/somethingelse/mcdisk/
# -> /something/somethingelse/mcdisk/
# remove any port and version if present
fullpath = SiteMover.stripPortAndVersion(fullpath)
# extract the protocol and server info
strippedpath = SiteMover.stripPath(fullpath)
# and finally remove the protocol and server info
return fullpath.replace(strippedpath, "")
stripProtocolServer = staticmethod(stripProtocolServer)
def stripPath(fullpath):
""" strip path """
# srm://srm-atlas.gridpp.rl.ac.uk:8443/srm/managerv2/something/somethingelse/mcdisk/
# -> srm://srm-atlas.gridpp.rl.ac.uk:8443
path = ''
# remove ddm info if present
if fullpath.find("^"):
fullpath = fullpath.split("^")[0]
try:
fullpath = fullpath.replace("://","|")
pathList = fullpath.split("/")
path = pathList[0]
path = path.replace("|","://")
except Exception, e:
tolog("!!WARNING!!2999!! Could not strip path from: %s, %s" % (fullpath, e))
path = fullpath
if path == '':
tolog("!!WARNING!!2999!! Nothing to strip from: %s" % (fullpath))
path = fullpath
return path
stripPath = staticmethod(stripPath)
def getFileType(filename, setup=None):
""" use the file command to get the file type """
status = "unknown"
if setup:
_setup = setup
else:
_setup = ""
try:
cmd = "%s file -b %s" % (_setup, filename)
tolog("Executing command: %s" % (cmd))
rs = commands.getoutput(cmd)
except Exception, e:
tolog("!!WARNING!!3000!! Could not run file command: %s" % str(e))
else:
tolog("Command returned: %s" % (rs))
if rs != "":
status = rs
return status
getFileType = staticmethod(getFileType)
def getTransferModes():
""" should remote I/O and file stager be used? """
# return the corresponding booleans
directIn = False
useFileStager = False
dInfo = getDirectAccessDic(readpar('copysetupin'))
# if copysetupin did not contain direct access info, try the copysetup instead
if not dInfo:
dInfo = getDirectAccessDic(readpar('copysetup'))
# check if we should use the copytool
if dInfo:
directIn = dInfo['directIn']
useFileStager = dInfo['useFileStager']
return directIn, useFileStager
getTransferModes = staticmethod(getTransferModes)
def isRootFileName(filename):
""" Determine whether file is a root file or not by its name """
filename = filename.lower()
return not ('.tar.gz' in filename or '.lib.tgz' in filename or '.raw.' in filename)
isRootFileName = staticmethod(isRootFileName)
def isRootFile(filename, setup=None, directAccess=False):
""" check whether filename is a root file or not """
# always a root file in direct access mode
if directAccess:
return True
status = False
ftype = SiteMover.getFileType(filename, setup=setup)
try:
if ftype[:4].upper() == "ROOT":
status = True
except:
pass
return status
isRootFile = staticmethod(isRootFile)
def getDirList(_dirlist):
""" return a proper dir list from dirlist string """
# _dirlist = "/atlas/[atlasdatadisk,atlasdatadisktape,atlasmcdisk]/"
# -> ['/atlas/atlasdatadisk, '/atlas/atlasdatadisktape', '/atlas/atlasmcdisk']
# _dirlist = "/atlas/[atlasdatadisk,atlasdatadisktape,atlasmcdisk]/rucio"
# -> ['/atlas/atlasdatadisk/rucio', '/atlas/atlasdatadisktape/rucio', '/atlas/atlasmcdisk/rucio']
# _dirlist = "/atlas/atlasdatadisk,/atlas/atlasmcdisk/rucio"
# -> ['/atlas/atlasdatadisk', '/atlas/atlasmcdisk/rucio']
# _dirlist = "/[atlasscratchdisk/rucio,atlaslocalgroupdisk/rucio]"
# -> ['/atlasscratchdisk/rucio', '/atlaslocalgroupdisk/rucio']
# also works with lists of the following form
#_dirlist = "/atlas/[atlasdatadisk]"
#_dirlist = "/atlas/atlasdatadisk"
#_dirlist = "/atlas/whatever/atlasdatadisk"
#_dirlist = "/atlas/atlasdatadisk,/atlas/atlasmcdisk"
#_dirlist = "" (-> [''])
dirlist = []
if _dirlist.find('[') > 0 and _dirlist.find(']') > 0:
# path should have the form /path/[spacetokenlowered1, ...]
pat = re.compile('(.*)\[(.*)\]')
mat = pat.match( _dirlist)
if mat:
# mat.group(1) = '/atlas/'
# mat.group(2) = 'atlasdatadisk,atlasdatadisktape,atlasmcdisk'
_dir = mat.group(1)
_subdirs = mat.group(2)
if not _dir == "/":
_remainder = _dirlist.replace(mat.group(1), '').replace('['+mat.group(2)+']', '')
else:
_remainder = ""
if _remainder == "/":
_remainder = ""
for d in _subdirs.split(","):
dirlist.append(os.path.join(_dir, d+_remainder))
else:
# no match for pattern
pass
else:
# path should have the form /path/spacetokenlowered or /path/st1,/path/st2.,...
dirlist = _dirlist.split(",")
return dirlist
getDirList = staticmethod(getDirList)
def getMatchingDestinationPath(spacetoken, destinationList, alt=False):
""" select the destination path that corresponds to the space token """
destination = ""
_setokens = readpar('setokens', alt=alt)
if _setokens == "":
setokens = []
else:
setokens = _setokens.split(",")
if len(setokens) != len(destinationList):
tolog("WARNING: setokens (%s) not of the same length as destinationList (%s) - switching to alternative algorithm (match space token)" %\
(str(setokens), str(destinationList)))
# alternative default algorithm: get the path corresponding to the space token in lower case
for prodpath in destinationList:
if prodpath.find(spacetoken.lower()) >= 0:
tolog("Found matching destination path for space token %s: %s" % (prodpath, spacetoken))
destination = prodpath
break
else:
destDict = dict(zip(setokens, destinationList))
# get the path corresponding to the relevant spacetoken
try:
destination = destDict[spacetoken]
except Exception, e:
tolog("!!WARNING!!2999!! Path for spacetoken %s not found in dictionary %s: %s" % (spacetoken, str(destDict), str(e)))
else:
tolog("Space token %s corresponds to path %s" % (spacetoken, destination))
return destination
getMatchingDestinationPath = staticmethod(getMatchingDestinationPath)
def getPort(se):
""" does the se contain a port number? """
# se = 'srm://atlas.bu.edu:8443/srm/v2/server?SFN='
# => port = 8443
port = None
pattern = re.compile(r'\S+\:([0-9]+)')
_port = re.findall(pattern, se)
if _port != [] and _port != ['']:
port = _port[0]
return port
getPort = staticmethod(getPort)
def stripPortFromSE(se):
""" remove the port number from se """
port = SiteMover.getPort(se)
return se.replace(":%s" % str(port), "")
stripPortFromSE = staticmethod(stripPortFromSE)
def addPortToPath(se, gpfn):
""" add the port number to the gpfn if not already there """
# se = 'srm://atlas.bu.edu:8443/srm/v2/server?SFN='
# => port = 8443
# gpfn = 'srm://atlas.bu.edu/srm/v2/server?SFN=/whatever/DBRelease-6.0.1.tar.gz'
# => 'srm://atlas.bu.edu:8443/srm/v2/server?SFN=/whatever/DBRelease-6.0.1.tar.gz'
# remove any ddm info attached to se
if se.find("^"):
se = se.split("^")[0]
tolog("Removed ddm info from se: %s" % (se))
# don't do anything if gpfn already has a port number
port = SiteMover.getPort(gpfn)
if not port:
strippedSE = SiteMover.stripPortFromSE(se)
if gpfn[:len(strippedSE)] == strippedSE and gpfn[:len(se)] != se:
tolog("Updating gpfn with port number from se")
tolog("Old gpfn: %s" % (gpfn))
gpfn = gpfn.replace(gpfn[:len(strippedSE)], se)
tolog("New gpfn: %s" % (gpfn))
else:
tolog("!!WARNING!!2999!! gpfn surl does not match se surl: se=%s, gpfn=%s" % (se, gpfn))
else:
tolog("gpfn already has a port number, will not update it: %s" % (gpfn))
return gpfn
addPortToPath = staticmethod(addPortToPath)
    def genSubpath(dsname, filename, logFile):
        """ Generate BNL specific subpath (used by BNLdCacheSiteMover and SRMSiteMover)

        The subpath depends on the dataset name category (user, conditions,
        production, testpanda or other) and on the current date evaluated in
        the US/Eastern timezone. On any error the partially-built subpath is
        returned with a warning.
        """
        fields = dsname.split('.')
        subpath = ''
        # all date components are evaluated in the US/Eastern timezone
        os.environ['TZ'] = 'US/Eastern'
        year = time.strftime("%Y")
        month = time.strftime("%m")
        week = time.strftime("%W")
        day = time.strftime("%j")
        try:
            if ( re.search('^user[0-9]{0,2}$', fields[0]) ):
                # user datasets: log and data files go to separate areas
                if filename == logFile:
                    subpath = 'user_log02/%s/%s/%s' % (fields[1], year, week)
                else:
                    subpath = 'user_data02/%s/%s/%s' % (fields[1], year, week)
            elif ( SiteMover.isCond(fields) ):
                subpath = 'conditions01'
            elif ( SiteMover.isProd(fields) ):
                # production datasets are split by file type (fields[4])
                if ( fields[4] == 'RDO' ):
                    subpath = 'RDO02/%s/prod' % (fields[0])
                elif ( fields[4] == 'log' ):
                    subpath = 'log02/%s_%s/%s/prod' % (year, month, fields[0])
                else:
                    subpath = '%s01/%s/prod' % (fields[4], fields[0])
            elif ( fields[0] == 'testpanda' ):
                subpath = 'user_log02/testpanda/%s/%s/%s' % (year, week, day)
            else:
                subpath = 'others03/%s/%s' % (year, week)
        except Exception, e:
            tolog('!!WARNING!!2999!! Error in generating the subpath for %s, using %s: %s' % (dsname, subpath, str(e)))
        return subpath
    genSubpath = staticmethod(genSubpath)
def isCond(fields):
if (fields[0] not in SiteMover.CONDPROJ):
return False
m = re.search('^[0-9]{6}$', fields[1])
if m == None:
return False
if (fields[2] != 'conditions'):
return False
return True
isCond = staticmethod(isCond)
def isProd(fields):
m = re.search('^user', fields[0])
if m:
return False
m = re.search('^[0-9]{6}$', fields[1])
if m == None:
return False
if (fields[4] not in SiteMover.PRODFTYPE):
return False
return True
isProd = staticmethod(isProd)
def getLFCPath(analyJob, alt=False):
""" return the proper schedconfig lfcpath """
lfcpath = ""
pilotErrorDiag = ""
if not analyJob:
lfcpath = readpar('lfcprodpath', alt=alt)
if lfcpath == "":
lfcpath = readpar('lfcpath', alt=alt)
else:
lfcpath = readpar('lfcpath', alt=alt)
if lfcpath == "":
lfcpath = readpar('lfcprodpath', alt=alt)
if lfcpath == "":
pilotErrorDiag = "lfc[prod]path is not set"
tolog('WARNING: %s' % (pilotErrorDiag))
return lfcpath, pilotErrorDiag
getLFCPath = staticmethod(getLFCPath)
def getModTime(path, filename):
""" get the modification time of the file """
t = ""
_filename = os.path.join(path, filename)
if os.path.exists(_filename):
try:
t = time.strftime("%Y-%m-%d %I:%M:%S",time.localtime(os.path.getmtime(_filename)))
except Exception, e:
tolog("!!WARNING!!1880!! Get mod time failed for file %s: %s" % (_filename, e))
t = ""
else:
tolog("WARNING: Cannot get mod time of file since it does not exist: %s" % (_filename))
if t != "":
tolog("Mod time for file %s: %s" % (_filename, t))
return t
getModTime = staticmethod(getModTime)
    def getDestination(analyJob, token):
        """ get the destination path

        Production jobs read the SE path list from schedconfig 'seprodpath',
        analysis jobs from 'sepath'; either falls back to the other when its
        primary field is unusable. When a space token is given, the path
        matching that token is selected from the list, otherwise the first
        path is used. Returns "" when nothing matches.
        """
        # for production jobs, the SE path is stored in seprodpath
        # for analysis jobs, the SE path is stored in sepath
        destination = ""
        if not analyJob:
            # process the destination path with getDirList since it can have a complex
            # structure as well as be a list of destination paths matching a corresponding
            # space token
            spath = readpar('seprodpath')
            destinationList = SiteMover.getDirList(spath)
            # decide which destination path to use depending on the space token for the current file
            if token:
                # find the proper path
                destination = SiteMover.getMatchingDestinationPath(token, destinationList)
                if destination == "":
                    tolog("!!WARNING!!2990!! seprodpath could not be used: seprodpath = %s, destinationList = %s, using sepath instead" %\
                          (spath, str(destinationList)))
                    spath = readpar('sepath')
                    destinationList = SiteMover.getDirList(spath)
                    destination = SiteMover.getMatchingDestinationPath(token, destinationList)
                    if destination == "":
                        tolog("!!WARNING!!2990!! sepath could not be used: sepath = %s, destinationList = %s" %\
                              (spath, str(destinationList)))
            else:
                # space tokens are not used
                destination = destinationList[0]
        else:
            spath = readpar('sepath')
            # sepath could be empty in the case of install jobs, if so, try to use seprodpath instead
            if spath == "":
                spath = readpar('seprodpath')
                tolog("Encountered an empty sepath, trying to use seprodpath instead")
            destinationList = SiteMover.getDirList(spath)
            # decide which destination path to use depending on the space token for the current file
            if token:
                # find the proper path
                destination = SiteMover.getMatchingDestinationPath(token, destinationList)
                if destination == "":
                    tolog("!!WARNING!!2990!! sepath could not be used: sepath = %s, destinationList = %s" %\
                          (spath, str(destinationList)))
            else:
                # space tokens are not used
                destination = destinationList[0]
        return destination
    getDestination = staticmethod(getDestination)
    def addPaths(se_list):
        """ add seprodpaths to the se list entries

        Every combination of SE and schedconfig seprodpath is prepended to
        the original list, and tape areas are then pushed to the back so
        they are never preferred over disk entries.
        """
        tolog("Adding seprodpath(s) to SE list")
        seprodpath = readpar('seprodpath')
        if seprodpath != "":
            new_se_list = []
            seprodpath_list = SiteMover.getDirList(seprodpath)
            for se in se_list:
                for path in seprodpath_list:
                    # strip any trailing slash
                    if se.endswith("/"):
                        se = se[:-1]
                    new_se_list.append(se + path)
            # combined entries go first, originals are kept as fallback
            if new_se_list != []:
                se_list = new_se_list + se_list
        else:
            tolog("WARNING: SiteMover.addPaths(): seprodpath is not set")
        # make sure that any tape areas do not appear before the simple se info
        if se_list != "":
            se_list = SiteMover.pushbackTapeAreas(se_list)
        tolog("Updated SE list:")
        dumpOrderedItems(se_list)
        return se_list
    addPaths = staticmethod(addPaths)
def pullMatchedAreas(se_list, area):
""" Pull forward a given area in the se list """
# e.g.
# se_list = ['srm://whatever/pnfs/atlasproddisk','srm://whatever','srm://whatever/pnfs/atlasdatatape']
# area = atlasdatatape
# -> se_list = ['srm://whatever/pnfs/atlasdatatape','srm://whatever/pnfs/atlasproddisk']
path_priority = []
path_no_priority = []
for path in se_list:
if area in path:
path_priority.append(path)
else:
path_no_priority.append(path)
se_list = path_priority + path_no_priority
return se_list
pullMatchedAreas = staticmethod(pullMatchedAreas)
def pushbackTapeAreas(se_list):
""" Make sure that any tape areas do not appear before the simple se info """
# e.g. se_list = ['srm://whatever/pnfs/atlasproddisk','srm://whatever/pnfs/atlasprodtape','srm://whatever']
# -> se_list = ['srm://whatever/pnfs/atlasproddisk','srm://whatever','srm://whatever/pnfs/atlasprodtape']
# test with:
# from SiteMover import SiteMover
# sm = SiteMover()
# se_list=['srm://whatever/pnfs/atlasproddisk','srm://whatever/pnfs/atlasprodtape','srm://whatever']
# sm.pushbackTapeAreas(se_list)
# -> ['srm://whatever/pnfs/atlasproddisk', 'srm://whatever', 'srm://whatever/pnfs/atlasprodtape']
path_disk = []
path_tape = []
for path in se_list:
if 'tape' in path:
path_tape.append(path)
else:
path_disk.append(path)
se_list = path_disk + path_tape
return se_list
pushbackTapeAreas = staticmethod(pushbackTapeAreas)
def getdCacheFileSize(directory, filename):
    """
    Special workaround to retrieve file size in SEs using dCache and PNFS that
    works when file size>=2GB. PNFS is NFSv3 compliant: supported file size<2GB.
    Suggested by Ofer, dCache expert at BNL:
    "cat %s/\".(use)(2)(%s)\" | grep \'l=\' | sed \'s/.*l=\(.*\);.*/\\1/\'" % (directory, filename)
    Fixed RE: "cat %s/\".(use)(2)(%s)\" | grep \'l=\' | sed \'s/.*l=\([^;]*\);.*/\\1/\'" % (directory, filename)
    The python version below has been reworked with Charles
    This should be no more necessary once Chimera is adopted.
    """
    # Up to 3 attempts, sleeping 3/6/9 s between them; the magic
    # ".(use)(2)(file)" name is a dCache/PNFS virtual file whose content
    # includes "l=<size>;".
    for attempt in range(1,4):
        # NOTE(review): open() may raise if PNFS is not mounted; callers
        # apparently rely on that propagating — confirm.
        f = open("%s/.(use)(2)(%s)" % (directory, filename), 'r')
        data = f.readlines()
        f.close()
        tolog("data = %s" % str(data))
        filesize = None
        for line in data:
            if 'l=' in line:
                for word in line.split(';'):
                    if word.startswith('l='):
                        # keep everything after 'l=' as the size string
                        filesize = word.split('=')[1]
        if filesize:
            break
        # parse failed: log the raw content and retry after a back-off
        # (the "size" placeholder actually receives len(data), i.e. the
        # number of lines read, not a byte count)
        tolog("dCache size retrieval failed. Service (2) file (size: %s) content: %s\n sleeping %s sec." %\
              (len(data), data, attempt*3))
        time.sleep(attempt*3)
    if not filesize:
        tolog('!!WARNING!!2999!! dCache size retrieval failed all attempts, check: %s/".(use)(2)(%s)"' %\
              (directory, filename))
        raise OSError("Wrong dCache system file format")
    # returned as a string, exactly as parsed from the service file
    return filesize
getdCacheFileSize = staticmethod(getdCacheFileSize)
def getdCacheChecksum(dir, filename):
    """ Retrieve the remote checksum in SEs using dCache and PNFS """
    # Note: this function is used by both lcgcp2SiteMover and dCacheSiteMover
    # which is why it is not put in dCacheSiteMover even though it is specific to dCache
    # Returns: checksum string, "NOSUCHFILE" if the service file is absent,
    # or None on failure / when PNFS is not mounted on the worker node.
    fchecksum = None
    # abort check if the remote file system is not visible on the worker node
    if not SiteMover.isPNFSVisible(dir):
        return None
    # up to 3 attempts with increasing back-off
    for attempt in range(1,4):
        try:
            f = open("%s/.(use)(2)(%s)" % (dir, filename), 'r')
        except Exception, e:
            tolog("Warning: Exception caught in getdCacheChecksum(): %s, attempt %d" % (str(e), attempt))
            if "No such file or directory" in str(e):
                # sentinel value understood by callers
                return "NOSUCHFILE"
        else:
            data = f.readlines()
            # data = ['2,0,0,0.0,0.0\n', ':c=1:3ef569d9;h=yes;l=60170430;\n']
            f.close()
            tolog("data = %s" % str(data))
            # the checksum is the part after ':' in the "c=<algo>:<sum>" field
            for line in data:
                if 'c=' in line:
                    for word in line.split(';'):
                        if word.startswith('c=') or word.startswith(':c='):
                            value = word.split('=')[1]
                            fchecksum = value.split(':')[1]
        if fchecksum:
            break
        else:
            tolog("dCache checksum retrieval failed (attempt %d)" % (attempt))
            fchecksum = None
            time.sleep(attempt*3)
    if fchecksum:
        # adler32 or md5sum: only 8 (adler32) or 32 (md5) hex chars are valid
        if len(fchecksum) == 8 or len(fchecksum) == 32:
            pass
        else:
            tolog("!!WARNING!!2999!! Bad dCache checksum: %s" % (fchecksum))
            fchecksum = None
    return fchecksum
getdCacheChecksum = staticmethod(getdCacheChecksum)
def lcgGetChecksum(envsetup, timeout, surl):
    """ get checksum with lcg-get-checksum command """
    # Returns the first 8 characters of the command output (adler32) or
    # None on any failure.
    remote_checksum = None
    output = None
    # determine which timeout option to use
    if SiteMover.isNewLCGVersion("%s lcg-get-checksum" % (envsetup)):
        timeout_option = "--connect-timeout=300 --sendreceive-timeout=%d" % (timeout)
    else:
        timeout_option = "-t %d" % (timeout)
    cmd = "%s lcg-get-checksum -b -T srmv2 %s %s" % (envsetup, timeout_option, surl)
    tolog("Executing command: %s" % (cmd))
    try:
        ec, output = commands.getstatusoutput(cmd)
    except Exception, e:
        tolog("Warning: (Exception caught) lcg-get-checksum failed: %s" % (e))
        output = None
    else:
        if ec != 0 or "[ERROR]" in output:
            tolog("Warning: lcg-get-checksum failed: %d, %s" % (ec, output))
        else:
            tolog(output)
            # are there any warnings we could ignore..?
            if output.startswith('Error'):
                tolog("Will try to remove the Error line in case it is only a warning")
                try:
                    # keep only the last line, assumed to hold the checksum
                    output = output.split('\n')[-1]
                except Exception, e:
                    tolog("Failed to remove the error line: %s" % (e))
                else:
                    tolog("Updated output: %s" % (output))
            try:
                remote_checksum = output[:8]
            except:
                tolog("!!WARNING!!1998!! Cannot extract checksum from output: %s" % (output))
            # NOTE(review): if the slice above raised, remote_checksum is
            # still None here and .isalnum() would raise AttributeError —
            # latent bug, confirm before relying on this path.
            if not remote_checksum.isalnum():
                tolog("!!WARNING!!1998!! Failed to extract alphanumeric checksum string from output: %s" % (output))
                remote_checksum = None
            # import re
            # match = re.findall('([a-zA-Z0-9]+) ', output)
            # if match:
            #     if len(match[0]) == 8:
            #         remote_checksum = match[0]
            #     else:
            #         tolog("!!WARNING!!1998!! Remote checksum is not eight characters long: %s" % (match[0]))
            # else:
            #     tolog("!!WARNING!!1998!! No checksum match in lcg-get-checksum output")
    return remote_checksum
lcgGetChecksum = staticmethod(lcgGetChecksum)
def getRemoteFileInfo(envsetup, timeout, filename):
    """ extract checksum and file size from lcg-ls output """
    # Run lcg-ls once and parse both quantities out of the same listing.
    checksum = None
    fsize = None
    listing = SiteMover.getLCGLS(envsetup, timeout, filename)
    if listing:
        checksum = SiteMover.getRemoteChecksumLCGLS(listing)
        fsize = SiteMover.getRemoteFileSizeLCGLS(listing)
    return checksum, fsize
getRemoteFileInfo = staticmethod(getRemoteFileInfo)
def getLCGLS(envsetup, timeout, filename):
    """ try to get the checksum with lcg-ls """
    # Returns the raw lcg-ls output string, or None when the command
    # failed or raised.
    output = None
    # determine which timeout option to use
    if SiteMover.isNewLCGVersion("%s lcg-ls" % (envsetup)):
        timeout_option = "--connect-timeout=300 --sendreceive-timeout=%d" % (timeout)
    else:
        timeout_option = "-t %d" % (timeout)
    # get the output
    cmd = '%s lcg-ls -l -b %s -T srmv2 %s' % (envsetup, timeout_option, filename)
    tolog("Executing command: %s" % (cmd))
    try:
        ec, output = commands.getstatusoutput(cmd)
    except Exception, e:
        tolog("Warning: (Exception caught) lcg-ls failed: %s" % str(e))
        output = None
    else:
        if ec != 0:
            tolog("Warning: lcg-ls failed: %d, %s" % (ec, output))
            output = None
        else:
            tolog(output)
    return output
getLCGLS = staticmethod(getLCGLS)
def getRemoteChecksumLCGLS(output):
    """ extract checksum from lcg-ls output """
    # Thin wrapper around _getRemoteChecksumLCGLS that maps every
    # non-zero error code to an empty checksum string.
    ec, _, checksum = SiteMover._getRemoteChecksumLCGLS(output)
    if ec != 0:
        if ec == -2:
            # outdated lcg-ls or not supported checksum feature
            tolog("lcg-ls: cannot extract checksum from command output (not supported or command version too old)")
        elif ec != -1:
            # any unexpected code (-1 is the silent general-error case)
            tolog("Warning: getRemoteChecksumLCGLS() failed")
        checksum = ""
    return checksum
getRemoteChecksumLCGLS = staticmethod(getRemoteChecksumLCGLS)
def getRemoteFileSizeLCGLS(output):
    """ extract file size from lcg-ls output """
    # Thin wrapper around _getRemoteFileSizeLCGLS that maps every
    # non-zero error code to an empty size string.
    ec, _, fsize = SiteMover._getRemoteFileSizeLCGLS(output)
    if ec != 0:
        if ec != -1:
            # -1 is the silent general-error case; anything else is unexpected
            tolog("Warning: getRemoteFileSizeLCGLS() failed")
        fsize = ""
    return fsize
getRemoteFileSizeLCGLS = staticmethod(getRemoteFileSizeLCGLS)
def getRemoteFileSize(envsetup, timeout, filename):
    """ extract the remote file size using lcg-ls """
    # Exit: file size (string); empty string on failure
    # determine which timeout option to use
    if SiteMover.isNewLCGVersion("%s lcg-ls" % (envsetup)):
        timeout_option = "--connect-timeout=300 --sendreceive-timeout=%d" % (timeout)
    else:
        timeout_option = "-t %d" % (timeout)
    # does the file exist?
    cmd = '%s lcg-ls -l -b %s -T srmv2 %s' % (envsetup, timeout_option, filename)
    ec, _, fsize = SiteMover._getRemoteFileSize(cmd)
    if ec != 0:
        if ec != -1:
            # -1 is the silent general-error case; anything else is unexpected
            tolog("Warning: getRemoteFileSize() failed")
        fsize = ""
    return fsize
getRemoteFileSize = staticmethod(getRemoteFileSize)
def getEnvsetup(get=False, alt=False):
    """ return a proper envsetup """
    # Builds the shell prefix that every grid command is executed with:
    # the schedconfig 'envsetup' (or 'envsetupin' for get operations),
    # normalized to end with ';' and extended with the user proxy export.
    if get:
        envsetup = readpar('envsetupin', alt=alt)
        if envsetup == "":
            tolog("Using envsetup since envsetupin is not set")
            envsetup = readpar('envsetup', alt=alt)
    else:
        envsetup = readpar('envsetup', alt=alt)
    envsetup = envsetup.strip()
    # make sure the setup string is ';'-terminated so commands can be appended
    if envsetup != "" and not envsetup.endswith(';'):
        envsetup += ";"
    # get the user proxy if available
    # (dict.has_key() is deprecated and removed in Python 3; 'in' behaves
    # identically on both Python 2 and 3)
    if 'X509_USER_PROXY' in os.environ:
        envsetup += "export X509_USER_PROXY=%s;" % (os.environ['X509_USER_PROXY'])
    # collapse any accidental double separators
    return envsetup.replace(";;",";")
getEnvsetup = staticmethod(getEnvsetup)
def _getRemoteChecksumLCGLS(output):
    """ extract the remote checksum from lcg-ls output """
    # Exit: (ec, pilotErrorDiag, checksum string)
    #   ec =  0 : checksum extracted and validated
    #   ec = -1 : general error (bad output or invalid checksum)
    #   ec = -2 : checksum feature absent / dummy / deprecated option
    # example command plus output
    # lcg-ls -l -b -T srmv2 srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv2?SFN=/pnfs/usatlas.bnl.gov/BNLT0D1/data10_7TeV/
    # NTUP_TRKVALID/RAW_r1239/data10_7TeV.00152878.physics_MinBias.merge.NTUP_TRKVALID.RAW_r1239_tid134218_00/
    # NTUP_TRKVALID.134218._001789.root.2
    # -rw-r--r-- 1 2 2 241781863 ONLINE /pnfs/usatlas.bnl.gov/BNLT0D1/data10_7TeV/NTUP_TRKVALID/
    # RAW_r1239/data10_7TeV.00152878.physics_MinBias.merge.NTUP_TRKVALID.RAW_r1239_tid134218_00/NTUP_TRKVALID.134218._001789.root.2
    # * Checksum: 89526bb2 (adler32)
    # * Space tokens: 10000
    remote_checksum = ""
    pilotErrorDiag = ""
    ec = 0
    # lcg-ls exits with error code 0 even if there was a problem
    if "CacheException" in output:
        pilotErrorDiag = "lcg-ls failed: %s" % (output)
        tolog("!!WARNING!!2999!! %s (CacheException found)" % (pilotErrorDiag))
        ec = -1
    elif "is deprecated" in output:
        pilotErrorDiag = "Deprecated option(s) in lcg-ls command: %s" % (output)
        tolog("WARNING: %s" % (pilotErrorDiag))
        ec = -2
    else:
        # extract checksum from a line of the form: * Checksum: 89526bb2 (adler32)
        pattern = re.compile(r"Checksum\:\ ([0-9a-zA-Z\-]+)")
        found = re.findall(pattern, output)
        if len(found) == 0:
            tolog("Checksum pattern not found in output (skip this checksum extraction method)")
            ec = -2
        else:
            # did we get a valid checksum?
            try:
                remote_checksum = found[0]
                # adler32: 8 hex characters
                if len(remote_checksum) == 8:
                    if remote_checksum == "0"*8:
                        # all-zero checksum means the SE did not compute one
                        tolog("Encountered dummy checksum: %s" % (remote_checksum))
                        remote_checksum = ""
                        ec = -2
                    elif remote_checksum.isalnum():
                        tolog("Remote adler32 checksum has correct length and is alphanumeric")
                    else:
                        tolog("!!WARNING!!2999!! Adler32 checksum is not alphanumeric")
                        remote_checksum = ""
                        ec = -1
                # md5sum: 32 hex characters
                elif len(remote_checksum) == 32:
                    if remote_checksum == "0"*32:
                        tolog("Encountered dummy checksum: %s" % (remote_checksum))
                        remote_checksum = ""
                        ec = -2
                    else:
                        tolog("Remote md5 checksum has correct length")
                # unknown length: reject
                else:
                    tolog("!!WARNING!!2999!! Remote checksum does not have correct length: %d, %s (must have length 8 or 32)" %\
                          (len(remote_checksum), remote_checksum))
                    remote_checksum = ""
                    ec = -1
            except Exception, e:
                pilotErrorDiag = "Checksum is not valid: %s (reset to empty string): %s" % (remote_checksum, str(e))
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                remote_checksum = ""
                ec = -1
    return ec, pilotErrorDiag, remote_checksum
_getRemoteChecksumLCGLS = staticmethod(_getRemoteChecksumLCGLS)
def _getRemoteFileSizeLCGLS(output):
    """ extract the remote file size from lcg-ls output """
    # Exit: (ec, pilotErrorDiag, file size string)
    #   ec =  0 : size extracted and is an integer
    #   ec = -1 : general error
    #   ec = -2 : deprecated lcg-ls option(s)
    # usage: lcg-ls [-l] [-d] [-D,--defaultsetype se|srmv1|srmv2] [-T,--setype se|srmv1|srmv2]
    # lcg-ls -l -b -t 18000 -T srmv2 srm://iut2-dc1.iu.edu:8443/srm/managerv2?SFN=/pnfs/iu.edu/atlasproddisk/testpanda/
    # testpanda.destDB.8b34ab22-a320-4822-ab7d-863db674b565_sub05026945/312ea784-bb8a-4465-945e-5894eea18ba5_0.evgen.pool.root
    # -rw-r--r-- 1 2 2 80601940 ONLINE /pnfs/iu.edu/atlasproddisk/testpanda/testpanda.destDB.8b34ab22 ...
    remote_fsize = ""
    pilotErrorDiag = ""
    ec = 0
    # lcg-ls exits with error code 0 even if there was a problem
    if "CacheException" in output:
        pilotErrorDiag = "lcg-ls failed: %s" % (output)
        tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
        ec = -1
    elif "is deprecated" in output:
        pilotErrorDiag = "Deprecated option(s) in lcg-ls command: %s" % (output)
        tolog("WARNING: %s" % (pilotErrorDiag))
        ec = -2
    else:
        # extract file size: 5th whitespace-separated field of the listing line
        try:
            # are there any warnings we could ignore..?
            if output.startswith('Error'):
                tolog("Will try to remove the Error line in case it is only a warning (is the file size in the second line?)")
                try:
                    output = output.split('\n')[1] # assuming file size in second line
                except Exception, e:
                    tolog("Failed to remove the error line: %s" % (e))
                else:
                    tolog("Updated output: %s" % (output))
            # collapse runs of spaces to single spaces so split() indexing is stable
            # (the two-space literal was collapsed by whitespace mangling in
            # this copy; a single-space search/replace would loop forever)
            while "  " in output:
                output = output.replace("  ", " ")
            _output = output.split(" ")
            remote_fsize = _output[4]
        except Exception, e:
            pilotErrorDiag = "_getRemoteFileSizeLCGLS caught an exception: %s" % str(e)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            ec = -1
        else:
            # did we get an integer?
            try:
                _dummy = int(remote_fsize)
            except:
                pilotErrorDiag = "File size is not an integer: %s (reset to empty string)" % (remote_fsize)
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                remote_fsize = ""
                ec = -1
    return ec, pilotErrorDiag, remote_fsize
_getRemoteFileSizeLCGLS = staticmethod(_getRemoteFileSizeLCGLS)
def _getRemoteFileSize(cmd):
    """ extract the remote file size using lcg-ls """
    # Runs the given lcg-ls command and parses the size field.
    # Exit: (ec, pilotErrorDiag, file size string)
    #   ec =  0 : size extracted and is an integer
    #   ec = -1 : command or parse failure
    #   ec = -2 : deprecated lcg-ls option(s)
    # usage: lcg-ls [-l] [-d] [-D,--defaultsetype se|srmv1|srmv2] [-T,--setype se|srmv1|srmv2]
    # lcg-ls -l -b -t 18000 -T srmv2 srm://iut2-dc1.iu.edu:8443/srm/managerv2?SFN=/pnfs/iu.edu/atlasproddisk/testpanda/
    # testpanda.destDB.8b34ab22-a320-4822-ab7d-863db674b565_sub05026945/312ea784-bb8a-4465-945e-5894eea18ba5_0.evgen.pool.root
    # -rw-r--r-- 1 2 2 80601940 ONLINE /pnfs/iu.edu/atlasproddisk/testpanda/testpanda.destDB.8b34ab22 ...
    remote_fsize = ""
    pilotErrorDiag = ""
    ec = 0
    # does the file exist?
    tolog("Executing command: %s" % (cmd))
    try:
        _ec, rs = commands.getstatusoutput(cmd)
    except Exception, e:
        pilotErrorDiag = "lcg-ls failed: %s" % str(e)
        tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
        ec = -1
    else:
        if _ec != 0:
            pilotErrorDiag = "lcg-ls failed: %d, %s" % (_ec, rs)
            tolog("Warning: %s" % (pilotErrorDiag))
            ec = -1
        else:
            # lcg-ls exits with error code 0 even if there was a problem
            if "CacheException" in rs:
                pilotErrorDiag = "lcg-ls failed: %s" % (rs)
                tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                ec = -1
            elif "is deprecated" in rs:
                pilotErrorDiag = "Deprecated option(s) in lcg-ls command: %s" % (rs)
                tolog("WARNING: %s" % (pilotErrorDiag))
                ec = -2
            else:
                tolog(rs)
                # extract file size: 5th whitespace-separated field
                try:
                    # collapse runs of spaces to single spaces so split() indexing
                    # is stable (two-space literal restored; it was collapsed by
                    # whitespace mangling in this copy)
                    while "  " in rs:
                        rs = rs.replace("  ", " ")
                    _rs = rs.split(" ")
                    remote_fsize = _rs[4]
                except Exception, e:
                    pilotErrorDiag = "_getRemoteFileSize caught an exception: %s" % str(e)
                    tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                    ec = -1
                else:
                    # did we get an integer?
                    try:
                        _dummy = int(remote_fsize)
                    except:
                        pilotErrorDiag = "File size is not an integer: %s (reset to empty string)" % (remote_fsize)
                        tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
                        remote_fsize = ""
                        ec = -1
    return ec, pilotErrorDiag, remote_fsize
_getRemoteFileSize = staticmethod(_getRemoteFileSize)
def removeLocal(filename):
    """ Remove the local file in case of failure to prevent problem with get retry attempt """
    # Returns True only when the file existed and was actually deleted.
    removed = False
    if not os.path.exists(filename):
        tolog("Nothing to remove (file %s does not exist)" % (filename))
        return removed
    try:
        os.remove(filename)
    except OSError:
        tolog("Could not remove the local file: %s" % (filename))
    else:
        removed = True
        tolog("Removed local file: %s" % (filename))
    return removed
removeLocal = staticmethod(removeLocal)
def removeFile(envsetup, timeout, filename, nolfc=False, lfcdir=None):
    """ Remove file from SE and unregister from LFC (if necessary) """
    # Occationally when lcg-cp fails during transfer it does not remove the touched or partially transferred file
    # in those cases the pilot will be in charge of removing the file
    # Exit: 0 on success, -1 on exception, -2 on lcg-del failure
    #       (code for 'do not retry stage-out')
    # if nolfc=True, the file will not need to be removed from the LFC
    # NOTE(review): lfcdir is currently unused (the cleanup block below is
    # commented out) — confirm before removing the parameter.
    ec = -1
    # take a 1 m nap before trying to reach the file (it might not be available immediately after a transfer)
    tolog("Taking a 1 m nap before the file removal attempt")
    time.sleep(60)
    # get the remote file size (i.e. verify that the file exists)
    try:
        remote_fsize = SiteMover.getRemoteFileSize(envsetup, timeout, filename)
    except Exception, e:
        tolog('Warning: Could not get remote file size: %s (test will be skipped)' % str(e))
        remote_fsize = None
    if remote_fsize and remote_fsize != "":
        tolog("Remote file exists (has file size: %s)" % (remote_fsize))
    else:
        tolog("Remote file does not exist (attempting removal from LFC at least)")
        if nolfc:
            # change this flag since there might be a registered LFC entry
            tolog("Will not use --nolfc with lcg-del")
            nolfc = False
    # try to remove the file
    if nolfc:
        extra = "--nolfc"
    else:
        extra = ""
    cmd = '%s lcg-del --vo atlas --verbose -b -l -T srmv2 -t %d %s %s' % (envsetup, timeout, extra, filename)
    tolog("Executing command: %s" % (cmd))
    try:
        ec, rs = commands.getstatusoutput(cmd)
    except Exception, e:
        tolog("Warning: Exception caught in removeFile: %s" % (e))
    else:
        tolog(rs)
        if ec == 0:
            tolog("Successfully removed file: %s" % (filename))
        else:
            tolog("Could not remove file: ec = %d" % (ec))
            ec = -2 # code for 'do not retry stage-out'
            # remove the created LFC file and dir
            # if lfcdir:
            #     # first remove the LFC file
            #     SiteMover.lfcrm(envsetup, os.path.join(lfcdir, os.path.basename(filename)))
            #     # then remove the LFC dir
            #     SiteMover.lfcrm(envsetup, lfcdir)
    return ec
removeFile = staticmethod(removeFile)
def lfcrm(envsetup, fname):
    """ Remove a file or directory in LFC with lfc-rm """
    # Best-effort: failures are logged and ignored; no value is returned.
    cmd = "%s lfc-rm -rf %s" % (envsetup, fname)
    tolog("Executing command: %s" % (cmd))
    try:
        ec, rs = commands.getstatusoutput(cmd)
    except Exception, e:
        # NOTE(review): message says "removeFile" — copy/paste from the
        # sibling method; the actual location is lfcrm()
        tolog("Warning: Exception caught in removeFile: %s" % (e))
    else:
        if ec == 0:
            tolog("Removed LFC entry: %s" % (fname))
        else:
            tolog("Could not remove LFC entry: %d, %s (ignore)" % (ec, rs))
lfcrm = staticmethod(lfcrm)
def isPNFSVisible(dir):
    """ Is /pnfs/subdir visible on the worker node? """
    # Example:
    #   dir = '/pnfs/aglt2.org/atlasproddisk/testpanda/...'
    #   -> probes os.listdir('/pnfs/aglt2.org')
    visible = False
    components = dir.split('/')
    mount_root = os.path.join('/', components[1], components[2])
    tolog('subdir: %s' % str(components))
    tolog('path: %s' % mount_root)
    tolog('dir: %s' % dir)
    if components[1] != 'pnfs':
        tolog("/pnfs is not visible")
        return visible
    try:
        entries = os.listdir(mount_root)
    except:
        tolog("%s is not visible" % (mount_root))
    else:
        tolog("%s is visible: %s" % (mount_root, entries))
        visible = True
    return visible
isPNFSVisible = staticmethod(isPNFSVisible)
def isNewLCGVersion(cmd):
    """ return True if LCG command is newer than version ... """
    # New versions of the lcg-cr/cp commands deprecates -t option
    # --timeout-* will be used instead; detect by probing the help text.
    help_text = commands.getoutput("%s -h" % (cmd))
    tolog("%s help: %s" % (cmd, help_text))
    return "--connect-timeout" in help_text
isNewLCGVersion = staticmethod(isNewLCGVersion)
def getTier3Path(dsname, DN):
    """ Create a simple path for Tier 3 files """
    # Layout: <se>/<year>/<username>/<dataset>, e.g.
    # 2010/TorreWenaus/FullDatasetName/Filename
    # Returns None if the base path or the username cannot be determined.
    base = readpar("se")
    if base == "":
        return None
    username = SiteMover.extractUsername(DN)
    if not username:
        return None
    year = time.strftime("%Y")
    return os.path.join(base, year, username, dsname)
getTier3Path = staticmethod(getTier3Path)
def extractUsername(DN):
    """ Extract the user name without whitespaces from the DN """
    # Takes the first CN= component of the certificate DN, strips the
    # trailing numeric id, then removes spaces and dots.
    # Returns "" on any parse failure.
    try:
        pattern = re.compile(r"./CN=([A-Za-z0-9\.\s]+).")
        txt = re.findall(pattern, DN)[0]
        # remove the numbers and spaces
        pattern = re.compile(r".([0-9]+).")
        numbers = re.findall(pattern, txt)[0]
        # keep everything before the first digit group
        username = txt[:txt.find(numbers)]
    except Exception, e:
        tolog("!!WARNING!!2999!! Exception caught in extractUsername(): %s" % str(e))
        username = ""
    else:
        username = username.replace(' ', '')
        username = username.replace('.', '')
        tolog("Will use username %s for path creation" % (username))
    return username
extractUsername = staticmethod(extractUsername)
def removeSubFromDatasetName(dsname):
    """ Tmp function """
    # Strips a trailing "_subNNN" suffix from a dataset name, if present;
    # returns the name unchanged otherwise.
    # NOTE(review): pattern is not a raw string; '\S' and '\_' happen to
    # survive as intended but r'...' would be safer.
    pattern = re.compile('\S+(\_sub[0-9]+)')
    match = pattern.match(dsname)
    if match:
        # strip away the _subNNN string
        try:
            dsname = dsname.replace(match.group(1), '')
        except Exception, e:
            tolog("!!WARNING!!1119!! Failed to remove _sub string (%s) from dataset name: %s" % (match.group(1), e))
        else:
            tolog("Updated dataset name (removed %s): %s" % (match.group(1), dsname))
    else:
        tolog("Found no _subNNN string in the dataset name")
    return dsname
removeSubFromDatasetName = staticmethod(removeSubFromDatasetName)
def isOneByOneFileTransfer(self):
    """ Should files be transferred one by one or all at once? """
    # Base-class default: one file per transfer.
    # override this method in the relevant site mover (e.g. aria2cSiteMover)
    return True
def checkForDirectAccess(self, lfn, useCT, workDir, jobId, prodDBlockToken):
    """ Should the file be transferred or read directly [later] by athena? """
    # Returns True when the file will be read remotely (direct access /
    # file stager) and records the chosen transfer mode in the file state
    # via updateFileState(). Direct access is disabled for copy-tool jobs
    # (useCT), for files with prodDBlockToken == 'local', and for
    # non-root files.
    # get the transfer modes (direct access, file stager)
    directIn, useFileStager = self.getTransferModes()
    if directIn:
        from FileStateClient import updateFileState
        if useCT:
            directIn = False
            tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
            updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", ftype="input")
        else:
            # determine if the file is a root file according to its name
            rootFile = self.isRootFileName(lfn)
            if prodDBlockToken == 'local' or not rootFile:
                directIn = False
                tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
                updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", ftype="input")
            elif rootFile:
                tolog("Found root file according to file name: %s (will not be transferred in direct reading mode)" % (lfn))
                # file stager mode takes precedence over plain remote I/O
                if useFileStager:
                    updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", ftype="input")
                else:
                    updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", ftype="input")
            else:
                tolog("Normal file transfer")
    else:
        tolog("not directIn")
    return directIn
def getPathFromScope(self, scope, lfn):
    """ Construct a partial PFN using the scope and the LFN """
    # Rucio deterministic path:
    # /<site_prefix>/<space_token>/rucio/<scope>/md5(<scope>:<lfn>)[0:2]/md5(<scope:lfn>)[2:4]/<lfn>
    try:
        # for python 2.6+
        import hashlib
        hash = hashlib.md5()
    except:
        # for python 2.4
        import md5
        hash = md5.new()
    # scope dots become path separators, e.g. "mc10.evgen" -> "mc10/evgen"
    correctedscope = "/".join(scope.split('.'))
    # hashlib.update() requires bytes on Python 3; encoding is a no-op for
    # the ASCII scope/lfn names used here, so Python 2 behavior is unchanged
    hash.update(('%s:%s' % (scope, lfn)).encode('utf-8'))
    hash_hex = hash.hexdigest()[:6]
    return 'rucio/%s/%s/%s/%s' % (correctedscope, hash_hex[0:2], hash_hex[2:4], lfn)
def getFullPath(self, scope, spacetoken, lfn, analyJob, prodSourceLabel, alt=False):
    """ Construct a full PFN using site prefix, space token, scope and LFN """
    # <protocol>://<hostname>:<port>/<protocol_prefix>/ + <site_prefix>/<space_token>/rucio/<scope>/md5(<scope>:<lfn>)[0:2]/md5(<scope:lfn>)[2:4]/<lfn>
    full_path = ""
    # Get the SE info (first comma-separated entry of the 'se' field)
    se = readpar('se', alt=alt).split(",")[0]
    _spacetoken, se = SiteMover.extractSE(se)
    tolog("Extracted spacetoken=%s, se=%s" % (_spacetoken, se))
    # remove any unwanted stage-in info (present at CERN for atlasdatatape)
    se = SiteMover.filterSE(se)
    tolog("Full path will use SE: %s" % (se))
    # Use default space token from se field unless specified
    if spacetoken == "" or spacetoken == "NULL":
        # get the default space token from se
        spacetoken = _spacetoken
        tolog("Full path will use default space token descriptor: %s" % (spacetoken))
    else:
        tolog("Full path will use space token descriptor: %s" % (spacetoken))
    # Get the SE path from se[prod]path
    # E.g. /dpm/grid.sinica.edu.tw/home/atlas/atlasscratchdisk/
    destination = self.getPreDestination(analyJob, spacetoken, prodSourceLabel, alt=alt)
    tolog("Full path will use source/destination: %s" % (destination))
    # if the source/destination already has a trailing /rucio, remove it since it will be added by the scope path below
    if destination.endswith("/rucio"):
        destination = destination[:-len("/rucio")] # cut away the trailing /rucio bit
    # Get the path from the scope and LFN
    scope_path = self.getPathFromScope(scope, lfn)
    tolog("Full path will use path from scope: %s" % (scope_path))
    # Construct final full path: SE prefix + destination + rucio scope path
    full_path = se + destination
    full_path = os.path.join(full_path, scope_path)
    return full_path
def getGlobalFilePaths(self, surl, scope, computingSite, sourceSite):
    """ Get the global file paths """
    # Note: this method depends on the site mover used, so should be defined there, and as virtual here
    # (see e.g. FAXSiteMover for implementations)
    # Base-class default: no global paths available.
    return []
def getTimeOut(self, filesize):
    """ Get a proper time-out limit based on the file size """
    # Linear model clamped to [5 min, 6 h]:
    #   timeout = floor + filesize / 400 kB  (seconds)
    # timeout_default = 3600
    ceiling = 6*3600
    floor = 5*60
    computed = floor + int(filesize/400000.0)
    return computed if computed <= ceiling else ceiling
| apache-2.0 |
ppries/tensorflow | tensorflow/python/util/deprecation_test.py | 3 | 27171 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class DeprecationTest(tf.test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
  """Asserts that every element of expected_subset is in actual_set."""
  self.assertTrue(
      expected_subset.issubset(actual_set),
      msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
  """deprecated() rejects missing, empty, or malformed date/instructions."""
  instructions = "This is how you update..."
  # date must be present and non-empty
  with self.assertRaisesRegexp(ValueError, "date"):
    deprecation.deprecated(None, instructions)
  with self.assertRaisesRegexp(ValueError, "date"):
    deprecation.deprecated("", instructions)
  # date must be ISO formatted (YYYY-MM-DD)
  with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
    deprecation.deprecated("07-04-2016", instructions)
  date = "2016-07-04"
  # instructions must be present and non-empty
  with self.assertRaisesRegexp(ValueError, "instructions"):
    deprecation.deprecated(date, None)
  with self.assertRaisesRegexp(ValueError, "instructions"):
    deprecation.deprecated(date, "")
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
  """Decorating a function with a multi-line docstring appends the notice."""
  date = "2016-07-04"
  instructions = "This is how you update..."

  @deprecation.deprecated(date, instructions)
  def _fn(arg0, arg1):
    """fn doc.

    Args:
      arg0: Arg 0.
      arg1: Arg 1.

    Returns:
      Sum of args.
    """
    return arg0 + arg1

  # Assert function docs are properly updated: the deprecation notice is
  # inserted after the summary line, before the Args section.
  self.assertEqual("_fn", _fn.__name__)
  self.assertEqual(
      "fn doc. (deprecated)"
      "\n"
      "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
      "\nInstructions for updating:\n%s"
      "\n"
      "\nArgs:"
      "\n  arg0: Arg 0."
      "\n  arg1: Arg 1."
      "\n"
      "\nReturns:"
      "\n  Sum of args." % (date, instructions),
      _fn.__doc__)

  # Assert calling new fn issues log warning (exactly once, mentioning
  # both the date and the instructions).
  self.assertEqual(3, _fn(1, 2))
  self.assertEqual(1, mock_warning.call_count)
  (args, _) = mock_warning.call_args
  self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
  self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
  """Decorating a function with a one-line docstring appends the notice."""
  date = "2016-07-04"
  instructions = "This is how you update..."

  @deprecation.deprecated(date, instructions)
  def _fn(arg0, arg1):
    """fn doc."""
    return arg0 + arg1

  # Assert function docs are properly updated.
  self.assertEqual("_fn", _fn.__name__)
  self.assertEqual(
      "fn doc. (deprecated)"
      "\n"
      "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
      "\nInstructions for updating:\n%s" % (date, instructions),
      _fn.__doc__)

  # Assert calling new fn issues log warning.
  self.assertEqual(3, _fn(1, 2))
  self.assertEqual(1, mock_warning.call_count)
  (args, _) = mock_warning.call_args
  self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
  self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
  """Decorating an undocumented function synthesizes a full docstring."""
  date = "2016-07-04"
  instructions = "This is how you update..."

  @deprecation.deprecated(date, instructions)
  def _fn(arg0, arg1):
    return arg0 + arg1

  # Assert function docs are properly updated.
  self.assertEqual("_fn", _fn.__name__)
  self.assertEqual(
      "DEPRECATED FUNCTION"
      "\n"
      "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
      "\nInstructions for updating:"
      "\n%s" % (date, instructions),
      _fn.__doc__)

  # Assert calling new fn issues log warning.
  self.assertEqual(3, _fn(1, 2))
  self.assertEqual(1, mock_warning.call_count)
  (args, _) = mock_warning.call_args
  self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
  self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_doc(self, mock_warning):
  """Decorating an instance method with a full docstring appends the notice."""
  date = "2016-07-04"
  instructions = "This is how you update..."

  class _Object(object):

    # NOTE(review): "__init" (sic, single trailing underscore pair missing)
    # is never called; the default object constructor is used instead.
    def __init(self):
      pass

    @deprecation.deprecated(date, instructions)
    def _fn(self, arg0, arg1):
      """fn doc.

      Args:
        arg0: Arg 0.
        arg1: Arg 1.

      Returns:
        Sum of args.
      """
      return arg0 + arg1

  # Assert function docs are properly updated.
  self.assertEqual(
      "fn doc. (deprecated)"
      "\n"
      "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
      "\nInstructions for updating:\n%s"
      "\n"
      "\nArgs:"
      "\n  arg0: Arg 0."
      "\n  arg1: Arg 1."
      "\n"
      "\nReturns:"
      "\n  Sum of args." % (date, instructions),
      getattr(_Object, "_fn").__doc__)

  # Assert calling new fn issues log warning.
  self.assertEqual(3, _Object()._fn(1, 2))
  self.assertEqual(1, mock_warning.call_count)
  (args, _) = mock_warning.call_args
  self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
  self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_one_line_doc(self, mock_warning):
  """Decorating an instance method with a one-line docstring appends the notice."""
  date = "2016-07-04"
  instructions = "This is how you update..."

  class _Object(object):

    def __init(self):
      pass

    @deprecation.deprecated(date, instructions)
    def _fn(self, arg0, arg1):
      """fn doc."""
      return arg0 + arg1

  # Assert function docs are properly updated.
  self.assertEqual(
      "fn doc. (deprecated)"
      "\n"
      "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
      "\nInstructions for updating:\n%s" % (date, instructions),
      getattr(_Object, "_fn").__doc__)

  # Assert calling new fn issues log warning.
  self.assertEqual(3, _Object()._fn(1, 2))
  self.assertEqual(1, mock_warning.call_count)
  (args, _) = mock_warning.call_args
  self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
  self._assert_subset(set([date, instructions]), set(args[1:]))
  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_instance_fn_no_doc(self, mock_warning):
    """Deprecating an instance method that has no docstring synthesizes one."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    class _Object(object):
      def __init(self):
        pass
      @deprecation.deprecated(date, instructions)
      def _fn(self, arg0, arg1):
        return arg0 + arg1
    # Assert function docs are properly updated.
    self.assertEqual(
        "DEPRECATED FUNCTION"
        "\n"
        "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
        "\nInstructions for updating:"
        "\n%s" % (date, instructions),
        getattr(_Object, "_fn").__doc__)
    # Assert calling new fn issues log warning.
    self.assertEqual(3, _Object()._fn(1, 2))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))
  def test_prop_wrong_order(self):
    """@deprecated placed above @property must raise a helpful ValueError."""
    with self.assertRaisesRegexp(
        ValueError,
        "make sure @property appears before @deprecated in your source code"):
      # pylint: disable=unused-variable
      class _Object(object):
        def __init(self):
          pass
        @deprecation.deprecated("2016-07-04", "Instructions.")
        @property
        def _prop(self):
          return "prop_wrong_order"
  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_prop_with_doc(self, mock_warning):
    """Deprecating a property (below @property) rewrites its docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    class _Object(object):
      def __init(self):
        pass
      @property
      @deprecation.deprecated(date, instructions)
      def _prop(self):
        """prop doc.
        Returns:
          String.
        """
        return "prop_with_doc"
    # Assert function docs are properly updated.
    self.assertEqual(
        "prop doc. (deprecated)"
        "\n"
        "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
        "\nInstructions for updating:"
        "\n%s"
        "\n"
        "\nReturns:"
        "\n  String." % (date, instructions),
        getattr(_Object, "_prop").__doc__)
    # Assert calling new fn issues log warning.
    self.assertEqual("prop_with_doc", _Object()._prop)
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))
  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_prop_no_doc(self, mock_warning):
    """Deprecating a property without a docstring synthesizes one."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    class _Object(object):
      def __init(self):
        pass
      @property
      @deprecation.deprecated(date, instructions)
      def _prop(self):
        return "prop_no_doc"
    # Assert function docs are properly updated.
    self.assertEqual(
        "DEPRECATED FUNCTION"
        "\n"
        "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
        "\nInstructions for updating:"
        "\n%s" % (date, instructions),
        getattr(_Object, "_prop").__doc__)
    # Assert calling new fn issues log warning.
    self.assertEqual("prop_no_doc", _Object()._prop)
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))
class DeprecatedArgsTest(tf.test.TestCase):
  """Unit tests for the deprecation.deprecated_args decorator."""

  def _assert_subset(self, expected_subset, actual_set):
    """Asserts that actual_set contains every element of expected_subset."""
    self.assertTrue(
        actual_set.issuperset(expected_subset),
        msg="%s is not a superset of %s." % (actual_set, expected_subset))

  def test_deprecated_illegal_args(self):
    """Invalid dates, empty instructions or missing argument names raise."""
    instructions = "This is how you update..."
    date = "2016-07-04"
    with self.assertRaisesRegexp(ValueError, "date"):
      deprecation.deprecated_args(None, instructions, "deprecated")
    with self.assertRaisesRegexp(ValueError, "date"):
      deprecation.deprecated_args("", instructions, "deprecated")
    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
      deprecation.deprecated_args("07-04-2016", instructions, "deprecated")
    with self.assertRaisesRegexp(ValueError, "instructions"):
      deprecation.deprecated_args(date, None, "deprecated")
    with self.assertRaisesRegexp(ValueError, "instructions"):
      deprecation.deprecated_args(date, "", "deprecated")
    with self.assertRaisesRegexp(ValueError, "argument"):
      deprecation.deprecated_args(date, instructions)

  def test_deprecated_missing_args(self):
    """Naming an argument the decorated function lacks raises ValueError."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    def _fn(arg0, arg1, deprecated=None):
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Decorating a function that lacks the named argument raises ValueError.
    with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"):
      deprecation.deprecated_args(date, instructions, "missing")(_fn)

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_static_fn_with_doc(self, mock_warning):
    """Docstring rewrite and warning for a function with a full docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(date, instructions, "deprecated")
    def _fn(arg0, arg1, deprecated=True):
      """fn doc.
      Args:
        arg0: Arg 0.
        arg1: Arg 1.
        deprecated: Deprecated!
      Returns:
        Sum of args.
      """
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert function docs are properly updated.
    self.assertEqual("_fn", _fn.__name__)
    self.assertEqual(
        "fn doc. (deprecated arguments)"
        "\n"
        "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
        "\nInstructions for updating:\n%s"
        "\n"
        "\nArgs:"
        "\n  arg0: Arg 0."
        "\n  arg1: Arg 1."
        "\n  deprecated: Deprecated!"
        "\n"
        "\nReturns:"
        "\n  Sum of args." % (date, instructions),
        _fn.__doc__)
    # Assert calls without the deprecated argument log nothing.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated argument log a warning.
    self.assertEqual(3, _fn(1, 2, True))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_static_fn_with_one_line_doc(self, mock_warning):
    """Docstring rewrite and warning for a one-line docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(date, instructions, "deprecated")
    def _fn(arg0, arg1, deprecated=True):
      """fn doc."""
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert function docs are properly updated.
    self.assertEqual("_fn", _fn.__name__)
    self.assertEqual(
        "fn doc. (deprecated arguments)"
        "\n"
        "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
        "\nInstructions for updating:\n%s" % (date, instructions),
        _fn.__doc__)
    # Assert calls without the deprecated argument log nothing.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated argument log a warning.
    self.assertEqual(3, _fn(1, 2, True))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_static_fn_no_doc(self, mock_warning):
    """Docstring synthesis and warning for a function without a docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(date, instructions, "deprecated")
    def _fn(arg0, arg1, deprecated=True):
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert function docs are properly updated.
    self.assertEqual("_fn", _fn.__name__)
    self.assertEqual(
        "DEPRECATED FUNCTION ARGUMENTS"
        "\n"
        "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
        "\nInstructions for updating:"
        "\n%s" % (date, instructions),
        _fn.__doc__)
    # Assert calls without the deprecated argument log nothing.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated argument log a warning.
    self.assertEqual(3, _fn(1, 2, True))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_varargs(self, mock_warning):
    """A deprecated *varargs parameter warns only when extras are passed."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(date, instructions, "deprecated")
    def _fn(arg0, arg1, *deprecated):
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert calls without the deprecated argument log nothing.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated argument log a warning.
    self.assertEqual(3, _fn(1, 2, True, False))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_kwargs(self, mock_warning):
    """A deprecated **kwargs parameter warns only when keywords are passed."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(date, instructions, "deprecated")
    def _fn(arg0, arg1, **deprecated):
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert calls without the deprecated argument log nothing.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated argument log a warning.
    self.assertEqual(3, _fn(1, 2, a=True, b=False))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_positional_and_named(self, mock_warning):
    """Each deprecated argument used triggers its own warning."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(date, instructions, "d1", "d2")
    def _fn(arg0, d1=None, arg1=2, d2=None):
      return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
    # Assert calls without the deprecated arguments log nothing.
    self.assertEqual(2, _fn(1, arg1=2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated arguments log warnings.
    self.assertEqual(2, _fn(1, None, 2, d2=False))
    self.assertEqual(2, mock_warning.call_count)
    (args1, _) = mock_warning.call_args_list[0]
    self.assertRegexpMatches(args1[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions, "d1"]), set(args1[1:]))
    (args2, _) = mock_warning.call_args_list[1]
    # BUG FIX: previously re-checked args1[0]; verify the *second* warning.
    self.assertRegexpMatches(args2[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions, "d2"]), set(args2[1:]))

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_positional_and_named_with_ok_vals(self, mock_warning):
    """No warning is logged when a deprecated argument has its 'ok' value."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_args(
        date,
        instructions,
        ("d1", None),
        ("d2", "my_ok_val"))
    def _fn(arg0, d1=None, arg1=2, d2=None):
      return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
    # Assert calls without the deprecated arguments log nothing.
    self.assertEqual(2, _fn(1, arg1=2))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calls with the deprecated arguments log warnings.
    self.assertEqual(2, _fn(1, False, 2, d2=False))
    self.assertEqual(2, mock_warning.call_count)
    (args1, _) = mock_warning.call_args_list[0]
    self.assertRegexpMatches(args1[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions, "d1"]), set(args1[1:]))
    (args2, _) = mock_warning.call_args_list[1]
    # BUG FIX: previously re-checked args1[0]; verify the *second* warning.
    self.assertRegexpMatches(args2[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions, "d2"]), set(args2[1:]))
    # Assert calls with the deprecated arguments dont log warnings if
    # the value matches the 'ok_val'.
    mock_warning.reset_mock()
    self.assertEqual(3, _fn(1, None, 2, d2="my_ok_val"))
    self.assertEqual(0, mock_warning.call_count)
class DeprecatedArgValuesTest(tf.test.TestCase):
  """Unit tests for the deprecation.deprecated_arg_values decorator."""

  def _assert_subset(self, expected_subset, actual_set):
    """Asserts that actual_set contains every element of expected_subset."""
    self.assertTrue(
        actual_set.issuperset(expected_subset),
        msg="%s is not a superset of %s." % (actual_set, expected_subset))

  def test_deprecated_illegal_args(self):
    """Invalid dates, empty instructions or no named values must raise."""
    instructions = "This is how you update..."
    with self.assertRaisesRegexp(ValueError, "date"):
      deprecation.deprecated_arg_values(
          None, instructions, deprecated=True)
    with self.assertRaisesRegexp(ValueError, "date"):
      deprecation.deprecated_arg_values(
          "", instructions, deprecated=True)
    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
      deprecation.deprecated_arg_values(
          "07-04-2016", instructions, deprecated=True)
    date = "2016-07-04"
    with self.assertRaisesRegexp(ValueError, "instructions"):
      deprecation.deprecated_arg_values(
          date, None, deprecated=True)
    with self.assertRaisesRegexp(ValueError, "instructions"):
      deprecation.deprecated_arg_values(
          date, "", deprecated=True)
    # BUG FIX: 'deprecated=True' was mistakenly passed to assertRaisesRegexp
    # (where py2 unittest silently ignores it) instead of exercising
    # deprecated_arg_values with *no* named values, which is the case
    # under test here.
    with self.assertRaisesRegexp(ValueError, "argument"):
      deprecation.deprecated_arg_values(
          date, instructions)

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_static_fn_with_doc(self, mock_warning):
    """Docstring rewrite and warning for a function with a full docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_arg_values(date, instructions, deprecated=True)
    def _fn(arg0, arg1, deprecated=True):
      """fn doc.
      Args:
        arg0: Arg 0.
        arg1: Arg 1.
        deprecated: Deprecated!
      Returns:
        Sum of args.
      """
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert function docs are properly updated.
    self.assertEqual("_fn", _fn.__name__)
    self.assertEqual(
        "fn doc. (deprecated arguments)"
        "\n"
        "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
        "\nInstructions for updating:\n%s"
        "\n"
        "\nArgs:"
        "\n  arg0: Arg 0."
        "\n  arg1: Arg 1."
        "\n  deprecated: Deprecated!"
        "\n"
        "\nReturns:"
        "\n  Sum of args." % (date, instructions),
        _fn.__doc__)
    # Assert calling new fn with non-deprecated value logs nothing.
    self.assertEqual(3, _fn(1, 2, deprecated=False))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calling new fn with deprecated value issues log warning.
    self.assertEqual(3, _fn(1, 2, deprecated=True))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))
    # Assert calling new fn with default deprecated value issues log warning.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(2, mock_warning.call_count)

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_static_fn_with_one_line_doc(self, mock_warning):
    """Docstring rewrite and warning for a one-line docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_arg_values(date, instructions, deprecated=True)
    def _fn(arg0, arg1, deprecated=True):
      """fn doc."""
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert function docs are properly updated.
    self.assertEqual("_fn", _fn.__name__)
    self.assertEqual(
        "fn doc. (deprecated arguments)"
        "\n"
        "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
        "\nInstructions for updating:\n%s" % (date, instructions),
        _fn.__doc__)
    # Assert calling new fn with non-deprecated value logs nothing.
    self.assertEqual(3, _fn(1, 2, deprecated=False))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calling new fn with deprecated value issues log warning.
    self.assertEqual(3, _fn(1, 2, deprecated=True))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))
    # Assert calling new fn with default deprecated value issues log warning.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(2, mock_warning.call_count)

  @tf.test.mock.patch.object(logging, "warning", autospec=True)
  def test_static_fn_no_doc(self, mock_warning):
    """Docstring synthesis and warning for a function without a docstring."""
    date = "2016-07-04"
    instructions = "This is how you update..."
    @deprecation.deprecated_arg_values(date, instructions, deprecated=True)
    def _fn(arg0, arg1, deprecated=True):
      return arg0 + arg1 if deprecated else arg1 + arg0
    # Assert function docs are properly updated.
    self.assertEqual("_fn", _fn.__name__)
    self.assertEqual(
        "DEPRECATED FUNCTION ARGUMENTS"
        "\n"
        "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
        "\nInstructions for updating:"
        "\n%s" % (date, instructions),
        _fn.__doc__)
    # Assert calling new fn with non-deprecated value logs nothing.
    self.assertEqual(3, _fn(1, 2, deprecated=False))
    self.assertEqual(0, mock_warning.call_count)
    # Assert calling new fn issues log warning.
    self.assertEqual(3, _fn(1, 2, deprecated=True))
    self.assertEqual(1, mock_warning.call_count)
    (args, _) = mock_warning.call_args
    self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
    self._assert_subset(set([date, instructions]), set(args[1:]))
    # Assert calling new fn with default deprecated value issues log warning.
    self.assertEqual(3, _fn(1, 2))
    self.assertEqual(2, mock_warning.call_count)
class DeprecationArgumentsTest(tf.test.TestCase):
  """Tests for deprecated_argument_lookup and rewrite_argument_docstring."""

  def testDeprecatedArgumentLookup(self):
    """New name wins, old name is the fallback, both at once is an error."""
    good_value = 3
    self.assertEqual(deprecation.deprecated_argument_lookup(
        "val_new", good_value, "val_old", None), good_value)
    self.assertEqual(deprecation.deprecated_argument_lookup(
        "val_new", None, "val_old", good_value), good_value)
    with self.assertRaisesRegexp(ValueError,
                                 "Cannot specify both 'val_old' and 'val_new'"):
      self.assertEqual(deprecation.deprecated_argument_lookup(
          "val_new", good_value, "val_old", good_value), good_value)

  def testRewriteArgumentDocstring(self):
    """Argument names inside a docstring are rewritten one at a time."""
    docs = """Add `a` and `b`
    Args:
      a: first arg
      b: second arg
    """
    new_docs = deprecation.rewrite_argument_docstring(
        deprecation.rewrite_argument_docstring(docs, "a", "left"),
        "b", "right")
    new_docs_ref = """Add `left` and `right`
    Args:
      left: first arg
      right: second arg
    """
    self.assertEqual(new_docs, new_docs_ref)
# Run the test suite when executed directly.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
toddeye/home-assistant | homeassistant/components/group.py | 2 | 7588 | """
homeassistant.components.group
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to group devices that can be turned on or off.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/group/
"""
import homeassistant.core as ha
from homeassistant.helpers.event import track_state_change
from homeassistant.helpers.entity import (
Entity, split_entity_id, generate_entity_id)
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_ON, STATE_OFF,
STATE_HOME, STATE_NOT_HOME, STATE_OPEN, STATE_CLOSED,
STATE_UNKNOWN, CONF_NAME, CONF_ICON)
DOMAIN = 'group'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_ENTITIES = 'entities'
CONF_VIEW = 'view'
ATTR_AUTO = 'auto'
ATTR_ORDER = 'order'
ATTR_VIEW = 'view'
# List of ON/OFF state tuples for groupable states
_GROUP_TYPES = [(STATE_ON, STATE_OFF), (STATE_HOME, STATE_NOT_HOME),
(STATE_OPEN, STATE_CLOSED)]
def _get_group_on_off(state):
    """Return the (on, off) state pair whose group type matches *state*,
    or (None, None) when the state belongs to no known group type."""
    for on_state, off_state in _GROUP_TYPES:
        if state in (on_state, off_state):
            return on_state, off_state
    return None, None
def is_on(hass, entity_id):
    """Return True if the group entity is currently in its ON-state."""
    state = hass.states.get(entity_id)
    if not state:
        return False
    group_on, _ = _get_group_on_off(state.state)
    # Only a recognized group type can be "on".
    return group_on is not None and state.state == group_on
def expand_entity_ids(hass, entity_ids):
    """Return the given entity ids with group ids replaced by the entity
    ids of their members. Duplicates and non-string entries are dropped."""
    found = []
    for ent_id in entity_ids:
        if not isinstance(ent_id, str):
            continue
        ent_id = ent_id.lower()
        try:
            domain, _ = split_entity_id(ent_id)
            if domain == DOMAIN:
                # Groups are expanded into the ids they track.
                for member_id in get_entity_ids(hass, ent_id):
                    if member_id not in found:
                        found.append(member_id)
            elif ent_id not in found:
                found.append(ent_id)
        except AttributeError:
            # Raised by split_entity_id for malformed entity ids.
            pass
    return found
def get_entity_ids(hass, entity_id, domain_filter=None):
    """Return the entity ids making up the group *entity_id*, optionally
    restricted to ids whose domain starts with *domain_filter*."""
    entity_id = entity_id.lower()
    try:
        tracked = hass.states.get(entity_id).attributes[ATTR_ENTITY_ID]
        if not domain_filter:
            return tracked
        prefix = domain_filter.lower()
        return [ent_id for ent_id in tracked if ent_id.startswith(prefix)]
    except (AttributeError, KeyError):
        # AttributeError: entity does not exist.
        # KeyError: state carries no entity_id attribute (not a group).
        return []
def setup(hass, config):
    """ Sets up all groups found defined in the configuration. """
    for object_id, conf in config.get(DOMAIN, {}).items():
        # Short form: "group_name: ent_id1, ent_id2" (no nested options).
        if not isinstance(conf, dict):
            conf = {CONF_ENTITIES: conf}
        name = conf.get(CONF_NAME, object_id)
        entity_ids = conf.get(CONF_ENTITIES)
        icon = conf.get(CONF_ICON)
        view = conf.get(CONF_VIEW)
        # Entities may be given as a comma separated string.
        if isinstance(entity_ids, str):
            entity_ids = [ent.strip() for ent in entity_ids.split(",")]
        Group(hass, name, entity_ids, icon=icon, view=view,
              object_id=object_id)
    return True
class Group(Entity):
    """ Tracks a group of entity ids. """
    # pylint: disable=too-many-instance-attributes, too-many-arguments
    def __init__(self, hass, name, entity_ids=None, user_defined=True,
                 icon=None, view=False, object_id=None):
        self.hass = hass
        self._name = name
        self._state = STATE_UNKNOWN
        # Creation order; used as the sort order in the frontend.
        self._order = len(hass.states.entity_ids(DOMAIN))
        self._user_defined = user_defined
        self._icon = icon
        self._view = view
        self.entity_id = generate_entity_id(
            ENTITY_ID_FORMAT, object_id or name, hass=hass)
        # Entity ids this group watches.
        self.tracking = []
        # ON/OFF state pair of the detected group type (see _GROUP_TYPES);
        # None until the first recognizable tracked state is processed.
        self.group_on = None
        self.group_off = None
        if entity_ids is not None:
            self.update_tracked_entity_ids(entity_ids)
        else:
            self.update_ha_state(True)

    @property
    def should_poll(self):
        # State is pushed by state-change listeners; no polling needed.
        return False

    @property
    def name(self):
        return self._name

    @property
    def state(self):
        return self._state

    @property
    def icon(self):
        return self._icon

    @property
    def hidden(self):
        # Auto-generated groups and views are hidden from the default UI.
        return not self._user_defined or self._view

    @property
    def state_attributes(self):
        data = {
            ATTR_ENTITY_ID: self.tracking,
            ATTR_ORDER: self._order,
        }
        if not self._user_defined:
            data[ATTR_AUTO] = True
        if self._view:
            data[ATTR_VIEW] = True
        return data

    def update_tracked_entity_ids(self, entity_ids):
        """ Update the tracked entity IDs. """
        self.stop()
        self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
        # Reset the detected group type; update(True) re-derives it.
        self.group_on, self.group_off = None, None
        self.update_ha_state(True)
        self.start()

    def start(self):
        """ Starts the tracking. """
        track_state_change(
            self.hass, self.tracking, self._state_changed_listener)

    def stop(self):
        """ Unregisters the group from Home Assistant. """
        self.hass.states.remove(self.entity_id)
        self.hass.bus.remove_listener(
            ha.EVENT_STATE_CHANGED, self._state_changed_listener)

    def update(self):
        """ Query all the tracked states and determine current group state. """
        self._state = STATE_UNKNOWN
        for entity_id in self.tracking:
            state = self.hass.states.get(entity_id)
            if state is not None:
                self._process_tracked_state(state)

    def _state_changed_listener(self, entity_id, old_state, new_state):
        """ Listener to receive state changes of tracked entities. """
        self._process_tracked_state(new_state)
        self.update_ha_state()

    def _process_tracked_state(self, tr_state):
        """ Updates group state based on a new state of a tracked entity. """
        # We have not determined type of group yet
        if self.group_on is None:
            self.group_on, self.group_off = _get_group_on_off(tr_state.state)
            if self.group_on is not None:
                # New state of the group is going to be based on the first
                # state that we can recognize
                self._state = tr_state.state
            return
        # There is already a group state
        cur_gr_state = self._state
        group_on, group_off = self.group_on, self.group_off
        # if cur_gr_state = OFF and tr_state = ON: set ON
        # if cur_gr_state = ON and tr_state = OFF: research
        # else: ignore
        if cur_gr_state == group_off and tr_state.state == group_on:
            self._state = group_on
        elif cur_gr_state == group_on and tr_state.state == group_off:
            # Set to off if no other states are on
            if not any(self.hass.states.is_state(ent_id, group_on)
                       for ent_id in self.tracking
                       if tr_state.entity_id != ent_id):
                self._state = group_off
| mit |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/filecmp.py | 63 | 9577 | """Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
from itertools import ifilter, ifilterfalse, imap, izip
__all__ = ["cmp","dircmp","cmpfiles"]
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1):
    """Compare two files.

    Arguments:
    f1 -- First file name
    f2 -- Second file name
    shallow -- Just check stat signature (do not read the files).
               defaults to 1.

    Return value:
    True if the files are the same, False otherwise.

    Past comparisons are cached; entries are invalidated automatically
    because a file's stat signature is part of the cache key.
    """
    sig1 = _sig(os.stat(f1))
    sig2 = _sig(os.stat(f2))
    # Only regular files can ever compare equal.
    if not (sig1[0] == stat.S_IFREG == sig2[0]):
        return False
    # Identical signatures are sufficient for a shallow comparison.
    if shallow and sig1 == sig2:
        return True
    # Different sizes mean different contents, no need to read.
    if sig1[1] != sig2[1]:
        return False
    key = (f1, f2, sig1, sig2)
    result = _cache.get(key)
    if result is None:
        result = _do_cmp(f1, f2)
        if len(_cache) > 100:  # limit the maximum size of the cache
            _cache.clear()
        _cache[key] = result
    return result
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
    # Byte-for-byte comparison of the two files, read in BUFSIZE chunks.
    with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
        while True:
            chunk1 = fp1.read(BUFSIZE)
            chunk2 = fp2.read(BUFSIZE)
            if chunk1 != chunk2:
                return False
            if not chunk1:
                # Both files exhausted at the same point: identical.
                return True
# Directory comparison class.
#
class dircmp:
    """A class that manages the comparison of 2 directories.

    dircmp(a, b, ignore=None, hide=None)
      A and B are directories.
      IGNORE is a list of names to ignore, defaults to ['RCS', 'CVS', 'tags'].
      HIDE is a list of names to hide, defaults to [os.curdir, os.pardir].

    High level usage:
      x = dircmp(dir1, dir2)
      x.report() -> prints a report on the differences between dir1 and dir2
       or
      x.report_partial_closure() -> prints report on differences between dir1
            and dir2, and reports on common immediate subdirectories.
      x.report_full_closure() -> like report_partial_closure,
            but fully recursive.

    Attributes (computed lazily, on first access, via __getattr__):
     left_list, right_list: The files in dir1 and dir2,
        filtered by hide and ignore.
     common: a list of names in both dir1 and dir2.
     left_only, right_only: names only in dir1, dir2.
     common_dirs: subdirectories in both dir1 and dir2.
     common_files: files in both dir1 and dir2.
     common_funny: names in both dir1 and dir2 where the type differs between
        dir1 and dir2, or the name is not stat-able.
     same_files: list of identical files.
     diff_files: list of filenames which differ.
     funny_files: list of files which could not be compared.
     subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
     """

    def __init__(self, a, b, ignore=None, hide=None): # Initialize
        self.left = a
        self.right = b
        if hide is None:
            self.hide = [os.curdir, os.pardir] # Names never to be shown
        else:
            self.hide = hide
        if ignore is None:
            self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
        else:
            self.ignore = ignore

    def phase0(self): # Compare everything except common subdirectories
        self.left_list = _filter(os.listdir(self.left),
                                 self.hide+self.ignore)
        self.right_list = _filter(os.listdir(self.right),
                                  self.hide+self.ignore)
        self.left_list.sort()
        self.right_list.sort()

    def phase1(self): # Compute common names
        # Keys are case-normalized for matching; values keep the original
        # spelling so reports show the real file names.
        a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
        b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
        self.common = map(a.__getitem__, ifilter(b.__contains__, a))
        self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
        self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))

    def phase2(self): # Distinguish files, directories, funnies
        self.common_dirs = []
        self.common_files = []
        self.common_funny = []
        for x in self.common:
            a_path = os.path.join(self.left, x)
            b_path = os.path.join(self.right, x)
            # ok stays 1 only if both sides could be stat'ed.
            ok = 1
            try:
                a_stat = os.stat(a_path)
            except os.error, why:
                # print 'Can\'t stat', a_path, ':', why[1]
                ok = 0
            try:
                b_stat = os.stat(b_path)
            except os.error, why:
                # print 'Can\'t stat', b_path, ':', why[1]
                ok = 0
            if ok:
                a_type = stat.S_IFMT(a_stat.st_mode)
                b_type = stat.S_IFMT(b_stat.st_mode)
                if a_type != b_type:
                    self.common_funny.append(x)
                elif stat.S_ISDIR(a_type):
                    self.common_dirs.append(x)
                elif stat.S_ISREG(a_type):
                    self.common_files.append(x)
                else:
                    # Neither a directory nor a regular file (device, ...).
                    self.common_funny.append(x)
            else:
                self.common_funny.append(x)

    def phase3(self): # Find out differences between common files
        xx = cmpfiles(self.left, self.right, self.common_files)
        self.same_files, self.diff_files, self.funny_files = xx

    def phase4(self): # Find out differences between common subdirectories
        # A new dircmp object is created for each common subdirectory,
        # these are stored in a dictionary indexed by filename.
        # The hide and ignore properties are inherited from the parent
        self.subdirs = {}
        for x in self.common_dirs:
            a_x = os.path.join(self.left, x)
            b_x = os.path.join(self.right, x)
            self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)

    def phase4_closure(self): # Recursively call phase4() on subdirectories
        self.phase4()
        for sd in self.subdirs.itervalues():
            sd.phase4_closure()

    def report(self): # Print a report on the differences between a and b
        # Output format is purposely lousy
        print 'diff', self.left, self.right
        if self.left_only:
            self.left_only.sort()
            print 'Only in', self.left, ':', self.left_only
        if self.right_only:
            self.right_only.sort()
            print 'Only in', self.right, ':', self.right_only
        if self.same_files:
            self.same_files.sort()
            print 'Identical files :', self.same_files
        if self.diff_files:
            self.diff_files.sort()
            print 'Differing files :', self.diff_files
        if self.funny_files:
            self.funny_files.sort()
            print 'Trouble with common files :', self.funny_files
        if self.common_dirs:
            self.common_dirs.sort()
            print 'Common subdirectories :', self.common_dirs
        if self.common_funny:
            self.common_funny.sort()
            print 'Common funny cases :', self.common_funny

    def report_partial_closure(self): # Print reports on self and on subdirs
        self.report()
        for sd in self.subdirs.itervalues():
            print
            sd.report()

    def report_full_closure(self): # Report on self and subdirs recursively
        self.report()
        for sd in self.subdirs.itervalues():
            print
            sd.report_full_closure()

    # Maps each lazily-computed attribute to the phase method that sets it;
    # consulted by __getattr__ below.
    methodmap = dict(subdirs=phase4,
                     same_files=phase3, diff_files=phase3, funny_files=phase3,
                     common_dirs = phase2, common_files=phase2, common_funny=phase2,
                     common=phase1, left_only=phase1, right_only=phase1,
                     left_list=phase0, right_list=phase0)

    def __getattr__(self, attr):
        # Compute attributes on demand: run the phase that produces the
        # requested attribute, then return the freshly set value.
        if attr not in self.methodmap:
            raise AttributeError, attr
        self.methodmap[attr](self)
        return getattr(self, attr)
def cmpfiles(a, b, common, shallow=1):
    """Compare common files in two directories.

    a, b -- directory names
    common -- list of file names found in both directories
    shallow -- if true, do comparison based solely on stat() information

    Returns a tuple of three lists: files that compare equal, files
    that are different, and filenames that aren't regular files.
    """
    same, diff, funny = [], [], []
    buckets = (same, diff, funny)
    for name in common:
        # _cmp yields 0 (equal), 1 (different) or 2 (funny).
        outcome = _cmp(os.path.join(a, name), os.path.join(b, name), shallow)
        buckets[outcome].append(name)
    return same, diff, funny
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
    # NOTE: the 'cmp' default binds the module-level cmp() defined above
    # (not the builtin) at definition time. abs(True) == 1, so equal files
    # give `not 1` -> 0 and differing files give `not 0` -> 1, matching
    # the result-tuple indices used by cmpfiles().
    try:
        return not abs(cmp(a, b, sh))
    except os.error:
        return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(ifilterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
    """Command-line demo: 'filecmp.py [-r] dir1 dir2' compares two
    directories, recursively when -r is given."""
    import sys
    import getopt
    options, args = getopt.getopt(sys.argv[1:], 'r')
    if len(args) != 2:
        raise getopt.GetoptError('need exactly two args', None)
    dd = dircmp(args[0], args[1])
    if ('-r', '') in options:
        dd.report_full_closure()
    else:
        dd.report()
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    demo()
| apache-2.0 |
kirca/OpenUpgrade | addons/purchase/stock.py | 5 | 14452 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_move(osv.osv):
    """Extension of ``stock.move`` that links each move back to the
    purchase order line which generated it, and keeps the related
    purchase order's workflow state and invoicing data in sync."""
    _inherit = 'stock.move'
    _columns = {
        # Back-link to the originating PO line; cleared (not cascaded)
        # when that line is deleted.
        'purchase_line_id': fields.many2one('purchase.order.line',
            'Purchase Order Line', ondelete='set null', select=True,
            readonly=True),
    }

    def write(self, cr, uid, ids, vals, context=None):
        """Standard write, plus: when a move reaches 'done' or 'cancel',
        re-check the linked purchase order and fire the matching
        workflow signal ('picking_done' / 'picking_cancel')."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
        from openerp import workflow
        if vals.get('state') in ['done', 'cancel']:
            for move in self.browse(cr, uid, ids, context=context):
                if move.purchase_line_id and move.purchase_line_id.order_id:
                    order_id = move.purchase_line_id.order_id.id
                    # update linked purchase order as superuser as the warehouse
                    # user may not have rights to access purchase.order
                    if self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context):
                        workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr)
                    if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context):
                        workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr)
        return res

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate the move, dropping the PO-line link unless the copy
        is a move split (``split_from`` in ``default``)."""
        default = default or {}
        context = context or {}
        if not default.get('split_from'):
            #we don't want to propagate the link to the purchase order line except in case of move split
            default['purchase_line_id'] = False
        return super(stock_move, self).copy(cr, uid, id, default, context)

    def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
        """Create the invoice line; for PO-linked moves also copy the PO
        line's analytic account onto it and register the new invoice
        line / invoice on the PO line and PO respectively."""
        if move.purchase_line_id:
            invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id
            invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False
        invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
        if move.purchase_line_id:
            purchase_line = move.purchase_line_id
            # (4, id) = "link existing record" in the one2many/many2many
            # write command syntax.
            self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], {
                'invoice_lines': [(4, invoice_line_id)]
            }, context=context)
            self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], {
                'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
            })
        return invoice_line_id

    def _get_master_data(self, cr, uid, move, company, context=None):
        """Return the (partner, user id, currency id) triple used to
        build the invoice for this move."""
        if move.purchase_line_id:
            # PO-linked move: invoice the PO's supplier in its currency.
            purchase_order = move.purchase_line_id.order_id
            return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
        else:
            partner = move.picking_id and move.picking_id.partner_id or False
            code = self.get_code_from_locs(cr, uid, move, context=context)
            if partner and partner.property_product_pricelist_purchase and code == 'incoming':
                # Incoming move without a PO: use the partner's purchase
                # pricelist currency.
                currency = partner.property_product_pricelist_purchase.currency_id.id
                return partner, uid, currency
        return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)

    def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
        """Invoice line values: override taxes and unit price with the
        ones from the originating PO line, when there is one."""
        res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
        if move.purchase_line_id:
            purchase_line = move.purchase_line_id
            # (6, 0, ids) = "replace the whole relation" write command.
            res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
            res['price_unit'] = purchase_line.price_unit
        return res

    def attribute_price(self, cr, uid, move, context=None):
        """
        Attribute price to move, important in inter-company moves or receipts with only one partner
        """
        code = self.get_code_from_locs(cr, uid, move, context=context)
        if not move.purchase_line_id and code == 'incoming' and not move.price_unit:
            partner = move.picking_id and move.picking_id.partner_id or False
            price = False
            # If partner given, search price in its purchase pricelist
            if partner and partner.property_product_pricelist_purchase:
                pricelist_obj = self.pool.get("product.pricelist")
                pricelist = partner.property_product_pricelist_purchase.id
                price = pricelist_obj.price_get(cr, uid, [pricelist],
                    move.product_id.id, move.product_uom_qty, partner, {
                        'uom': move.product_uom.id,
                        'date': move.date,
                    })[pricelist]
            if price:
                return self.write(cr, uid, [move.id], {'price_unit': price}, context=context)
        super(stock_move, self).attribute_price(cr, uid, move, context=context)
class stock_picking(osv.osv):
    """Extension of ``stock.picking`` that computes whether an incoming
    shipment is invoiceable on reception, and that appends service
    lines of the purchase order to the invoice created from the
    picking."""
    _inherit = 'stock.picking'

    def _get_to_invoice(self, cr, uid, ids, name, args, context=None):
        # Function-field getter: True when the picking has at least one
        # move whose PO uses the 'picking' invoice method and which has
        # no originating moves (i.e. a first reception).
        res = {}
        for picking in self.browse(cr, uid, ids, context=context):
            res[picking.id] = False
            for move in picking.move_lines:
                if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking':
                    if not move.move_orig_ids:
                        res[picking.id] = True
        return res

    def _get_picking_to_recompute(self, cr, uid, ids, context=None):
        # store= trigger for the function field below: given changed
        # stock.move ids, return the pickings whose flag must be redone.
        picking_ids = set()
        for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
            if move.picking_id and move.purchase_line_id:
                picking_ids.add(move.picking_id.id)
        return list(picking_ids)

    _columns = {
        'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?',
            help='Does the picking contains some moves related to a purchase order invoiceable on the receipt?',
            store={
                'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10),
            }),
    }

    def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        """Create the invoice from the picking, then add invoice lines
        for the same PO's not-yet-invoiced service lines (services never
        produce stock moves, so they would otherwise be skipped)."""
        purchase_obj = self.pool.get("purchase.order")
        purchase_line_obj = self.pool.get('purchase.order.line')
        invoice_line_obj = self.pool.get('account.invoice.line')
        invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
        if picking.move_lines and picking.move_lines[0].purchase_line_id:
            purchase_id = picking.move_lines[0].purchase_line_id.order_id.id
            purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('product_id.type', '=', 'service'), ('invoiced', '=', False)], context=context)
            if purchase_line_ids:
                inv_lines = []
                for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context):
                    acc_id = purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context)
                    inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
                    inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
                    inv_lines.append(inv_line_id)
                    po_line.write({'invoice_lines': [(4, inv_line_id)]})
                # Attach all the service lines to the invoice in one write.
                invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_id}, context=context)
        return invoice_id

    def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
        """Invoice header values: carry the PO's fiscal position and
        payment term onto the invoice when the move comes from a PO."""
        inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
        if move.purchase_line_id and move.purchase_line_id.order_id:
            purchase = move.purchase_line_id.order_id
            inv_vals.update({
                'fiscal_position': purchase.fiscal_position.id,
                'payment_term': purchase.payment_term_id.id,
            })
        return inv_vals
class stock_warehouse(osv.osv):
    """Extension of ``stock.warehouse`` managing the "Buy" resupply
    route: a procurement rule letting the warehouse be resupplied by
    purchasing, created and removed together with the
    ``buy_to_resupply`` flag."""
    _inherit = 'stock.warehouse'
    _columns = {
        'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse',
            help="When products are bought, they can be delivered to this warehouse"),
        # The procurement rule implementing the Buy route for this warehouse.
        'buy_pull_id': fields.many2one('procurement.rule', 'BUY rule'),
    }
    _defaults = {
        'buy_to_resupply': True,
    }

    def _get_buy_pull_rule(self, cr, uid, warehouse, context=None):
        """Return the values dict used to create this warehouse's Buy
        procurement rule.  Raises ``osv.except_osv`` when no generic
        Buy route can be located."""
        route_obj = self.pool.get('stock.location.route')
        data_obj = self.pool.get('ir.model.data')
        try:
            buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1]
        except:
            # XML id not found (e.g. demo data removed): fall back to a
            # name search.  NOTE(review): bare except kept for backward
            # compatibility, but it hides unrelated errors.
            buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], context=context)
            buy_route_id = buy_route_id and buy_route_id[0] or False
        if not buy_route_id:
            raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.'))
        return {
            'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context),
            'location_id': warehouse.in_type_id.default_location_dest_id.id,
            'route_id': buy_route_id,
            'action': 'buy',
            'picking_type_id': warehouse.in_type_id.id,
            'warehouse_id': warehouse.id,
        }

    def create_routes(self, cr, uid, ids, warehouse, context=None):
        """Create the standard warehouse routes, plus the Buy rule when
        ``buy_to_resupply`` is enabled."""
        pull_obj = self.pool.get('procurement.rule')
        res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context)
        if warehouse.buy_to_resupply:
            buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
            buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
            res['buy_pull_id'] = buy_pull_id
        return res

    def write(self, cr, uid, ids, vals, context=None):
        """Standard write, plus create/remove the Buy rule whenever the
        ``buy_to_resupply`` flag is toggled."""
        pull_obj = self.pool.get('procurement.rule')
        if isinstance(ids, (int, long)):
            ids = [ids]
        if 'buy_to_resupply' in vals:
            if vals.get("buy_to_resupply"):
                for warehouse in self.browse(cr, uid, ids, context=context):
                    if not warehouse.buy_pull_id:
                        buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
                        buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
                        vals['buy_pull_id'] = buy_pull_id
            else:
                for warehouse in self.browse(cr, uid, ids, context=context):
                    if warehouse.buy_pull_id:
                        pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context)
        # BUG FIX: the original passed ``context=None`` to super().write,
        # silently dropping the caller's context (language, company, ...).
        return super(stock_warehouse, self).write(cr, uid, ids, vals, context=context)

    def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
        """Append the Buy route's id to the warehouse's route id list."""
        all_routes = super(stock_warehouse, self).get_all_routes_for_wh(cr, uid, warehouse, context=context)
        if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
            all_routes += [warehouse.buy_pull_id.route_id.id]
        return all_routes

    def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None):
        """Drop from the resupply product list the products that already
        carry this warehouse's Buy route themselves."""
        res = super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context)
        if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
            # BUG FIX: iterate over a copy -- the original removed items
            # from ``res`` while iterating it, which skips the element
            # following each removal.
            for product_id in res[:]:
                for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids:
                    if route.id == warehouse.buy_pull_id.route_id.id:
                        res.remove(product_id)
                        break
        return res

    def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
        """Rename hook: keep the Buy rule's name in sync with the
        warehouse's new name."""
        res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context)
        pull_obj = self.pool.get('procurement.rule')
        #change the buy pull rule name
        if warehouse.buy_pull_id:
            pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
        return res

    def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """Reception/delivery step change hook: keep the Buy rule's
        destination aligned with the incoming picking type's default
        destination location."""
        res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context)
        if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id:
            self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context)
        return res
return res
| agpl-3.0 |
CopeX/odoo | addons/l10n_sg/__openerp__.py | 331 | 2380 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest for the Singapore accounting localization
# (chart of accounts, taxes and configuration wizard).
{
    'name': 'Singapore - Accounting',
    'version': '1.0',
    'author': 'Tech Receptives',
    'website': 'http://www.techreceptives.com',
    'category': 'Localization/Account Charts',
    'description': """
Singapore accounting chart and localization.
=======================================================
After installing this module, the Configuration wizard for accounting is launched.
* The Chart of Accounts consists of the list of all the general ledger accounts
required to maintain the transactions of Singapore.
* On that particular wizard, you will be asked to pass the name of the company,
the chart template to follow, the no. of digits to generate, the code for your
account and bank account, currency to create journals.
* The Chart of Taxes would display the different types/groups of taxes such as
Standard Rates, Zeroed, Exempted, MES and Out of Scope.
* The tax codes are specified considering the Tax Group and for easy accessibility of
submission of GST Tax Report.
""",
    'depends': ['base', 'account', 'account_chart'],
    'demo': [ ],
    # XML data files loaded at install time: tax codes, chart of
    # accounts, taxes and the configuration wizard.
    'data': [
        'l10n_sg_chart_tax_code.xml',
        'l10n_sg_chart.xml',
        'l10n_sg_chart_tax.xml',
        'l10n_sg_wizard.xml',
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FrankBian/kuma | vendor/packages/translate-toolkit/translate/storage/php.py | 6 | 9390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2008 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Classes that hold units of PHP localisation files L{phpunit} or entire files
L{phpfile}. These files are used in translating many PHP based applications.
Only PHP files written with these conventions are supported::
$lang['item'] = "vale"; # Array of values
$some_entity = "value"; # Named variables
The parser does not support other array conventions such as::
$lang = array(
'item1' => 'value1',
'item2' => 'value2',
);
The working of PHP strings and specifically the escaping conventions which
differ between single quote (') and double quote (") characters are outlined
in the PHP documentation for the U{String type<http://www.php.net/language.types.string>}
"""
from translate.storage import base
import re
def phpencode(text, quotechar="'"):
    """Escape a Python string for embedding in PHP source.

    Supports PHP's single-quoted and double-quoted string syntaxes
    (selected by *quotechar*); heredoc and nowdoc are not implemented.
    """
    if not text:
        return text
    if quotechar != '"':
        # Single-quoted PHP strings only require the quote character
        # itself to be escaped.
        return text.replace("%s" % quotechar, "\\%s" % quotechar)
    # Double-quoted syntax: apply the replacement table in order.
    # \n may be converted to \\n but we don't. This allows us to preserve
    # pretty layout that might have appeared in multiline entries; we might
    # lose some "blah\nblah" layouts but that's probably not the most
    # frequent use case. See bug 588.  The final pair undoes the
    # double-escaping of backslashes introduced by the first pair.
    replacements = (
        ("\\", "\\\\"),
        ("\r", "\\r"),
        ("\t", "\\t"),
        ("\v", "\\v"),
        ("\f", "\\f"),
        ("\\\\$", "\\$"),
        ('"', '\\"'),
        ("\\\\", "\\"),
    )
    for plain, escaped in replacements:
        text = text.replace(plain, escaped)
    return text
def phpdecode(text, quotechar="'"):
    """Convert a PHP-escaped string back to a Python string.

    *quotechar* selects which PHP syntax the text uses: single-quoted
    (only \\' and \\\\ are unescaped) or double-quoted (control
    characters plus octal/hex escapes).
    """
    def decode_octal_hex(match):
        """Decode octal (\\NNN) and hex (\\xNN) escape sequences."""
        # ``in`` replaces the Python-2-only dict.has_key().
        if "octal" in match.groupdict():
            return match.groupdict()['octal'].decode("string_escape")
        elif "hex" in match.groupdict():
            return match.groupdict()['hex'].decode("string_escape")
        else:
            # BUG FIX: was ``return match.group`` which handed re.sub the
            # bound method object instead of the matched text.
            return match.group()
    if not text:
        return text
    if quotechar == '"':
        # We do not unescape \$ as it is used by variables and we can't
        # roundtrip that item.
        # NOTE(review): \\ is collapsed before \n etc., so a literal
        # "\\n" decodes to a newline -- kept as-is to preserve the
        # historical behaviour.
        text = text.replace('\\"', '"').replace("\\\\", "\\")
        text = text.replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t").replace("\\v", "\v").replace("\\f", "\f")
        text = re.sub(r"(?P<octal>\\[0-7]{1,3})", decode_octal_hex, text)
        text = re.sub(r"(?P<hex>\\x[0-9A-Fa-f]{1,2})", decode_octal_hex, text)
    else:
        text = text.replace("\\'", "'").replace("\\\\", "\\")
    return text
class phpunit(base.TranslationUnit):
    """a unit of a PHP file i.e. a name and value, and any comments
    associated"""

    def __init__(self, source=""):
        """construct a blank phpunit"""
        # escape_type must be set before the base constructor runs: the
        # base class assigns ``source``, which routes through our
        # setsource() property and reads escape_type.
        self.escape_type = None
        super(phpunit, self).__init__(source)
        self.name = ""          # PHP variable/location, e.g. $lang['item']
        self.value = ""         # source text, stored PHP-escaped
        self.translation = ""   # target text, stored PHP-escaped
        self._comments = []     # developer comment lines, kept verbatim
        self.source = source

    def setsource(self, source):
        """Sets the source AND the target to be equal"""
        self.value = phpencode(source, self.escape_type)

    def getsource(self):
        # Decode on the way out so callers see plain Python strings.
        return phpdecode(self.value, self.escape_type)
    source = property(getsource, setsource)

    def settarget(self, target):
        self.translation = phpencode(target, self.escape_type)

    def gettarget(self):
        return phpdecode(self.translation, self.escape_type)
    target = property(gettarget, settarget)

    def __str__(self):
        """convert to a string. double check that unicode is handled somehow here"""
        # Python 2: encode unicode output to the unit's encoding
        # (defaulting to UTF-8) so str() always returns a byte string.
        source = self.getoutput()
        if isinstance(source, unicode):
            return source.encode(getattr(self, "encoding", "UTF-8"))
        return source

    def getoutput(self):
        """convert the unit back into formatted lines for a php file"""
        # The translation wins over the untranslated value when present.
        # NOTE(review): output always uses single quotes, regardless of
        # the escape_type the value was parsed with -- confirm intended.
        return "".join(self._comments + ["%s='%s';\n" % (self.name, self.translation or self.value)])

    def addlocation(self, location):
        # The "location" of a PHP unit is its variable name.
        self.name = location

    def getlocations(self):
        return [self.name]

    def addnote(self, text, origin=None, position="append"):
        # Developer comments are stored locally; other origins defer to
        # the base implementation.
        if origin in ['programmer', 'developer', 'source code', None]:
            if position == "append":
                self._comments.append(text)
            else:
                self._comments = [text]
        else:
            return super(phpunit, self).addnote(text, origin=origin, position=position)

    def getnotes(self, origin=None):
        if origin in ['programmer', 'developer', 'source code', None]:
            return '\n'.join(self._comments)
        else:
            return super(phpunit, self).getnotes(origin)

    def removenotes(self):
        self._comments = []

    def isblank(self):
        """Returns whether this is a blank element, containing only comments."""
        return not (self.name or self.value)

    def getid(self):
        # The variable name uniquely identifies the unit within the file.
        return self.name
class phpfile(base.TranslationStore):
    """This class represents a PHP file, made up of phpunits"""
    UnitClass = phpunit

    def __init__(self, inputfile=None, encoding='utf-8'):
        """construct a phpfile, optionally reading in from inputfile"""
        super(phpfile, self).__init__(unitclass = self.UnitClass)
        self.filename = getattr(inputfile, 'name', '')
        self._encoding = encoding
        if inputfile is not None:
            phpsrc = inputfile.read()
            inputfile.close()
            self.parse(phpsrc)

    def parse(self, phpsrc):
        """Read the source of a PHP file in and include them as units"""
        # Line-oriented state machine.  State:
        #   invalue     - inside a (possibly multi-line) string value
        #   incomment   - inside a /* ... */ block comment
        #   valuequote  - the quote char that opened the current value
        #   lastvalue   - accumulated lines of a multi-line value
        newunit = phpunit()
        lastvalue = ""
        value = ""
        comment = []  # NOTE(review): collected nowhere; appears unused
        invalue = False
        incomment = False
        valuequote = "" # either ' or "
        for line in phpsrc.decode(self._encoding).split("\n"):
            # --- /* ... */ comment handling ---
            commentstartpos = line.find("/*")
            commentendpos = line.rfind("*/")
            if commentstartpos != -1:
                incomment = True
                if commentendpos != -1:
                    # Comment opens and closes on the same line.
                    newunit.addnote(line[commentstartpos:commentendpos].strip(), "developer")
                    incomment = False
                else:
                    newunit.addnote(line[commentstartpos:].strip(), "developer")
            if commentendpos != -1 and incomment:
                # Closing a comment that started on an earlier line.
                newunit.addnote(line[:commentendpos+2].strip(), "developer")
                incomment = False
            if incomment and commentstartpos == -1:
                # Whole line is inside a block comment.
                newunit.addnote(line.strip(), "developer")
                continue
            # --- '#' comments and assignments ---
            equalpos = line.find("=")
            hashpos = line.find("#")
            if 0 <= hashpos < equalpos:
                # Assume that this is a '#' comment line
                newunit.addnote(line.strip(), "developer")
                continue
            if equalpos != -1 and not invalue:
                # Start of an assignment: name on the left, quoted value
                # on the right (first char after '=' is the quote).
                newunit.addlocation(line[:equalpos].strip().replace(" ", ""))
                value = line[equalpos+1:].lstrip()[1:]
                valuequote = line[equalpos+1:].lstrip()[0]
                lastvalue = ""
                invalue = True
            else:
                if invalue:
                    # Continuation line of a multi-line value.
                    value = line
            # --- look for the terminating quote+semicolon ---
            colonpos = value.rfind(";")
            while colonpos != -1:
                if value[colonpos-1] == valuequote:
                    # Found the closing quote: the unit's value is done.
                    newunit.value = lastvalue + value[:colonpos-1]
                    newunit.escape_type = valuequote
                    lastvalue = ""
                    invalue = False
                if not invalue and colonpos != len(value)-1:
                    # Trailing text after ';' may be an inline // comment.
                    commentinlinepos = value.find("//", colonpos)
                    if commentinlinepos != -1:
                        newunit.addnote(value[commentinlinepos+2:].strip(), "developer")
                if not invalue:
                    # Unit complete; start collecting the next one.
                    self.addunit(newunit)
                    value = ""
                    newunit = phpunit()
                # Keep scanning leftwards for earlier ';' candidates.
                colonpos = value.rfind(";", 0, colonpos)
            if invalue:
                # Value continues on the next line: accumulate it.
                lastvalue = lastvalue + value + "\n"

    def __str__(self):
        """Convert the units back to lines."""
        lines = []
        for unit in self.units:
            lines.append(str(unit))
        return "".join(lines)
| mpl-2.0 |
kamitchell/py2app | examples/wxPython/2.4/doodle/superdoodle.py | 3 | 12982 | # superdoodle.py
"""
This module implements the SuperDoodle demo application. It takes the
DoodleWindow previously presented and reuses it in a much more
intelligent Frame. This one has a menu and a statusbar, is able to
save and reload doodles, clear the workspace, and has a simple control
panel for setting color and line thickness in addition to the popup
menu that DoodleWindow provides. There is also a nice About dialog
implmented using an wx.html.HtmlWindow.
"""
import sys
try:
import wx # This module uses the new wx namespace
import wx.html
except:
import pdb
import traceback
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
from wx.lib import buttons # for generic button classes
from doodle import DoodleWindow
import os, cPickle
#----------------------------------------------------------------------
wx.RegisterId(5000) # Give a high starting value for the IDs, just for kicks
# Unique command IDs for the menu items created in DoodleFrame.MakeMenu;
# the same ids are used when binding the wx.EVT_MENU handlers.
idNEW = wx.NewId()
idOPEN = wx.NewId()
idSAVE = wx.NewId()
idSAVEAS = wx.NewId()
idCLEAR = wx.NewId()
idEXIT = wx.NewId()
idABOUT = wx.NewId()
class DoodleFrame(wx.Frame):
    """
    A DoodleFrame contains a DoodleWindow and a ControlPanel and manages
    their layout with a wx.BoxSizer. A menu and associated event handlers
    provides for saving a doodle to a file, etc.
    """
    title = "Do a doodle"

    def __init__(self, parent):
        wx.Frame.__init__(self, parent, -1, self.title, size=(800,600),
                          style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)
        self.CreateStatusBar()
        self.MakeMenu()
        self.filename = None
        self.doodle = DoodleWindow(self, -1)
        cPanel = ControlPanel(self, -1, self.doodle)
        # Create a sizer to layout the two windows side-by-side.
        # Both will grow vertically, the doodle window will grow
        # horizontally as well.
        box = wx.BoxSizer(wx.HORIZONTAL)
        box.Add(cPanel, 0, wx.EXPAND)
        box.Add(self.doodle, 1, wx.EXPAND)
        # Tell the frame that it should layout itself in response to
        # size events.
        self.SetAutoLayout(True)
        self.SetSizer(box)

    def SaveFile(self):
        """Pickle the doodle's line data to self.filename (if set)."""
        if self.filename:
            data = self.doodle.GetLinesData()
            f = open(self.filename, 'w')
            cPickle.dump(data, f)
            f.close()

    def ReadFile(self):
        """Load pickled line data from self.filename into the doodle;
        show a message box when the file is not a doodle pickle."""
        if self.filename:
            try:
                f = open(self.filename, 'r')
                data = cPickle.load(f)
                f.close()
                self.doodle.SetLinesData(data)
            except cPickle.UnpicklingError:
                wx.MessageBox("%s is not a doodle file." % self.filename,
                              "oops!", style=wx.OK|wx.ICON_EXCLAMATION)

    def MakeMenu(self):
        """Build the File and Help menus and bind their handlers."""
        # create the file menu
        menu1 = wx.Menu()
        # Using the "\tKeyName" syntax automatically creates a
        # wx.AcceleratorTable for this frame and binds the keys to
        # the menu items.
        menu1.Append(idOPEN, "&Open\tCtrl-O", "Open a doodle file")
        menu1.Append(idSAVE, "&Save\tCtrl-S", "Save the doodle")
        menu1.Append(idSAVEAS, "Save &As", "Save the doodle in a new file")
        menu1.AppendSeparator()
        menu1.Append(idCLEAR, "&Clear", "Clear the current doodle")
        menu1.AppendSeparator()
        menu1.Append(idEXIT, "E&xit", "Terminate the application")
        # and the help menu
        menu2 = wx.Menu()
        menu2.Append(idABOUT, "&About\tCtrl-H", "Display the gratuitous 'about this app' thingamajig")
        # and add them to a menubar
        menuBar = wx.MenuBar()
        menuBar.Append(menu1, "&File")
        menuBar.Append(menu2, "&Help")
        self.SetMenuBar(menuBar)
        wx.EVT_MENU(self, idOPEN, self.OnMenuOpen)
        wx.EVT_MENU(self, idSAVE, self.OnMenuSave)
        wx.EVT_MENU(self, idSAVEAS, self.OnMenuSaveAs)
        wx.EVT_MENU(self, idCLEAR, self.OnMenuClear)
        wx.EVT_MENU(self, idEXIT, self.OnMenuExit)
        wx.EVT_MENU(self, idABOUT, self.OnMenuAbout)

    # File-dialog filter shared by the Open and Save As handlers.
    wildcard = "Doodle files (*.ddl)|*.ddl|All files (*.*)|*.*"

    def OnMenuOpen(self, event):
        """File->Open: pick a file, load it and update the title."""
        dlg = wx.FileDialog(self, "Open doodle file...", os.getcwd(),
                            style=wx.OPEN, wildcard = self.wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            self.filename = dlg.GetPath()
            self.ReadFile()
            self.SetTitle(self.title + ' -- ' + self.filename)
        dlg.Destroy()

    def OnMenuSave(self, event):
        """File->Save: save to the current file, or prompt when unnamed."""
        if not self.filename:
            self.OnMenuSaveAs(event)
        else:
            self.SaveFile()

    def OnMenuSaveAs(self, event):
        """File->Save As: prompt for a filename (adding .ddl) and save."""
        dlg = wx.FileDialog(self, "Save doodle as...", os.getcwd(),
                            style=wx.SAVE | wx.OVERWRITE_PROMPT,
                            wildcard = self.wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
            if not os.path.splitext(filename)[1]:
                filename = filename + '.ddl'
            self.filename = filename
            self.SaveFile()
            self.SetTitle(self.title + ' -- ' + self.filename)
        dlg.Destroy()

    def OnMenuClear(self, event):
        """File->Clear: wipe the doodle and reset the title."""
        self.doodle.SetLinesData([])
        self.SetTitle(self.title)

    def OnMenuExit(self, event):
        self.Close()

    def OnMenuAbout(self, event):
        dlg = DoodleAbout(self)
        dlg.ShowModal()
        dlg.Destroy()
#----------------------------------------------------------------------
class ControlPanel(wx.Panel):
    """
    This class implements a very simple control panel for the DoodleWindow.
    It creates buttons for each of the colours and thickneses supported by
    the DoodleWindow, and event handlers to set the selected values. There is
    also a little window that shows an example doodleLine in the selected
    values. Nested sizers are used for layout.
    """
    BMP_SIZE = 16    # side (px) of each colour swatch bitmap
    BMP_BORDER = 3   # bezel border (px) around each swatch

    def __init__(self, parent, ID, doodle):
        wx.Panel.__init__(self, parent, ID, style=wx.RAISED_BORDER)
        numCols = 4
        spacing = 4
        btnSize = wx.Size(self.BMP_SIZE + 2*self.BMP_BORDER,
                          self.BMP_SIZE + 2*self.BMP_BORDER)
        # Make a grid of buttons for each colour. Attach each button
        # event to self.OnSetColour. The button ID is the same as the
        # key in the colour dictionary.
        self.clrBtns = {}
        colours = doodle.menuColours
        keys = colours.keys()
        keys.sort()
        cGrid = wx.GridSizer(cols=numCols, hgap=2, vgap=2)
        for k in keys:
            bmp = self.MakeBitmap(colours[k])
            b = buttons.GenBitmapToggleButton(self, k, bmp, size=btnSize )
            b.SetBezelWidth(1)
            b.SetUseFocusIndicator(False)
            wx.EVT_BUTTON(self, k, self.OnSetColour)
            cGrid.Add(b, 0)
            self.clrBtns[colours[k]] = b
        # Pre-select the first colour.
        self.clrBtns[colours[keys[0]]].SetToggle(True)
        # Make a grid of buttons for the thicknesses. Attach each button
        # event to self.OnSetThickness. The button ID is the same as the
        # thickness value.
        self.thknsBtns = {}
        tGrid = wx.GridSizer(cols=numCols, hgap=2, vgap=2)
        for x in range(1, doodle.maxThickness+1):
            b = buttons.GenToggleButton(self, x, str(x), size=btnSize)
            b.SetBezelWidth(1)
            b.SetUseFocusIndicator(False)
            wx.EVT_BUTTON(self, x, self.OnSetThickness)
            tGrid.Add(b, 0)
            self.thknsBtns[x] = b
        self.thknsBtns[1].SetToggle(True)
        # Make a colour indicator window, it is registerd as a listener
        # with the doodle window so it will be notified when the settings
        # change
        ci = ColourIndicator(self)
        doodle.AddListener(ci)
        doodle.Notify()
        self.doodle = doodle
        # Make a box sizer and put the two grids and the indicator
        # window in it.
        box = wx.BoxSizer(wx.VERTICAL)
        box.Add(cGrid, 0, wx.ALL, spacing)
        box.Add(tGrid, 0, wx.ALL, spacing)
        box.Add(ci, 0, wx.EXPAND|wx.ALL, spacing)
        self.SetSizer(box)
        self.SetAutoLayout(True)
        # Resize this window so it is just large enough for the
        # minimum requirements of the sizer.
        box.Fit(self)

    def MakeBitmap(self, colour):
        """
        We can create a bitmap of whatever we want by simply selecting
        it into a wx.MemoryDC and drawing on it. In this case we just set
        a background brush and clear the dc.
        """
        bmp = wx.EmptyBitmap(self.BMP_SIZE, self.BMP_SIZE)
        dc = wx.MemoryDC()
        dc.SelectObject(bmp)
        dc.SetBackground(wx.Brush(colour))
        dc.Clear()
        # Deselect before returning so the bitmap can be used elsewhere.
        dc.SelectObject(wx.NullBitmap)
        return bmp

    def OnSetColour(self, event):
        """
        Use the event ID to get the colour, set that colour in the doodle.
        """
        colour = self.doodle.menuColours[event.GetId()]
        if colour != self.doodle.colour:
            # untoggle the old colour button
            self.clrBtns[self.doodle.colour].SetToggle(False)
            # set the new colour
            self.doodle.SetColour(colour)

    def OnSetThickness(self, event):
        """
        Use the event ID to set the thickness in the doodle.
        """
        thickness = event.GetId()
        if thickness != self.doodle.thickness:
            # untoggle the old thickness button
            self.thknsBtns[self.doodle.thickness].SetToggle(False)
            # set the new colour
            self.doodle.SetThickness(thickness)
#----------------------------------------------------------------------
class ColourIndicator(wx.Window):
    """
    An instance of this class is used on the ControlPanel to show
    a sample of what the current doodle line will look like.
    """

    def __init__(self, parent):
        wx.Window.__init__(self, parent, -1, style=wx.SUNKEN_BORDER)
        self.SetBackgroundColour(wx.WHITE)
        self.SetSize( (-1, 45) )
        # Unset until the doodle window first calls Update().
        self.colour = self.thickness = None
        wx.EVT_PAINT(self, self.OnPaint)

    def Update(self, colour, thickness):
        """
        The doodle window calls this method any time the colour
        or line thickness changes.
        """
        self.colour = colour
        self.thickness = thickness
        self.Refresh() # generate a paint event

    def OnPaint(self, event):
        """
        This method is called when all or part of the window needs to be
        redrawn.
        """
        dc = wx.PaintDC(self)
        if self.colour:
            # Draw a horizontal sample line centred in the window, using
            # the currently selected pen settings.
            sz = self.GetClientSize()
            pen = wx.Pen(self.colour, self.thickness)
            dc.BeginDrawing()
            dc.SetPen(pen)
            dc.DrawLine(10, sz.height/2, sz.width-10, sz.height/2)
            dc.EndDrawing()
#----------------------------------------------------------------------
class DoodleAbout(wx.Dialog):
    """ An about box that uses an HTML window """

    # Static HTML shown in the dialog.  NOTE(review): "©" looks like a
    # mis-encoded "&copy;" entity -- confirm against the original source.
    text = '''
<html>
<body bgcolor="#ACAA60">
<center><table bgcolor="#455481" width="100%" cellspacing="0"
cellpadding="0" border="1">
<tr>
<td align="center"><h1>SuperDoodle</h1></td>
</tr>
</table>
</center>
<p><b>SuperDoodle</b> is a demonstration program for <b>wxPython</b> that
will hopefully teach you a thing or two. Just follow these simple
instructions: </p>
<p>
<ol>
<li><b>Read</b> the Source...
<li><b>Learn</b>...
<li><b>Do!</b>
</ol>
<p><b>SuperDoodle</b> and <b>wxPython</b> are brought to you by
<b>Robin Dunn</b> and <b>Total Control Software</b>, Copyright
© 1997-2003.</p>
</body>
</html>
'''

    def __init__(self, parent):
        wx.Dialog.__init__(self, parent, -1, 'About SuperDoodle',
                           size=(420, 380) )
        html = wx.html.HtmlWindow(self, -1)
        html.SetPage(self.text)
        button = wx.Button(self, wx.ID_OK, "Okay")
        # constraints for the html window: fill the dialog above the button
        lc = wx.LayoutConstraints()
        lc.top.SameAs(self, wx.Top, 5)
        lc.left.SameAs(self, wx.Left, 5)
        lc.bottom.SameAs(button, wx.Top, 5)
        lc.right.SameAs(self, wx.Right, 5)
        html.SetConstraints(lc)
        # constraints for the button: centred along the bottom edge
        lc = wx.LayoutConstraints()
        lc.bottom.SameAs(self, wx.Bottom, 5)
        lc.centreX.SameAs(self, wx.CentreX)
        lc.width.AsIs()
        lc.height.AsIs()
        button.SetConstraints(lc)
        self.SetAutoLayout(True)
        self.Layout()
        self.CentreOnParent(wx.BOTH)
#----------------------------------------------------------------------
class DoodleApp(wx.App):
    """Application object: creates and shows the main DoodleFrame."""

    def OnInit(self):
        # Called by wxPython during startup; returning True lets the
        # application continue into the main loop.
        frame = DoodleFrame(None)
        frame.Show(True)
        self.SetTopWindow(frame)
        return True
#----------------------------------------------------------------------
if __name__ == '__main__':
    # Create the application object and enter the wx main event loop.
    app = DoodleApp(0)
    app.MainLoop()
| mit |
xwu/swift | utils/gyb_syntax_support/Traits.py | 39 | 1558 | from Child import Child
class Trait(object):
    """Description of a syntax trait: a name, an optional prose
    description, and the Child layout nodes the trait contributes to
    conforming syntax kinds."""

    def __init__(self, trait_name, description=None, children=None):
        # Store the constructor arguments verbatim; no defensive copies
        # are made, matching how the TRAITS table below is consumed.
        self.trait_name = trait_name
        self.description = description
        self.children = children
# Table of all traits available to syntax nodes; each entry names the
# trait and the child slots a conforming node must provide.
TRAITS = [
    Trait('DeclGroup',
          children=[
              Child('Attributes', kind='AttributeList', is_optional=True),
              Child('Modifiers', kind='ModifierList', is_optional=True),
              Child('Members', kind='MemberDeclBlock'),
          ]),
    Trait('Braced',
          children=[
              Child('LeftBrace', kind='LeftBraceToken'),
              Child('RightBrace', kind='RightBraceToken'),
          ]),
    Trait('IdentifiedDecl',
          children=[
              Child('Identifier', kind='IdentifierToken'),
          ]),
    Trait('WithCodeBlock',
          children=[
              Child('Body', kind='CodeBlock'),
          ]),
    Trait('Parenthesized',
          children=[
              Child('LeftParen', kind='LeftParenToken'),
              Child('RightParen', kind='RightParenToken'),
          ]),
    Trait('WithTrailingComma',
          children=[
              Child('TrailingComma', kind='CommaToken', is_optional=True),
          ]),
    Trait('Labeled',
          children=[
              Child('LabelName', kind='IdentifierToken', is_optional=True),
              Child('LabelColon', kind='ColonToken', is_optional=True),
          ]),
    Trait('WithStatements',
          children=[
              Child('Statements', kind='CodeBlockItemList'),
          ]),
]
| apache-2.0 |
fabianvaccaro/pygums | pythonLibs/mahotas-1.1.0/mahotas/stretch.py | 2 | 5388 | # -*- coding: utf-8 -*-
# Copyright (C) 2009-2013, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy as np
__all__ = ['stretch', 'stretch_rgb', 'as_rgb']
def stretch_rgb(img, arg0=None, arg1=None, dtype=np.uint8):
    '''Variation of stretch() function that works per-channel on an RGB image

    Parameters
    ----------
    img : ndarray
        input image. It is *not modified* by this function
    min : integer, optional
        minimum value for output [default: 0]
    max : integer, optional
        maximum value for output [default: 255]
    dtype : dtype of output,optional
        [default: np.uint8]

    Returns
    -------
    img': ndarray
        resulting image. ndarray of same shape as `img` and type `dtype`.

    See Also
    --------
    stretch : function
    '''
    if img.ndim == 2:
        # Greyscale: plain per-image stretch.
        return stretch(img, arg0, arg1, dtype)
    if img.ndim == 3:
        # Multi-channel: stretch each channel independently, then restack.
        channels = [stretch(img[:, :, c], arg0, arg1, dtype)
                    for c in range(img.shape[2])]
        return np.dstack(channels)
    raise ValueError('mahotas.stretch_rgb: Only works for RGB images')
def stretch(img, arg0=None, arg1=None, dtype=np.uint8):
    '''
    img' = stretch(img, [dtype=np.uint8])
    img' = stretch(img, max, [dtype=np.uint8])
    img' = stretch(img, min, max, [dtype=np.uint8])

    Contrast stretch the image to the range [0, max] (first form) or
    [min, max] (second form).

    Parameters
    ----------
    img : ndarray
        input image. It is *not modified* by this function
    min : integer, optional
        minimum value for output [default: 0]
    max : integer, optional
        maximum value for output [default: 255]
    dtype : dtype of output,optional
        [default: np.uint8]

    Returns
    -------
    img': ndarray
        resulting image. ndarray of same shape as `img` and type `dtype`.

    Bugs
    ----
    If max > 255, then it truncates the values if dtype is not specified.
    '''
    # Decode the overloaded positional arguments into an output range
    # [lo, hi]; local names avoid shadowing the builtins min/max.
    if arg0 is None:
        lo, hi = 0, 255
    elif arg1 is None:
        lo, hi = 0, arg0
    else:
        lo, hi = arg0, arg1
    img = img.astype(np.double)
    img -= img.min()
    ptp = img.ptp()
    if not ptp:
        # Flat image: every pixel maps to the bottom of the output range.
        out = np.zeros(img.shape, dtype)
        if lo:
            out += lo
        return out
    img *= float(hi - lo) / ptp
    if lo:
        img += lo
    # Note: astype() truncates, it does not round.
    return img.astype(dtype)
def as_rgb(r, g, b):
    '''
    rgb = as_rgb(r, g, b)

    Returns an RGB image with ``r`` in the red channel, ``g`` in the green, and
    ``b`` in the blue. The channels are contrast stretched.

    If any of the channels is `None`, that channel is set to zero. The same can
    be achieved by passing ``0`` as that channels value. In fact, passing a
    number as a channel value will set the whole channel to that value.

    Example
    -------

    This shows a nice looking picture::

        z1 = np.linspace(0, np.pi)
        X,Y = np.meshgrid(z1, z1)
        red = np.sin(X)
        green = np.cos(4*Y)
        blue = X*Y

        plt.imshow(mahotas.as_rgb(red, green, blue))

    Notice that the scaling on the ``blue`` channel is so different from the
    other channels (from 0..2500 compared with 0..1), but ``as_rgb`` stretches
    each channel independently.

    Parameters
    ----------
    r,g,b : array-like or int, optional
        The channels can be of any type or None.
        At least one must be not None and all must have the same shape.

    Returns
    -------
    rgb : ndarray
        RGB ndarray
    '''
    # Find the first channel that is a real (non-scalar) array; its shape
    # becomes the reference shape for the whole RGB stack.
    for candidate in (r, g, b):
        if candidate is not None:
            candidate = np.array(candidate)
            shape = candidate.shape
            if shape != ():
                break
    else:
        raise ValueError('mahotas.as_rgb: Not all arguments can be None')

    def _channel(c):
        # Normalise one channel: None -> zeros, scalar -> constant plane,
        # matching array -> contrast stretched; anything else is an error.
        if c is None:
            return np.zeros(shape, np.uint8)
        c = np.asanyarray(c)
        if c.shape == ():
            c = np.tile(c, shape)
            return c.astype(np.uint8)
        elif c.shape != shape:
            sh = lambda c : (c.shape if c is not None else ' . ')
            raise ValueError('mahotas.as_rgb: Not all arguments have the same shape. Shapes were : %s' % [sh(r), sh(g), sh(b)])
        return stretch(c)

    return np.dstack([_channel(r), _channel(g), _channel(b)])
| gpl-2.0 |
havard024/prego | venv/lib/python2.7/site-packages/django/utils/feedgenerator.py | 73 | 15643 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
try:
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.timezone import is_aware
def rfc2822_date(date):
    """Format *date* per RFC 2822 (e.g. ``Fri, 28 May 2004 12:00:00 -0000``)."""
    # strftime() produces locale-dependent names, so the English month and
    # day names are spelled out by hand.
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')

    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))

    if not six.PY3:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')

    if not is_aware(date):
        # Naive datetimes get the RFC 2822 "unknown zone" marker.
        return time_str + '-0000'
    # email.Utils is not timezone aware, so compute the UTC offset here.
    offset = date.tzinfo.utcoffset(date)
    total_minutes = (offset.days * 24 * 60) + (offset.seconds // 60)
    hour, minute = divmod(total_minutes, 60)
    return time_str + '%+03d%02d' % (hour, minute)
def rfc3339_date(date):
    """Format *date* per RFC 3339 (e.g. ``2004-05-28T12:00:00Z``)."""
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    time_str = date.strftime('%Y-%m-%dT%H:%M:%S')

    if not six.PY3:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')

    if not is_aware(date):
        # Naive datetimes are assumed to be UTC.
        return time_str + 'Z'
    offset = date.tzinfo.utcoffset(date)
    total_minutes = (offset.days * 24 * 60) + (offset.seconds // 60)
    hour, minute = divmod(total_minutes, 60)
    return time_str + '%+03d:%02d' % (hour, minute)
def get_tag_uri(url, date):
    """
    Creates a TagURI.

    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    bits = urlparse(url)
    # The optional date part (",YYYY-MM-DD") only appears when a date is given.
    date_part = ('' if date is None
                 else ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d'))
    return 'tag:%s%s:%s/%s' % (bits.hostname, date_part, bits.path, bits.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"
    def __init__(self, title, link, description, language=None, author_email=None,
                 author_name=None, author_link=None, subtitle=None, categories=None,
                 feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        # Coerce everything to unicode up front so subclasses can emit the
        # values directly; strings_only leaves None (and numbers) untouched.
        to_unicode = lambda s: force_text(s, strings_only=True)
        if categories:
            categories = [force_text(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        # Channel-level metadata; extra keyword arguments are merged in below
        # so custom feed classes can carry additional fields.
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        self.feed.update(kwargs)
        self.items = []
    def add_item(self, title, link, description, author_email=None,
                 author_name=None, author_link=None, pubdate=None, comments=None,
                 unique_id=None, enclosure=None, categories=(), item_copyright=None,
                 ttl=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate, which is a datetime.datetime object, and
        enclosure, which is an instance of the Enclosure class.
        """
        to_unicode = lambda s: force_text(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'enclosure': enclosure,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        item.update(kwargs)
        self.items.append(item)
    def num_items(self):
        # Number of items added so far.
        return len(self.items)
    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}
    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass
    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}
    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass
    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError
    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()
    def latest_post_date(self):
        """
        Returns the latest item's pubdate. If none of them have a pubdate,
        this returns the current date/time.
        """
        updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
        if len(updates) > 0:
            updates.sort()
            return updates[-1]
        else:
            return datetime.datetime.now()
class Enclosure(object):
    "Represents an RSS enclosure"
    def __init__(self, url, length, mime_type):
        "All args are expected to be Python Unicode objects"
        # Keep length and type verbatim; normalise the URL to an
        # ASCII-safe URI.
        self.length = length
        self.mime_type = mime_type
        self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
    # Shared machinery for all RSS versions; subclasses set _version and
    # implement add_item_elements().
    mime_type = 'application/rss+xml; charset=utf-8'
    def write(self, outfile, encoding):
        # Emit <rss><channel>…</channel></rss> around root elements + items.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")
    def rss_attributes(self):
        # Attributes on the <rss> root; the atom namespace is needed for
        # the <atom:link rel="self"> element below.
        return {"version": self._version,
                "xmlns:atom": "http://www.w3.org/2005/Atom"}
    def write_items(self, handler):
        # One <item> element per added item.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")
    def add_root_elements(self, handler):
        # Channel-level metadata; optional fields are skipped when None.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None,
                                    {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])
    def endChannelElement(self, handler):
        handler.endElement("channel")
class RssUserland091Feed(RssFeed):
    """RSS 0.91 feed: items carry only a title, link and optional description."""
    _version = "0.91"
    def add_item_elements(self, handler, item):
        # 0.91 has no author/guid/enclosure elements, so only the basics
        # are emitted here.
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        description = item['description']
        if description is not None:
            handler.addQuickElement("description", description)
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"
    def add_item_elements(self, handler, item):
        # Full RSS 2.0 item: author, pubDate, comments, guid, ttl,
        # enclosure and categories, all optional except title/link.
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
        # Author information.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % \
                (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            # RSS has no name-only author element, so fall back to Dublin Core.
            handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            handler.addQuickElement("guid", item['unique_id'])
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])
        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("enclosure", '',
                {"url": item['enclosure'].url, "length": item['enclosure'].length,
                 "type": item['enclosure'].mime_type})
        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
    # Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
    mime_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"
    def write(self, outfile, encoding):
        # Atom has no <channel> wrapper: root elements and entries live
        # directly under <feed>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")
    def root_attributes(self):
        # Declare the Atom namespace; xml:lang only when a language is set.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}
    def add_root_elements(self, handler):
        # Feed-level metadata; optional fields are skipped when None.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
        if self.feed['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])
    def write_items(self, handler):
        # One <entry> element per added item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")
    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
        if item['pubdate'] is not None:
            handler.addQuickElement("updated", rfc3339_date(item['pubdate']))
        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")
        # Unique ID.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            # Derive a tag: URI from the link and publication date.
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)
        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})
        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("link", '',
                {"rel": "enclosure",
                 "href": item['enclosure'].url,
                 "length": item['enclosure'].length,
                 "type": item['enclosure'].mime_type})
        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
# Change this alias (not the class) to switch the project-wide default format.
DefaultFeed = Rss201rev2Feed
| mit |
elfnor/sverchok | node_scripts/templates/zeffii/spline_utils.py | 4 | 1467 | import mathutils
from mathutils import Vector
import math
# this file would be placed directly into the sverchokmaster directory when used.
def get_length(verts):
    """Return (total, segments): the summed polyline length of *verts* and
    the individual segment lengths between consecutive vertices."""
    total = 0
    segment_lengths = []
    # Pair each vertex with its successor and accumulate the distances.
    for a, b in zip(verts, verts[1:]):
        d = (a - b).length
        total += d
        segment_lengths.append(d)
    return total, segment_lengths
def get_verts_n_edges(verts, lengths, seg_width):
    # Resample the polyline described by `verts` (with per-segment `lengths`,
    # as returned by get_length) into points spaced `seg_width` apart,
    # walking the curve recursively.  The first and last original vertices
    # are always included.
    K = seg_width          # distance still to consume for the current step
    eps = 0.00001          # tolerance for "landed exactly on a vertex"
    new_points = []
    add_point = new_points.append
    def consume(K, A, idx, v1):
        # K: remaining step distance; A: remaining length of segment idx;
        # v1: start point within the segment (None = segment's first vertex).
        if idx > len(lengths)-2:
            return
        R = K - A
        # close enough to start fresh segment
        if (-eps <= R <= eps):
            K = seg_width
            idx += 1
            add_point(verts[idx])
            A = lengths[idx]
            consume(K, A, idx, None)
        # must divide segment, same idx
        elif (R < -eps):
            # rate = R / A
            rate = K / A
            if not v1:
                v1 = verts[idx]
            v2 = verts[idx+1]
            # Interpolate the sample point within the current segment
            # (verts are assumed to support lerp(); TODO confirm mathutils.Vector).
            vmid = v1.lerp(v2, rate)
            add_point(vmid)
            A = (vmid-v2).length
            consume(seg_width, A, idx, v1)
        # consume segment, update k, update idx
        elif (R > eps):
            A = lengths[idx+1]
            consume(R, A, idx+1, None)
    add_point(verts[0])
    consume(K, lengths[0], 0, None)
    add_point(verts[-1])
    return new_points
| gpl-3.0 |
eicher31/compassion-switzerland | partner_communication_switzerland/wizards/end_contract_wizard.py | 3 | 1099 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2017 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, fields, api
class EndContractWizard(models.TransientModel):
    # Extends the base end-of-contract wizard with an optional departure
    # communication for the sponsor.
    _inherit = 'end.contract.wizard'

    # Whether to generate the "unplanned exit" communication when ending.
    generate_communication = fields.Boolean(
        'Create depart communication')

    @api.multi
    def end_contract(self):
        """End the contract, optionally queueing the child-departure letter."""
        self.ensure_one()
        if self.generate_communication:
            # Lifecycle communication configured for unplanned child exits.
            exit_config = self.env.ref(
                'partner_communication_switzerland.'
                'lifecycle_child_unplanned_exit')
            # Generate the communication without sending it automatically.
            self.contract_id.with_context(
                default_object_ids=self.contract_id.id,
                default_auto_send=False).send_communication(exit_config)
        return super(EndContractWizard, self).end_contract()
| agpl-3.0 |
fxb22/BioGUI | Utils/GetMotifs.py | 1 | 5020 | import CirclePlotClass as cpc
class GetMotifs():
    # Derives secondary-structure motifs (sheet orderings and helix
    # positions) from a parsed PDB structure via CirclePlotClass.
    def DefineColLists(self):
        # Build a per-residue colour list: -1 = unassigned, SSE type + 1
        # for secondary-structure elements, 2 for linked strands.
        self.colList = []
        r = 0
        while r < self.total_length:
            self.colList.append(-1)
            r += 1
        self.sse = self.cpss.GetSSE()
        self.secLinks = self.cpss.GetSecLinks()
        for s in self.sse:
            j = s[0]
            while j < s[1]:
                self.colList[j] = s[2] + 1
                j += 1
        for s in self.secLinks:
            if s[2] > 0:
                j = s[0]
                while j < s[1]:
                    self.colList[int(j)] = 2
                    j += 1
    def CheckSse(self):
        # Merge overlapping SSE intervals, then keep only those not
        # contained in another interval; results go into self.sheets.
        shets = []
        for s in self.sse:
            f = s[0]
            n = s[1]
            c = s[2]
            for e in self.sse:
                # Extend the end when intervals overlap on the right...
                if f < e[0]:
                    if n >= e[0]:
                        if n < e[1]:
                            n = e[1]
                # ...and the start when they overlap on the left.
                if f <= e[1]:
                    if n <= e[1]:
                        if f > e[0]:
                            f = e[0]
            if not [f, n, c] in shets:
                shets.append([f, n, c])
        for s in shets:
            go = True
            for e in shets:
                if s[0] > e[0] and s[0] < e[1]:
                    go = False
                if s[1] > e[0] and s[1] < e[1]:
                    go = False
            if go:
                self.sheets.append(s)
    def CheckSecLinks(self):
        # For each link, find the sheets containing its two endpoints and
        # record a motif pair (+1 = same chain code, -1 = different).
        # Links flagged with s[2] == 1 also mark helix start positions.
        for s in self.secLinks:
            f = -1
            n = -1
            for i, e in enumerate(self.sheets):
                if s[0] >= e[0] and s[0] < e[1]:
                    f = i
                if s[1] > e[0] and s[1] <= e[1]:
                    n = i
            if f >= 0 and n >= 0:
                t = -1
                if self.sheets[f][2] == self.sheets[n][2]:
                    t = 1
                a = [self.sheets[f][:2], self.sheets[n][:2], t]
                if not a in self.motif:
                    if not a[0] == a[1]:
                        self.motif.append(a)
            if s[2] == 1:
                self.helices.append(s[0])
    def FormMotifs(self):
        # Reset motif state and rebuild it from SSEs and links.
        self.motif = []
        self.helices = []
        self.sheets = []
        self.CheckSse()
        self.CheckSecLinks()
    def FormFrequencies(self, order):
        # Count how often each sheet start position participates in a motif.
        freqs = dict()
        for o in order:
            if not o[0][0] in freqs:
                freqs[o[0][0]] = 1
            else:
                freqs[o[0][0]] += 1
            if not o[1][0] in freqs:
                freqs[o[1][0]] = 1
            else:
                freqs[o[1][0]] += 1
        return freqs
    def FindMotif(self, n):
        # Pop and return the first motif touching sheet start `n`,
        # normalised so that the matching endpoint comes first.
        # Returns [-1, sentinel] when no motif matches.
        i = 0
        out = [-1, [[n, -1], [-1, -1], -1]]
        while i < len(self.motif):
            if self.motif[i][0][0] == n:
                out = [i, self.motif[i]]
                self.motif.pop(i)
                i = len(self.motif)
            elif self.motif[i][1][0] == n:
                out = [i, [self.motif[i][1], self.motif[i][0], self.motif[i][2]]]
                self.motif.pop(i)
                i = len(self.motif)
            i += 1
        return out
    def FormGuess(self, freqs):
        # Chain motifs into ordered sheet sequences (self.orders), starting
        # from sheets that appear exactly once, and reverse a chain when it
        # ends before it starts.
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source -- verify the loop-body boundaries against upstream sverchok.
        self.orders = []
        fk = freqs.keys()
        i = 0
        while i < len(fk):
            if freqs[fk[i]] == 1:
                freqs[fk[i]] -= 1
                m = self.FindMotif(fk[i])
                self.orders.append([[m[1][0], 1]])
                prevDir = 1
                while m[1][1][0] >= 0 and freqs[m[1][1][0]] >= 1:
                    prevDir = m[1][2]
                    self.orders[-1].append([m[1][1], m[1][2]])
                    freqs[m[1][0][0]] -= 1
                    m = self.FindMotif(m[1][1][0])
                freqs[m[1][0][0]] -= 1
                i = -1
                if self.orders[-1][-1][0][0] < self.orders[-1][0][0][0]:
                    # Chain runs backwards: rebuild it reversed so the
                    # smaller start position comes first.
                    temp = []
                    temp.append([self.orders[-1][-1][0], 1])
                    idk = 1
                    while idk < len(self.orders[-1]):
                        temp.append([self.orders[-1][-idk-1][0],
                                     self.orders[-1][-idk][1]])
                        idk += 1
                    self.orders[-1] = temp
            elif i == len(fk) - 1:
                # No unique starting sheet left: break a tie by consuming
                # one count from the first key and rescan.
                if freqs[fk[0]] > 1:
                    freqs[fk[0]] -= 1
                    i = -1
            i += 1
    def MotifFolds(self):
        # Full pipeline: motifs -> frequencies -> ordered chains.
        self.FormMotifs()
        freqs = self.FormFrequencies(self.motif)
        self.FormGuess(freqs)
    def GetExec(self, rec, frSize, pdbMat, meth):
        # Entry point: parse the structure, colour residues, derive motif
        # orderings; returns [[orders], helices, secLinks].
        self.cpss = cpc.SecondaryStructure()
        self.cpss.GetExec(rec, frSize, pdbMat, meth)
        self.alpha_carb_pos = self.cpss.cp.GetCarbonPos()
        self.chainEnds = self.cpss.cp.GetChainEnds()
        self.total_length = self.cpss.cp.GetLength()
        self.residueList = self.cpss.cp.GetResidues()
        self.DefineColLists()
        self.MotifFolds()
        return [[self.orders], self.helices, self.secLinks]
| gpl-2.0 |
apache/spark | examples/src/main/python/ml/tf_idf_example.py | 27 | 1863 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.feature import HashingTF, IDF, Tokenizer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Minimal TF-IDF pipeline: tokenize sentences, hash words into term
    # frequency vectors, then rescale them with inverse document frequency.
    spark = SparkSession\
        .builder\
        .appName("TfIdfExample")\
        .getOrCreate()

    # $example on$
    sentenceData = spark.createDataFrame([
        (0.0, "Hi I heard about Spark"),
        (0.0, "I wish Java could use case classes"),
        (1.0, "Logistic regression models are neat")
    ], ["label", "sentence"])

    tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
    wordsData = tokenizer.transform(sentenceData)

    hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=20)
    featurizedData = hashingTF.transform(wordsData)
    # alternatively, CountVectorizer can also be used to get term frequency vectors

    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(featurizedData)
    rescaledData = idfModel.transform(featurizedData)

    rescaledData.select("label", "features").show()
    # $example off$

    spark.stop()
| apache-2.0 |
rgeleta/odoo | openerp/tools/test_config.py | 456 | 1418 | # -*- coding: utf-8 -*-
""" Tests for the configuration file/command-line arguments. """
# This test should be run from its directory.
# TODO A configmanager object cannot parse multiple times a config file
# and/or the command line, preventing to 'reload' a configuration.
import os
import config
# Config file used by cases 3 and 4 below; sets osv_memory_age_limit = 3.4.
config_file_00 = os.path.join(os.path.dirname(__file__),'test-config-values-00.conf')

# 1. No config file, no command-line arguments (a.k.a. default values)
conf = config.configmanager()
conf.parse_config()
assert conf['osv_memory_age_limit'] == 1.0
assert os.path.join(conf['root_path'], 'addons') == conf['addons_path']

# 2. No config file, some command-line arguments
conf = config.configmanager()
# mess with the optparse.Option definition to allow an invalid path
conf.casts['addons_path'].action = 'store'
conf.parse_config(['--addons-path=/xyz/dont-exist', '--osv-memory-age-limit=2.3'])
assert conf['osv_memory_age_limit'] == 2.3
assert conf['addons_path'] == '/xyz/dont-exist'

# 3. Config file, no command-line arguments
conf = config.configmanager()
conf.parse_config(['-c', config_file_00])
assert conf['osv_memory_age_limit'] == 3.4

# 4. Config file, and command-line arguments
# Command-line options take precedence over the config file value.
conf = config.configmanager()
conf.parse_config(['-c', config_file_00, '--osv-memory-age-limit=2.3'])
assert conf['osv_memory_age_limit'] == 2.3
| agpl-3.0 |
navodissa/python-flask | flask/lib/python2.7/site-packages/openid/dh.py | 168 | 1608 | from openid import cryptutil
from openid import oidutil
def strxor(x, y):
    """Return the character-wise XOR of two equal-length strings.

    Raises ValueError when the inputs differ in length.
    """
    if len(x) != len(y):
        raise ValueError('Inputs to strxor must have the same length')
    # PEP 3113 removed tuple-parameter unpacking in lambdas, so the
    # original ``lambda (a, b): ...`` form is a syntax error on Python 3.
    # Pair the characters with zip() and unpack in a generator expression
    # instead; behaviour is identical on Python 2.
    return "".join(chr(ord(a) ^ ord(b)) for a, b in zip(x, y))
class DiffieHellman(object):
    # Classic Diffie-Hellman key exchange (Python 2 code: uses long()).
    # Default 1536-bit MODP modulus and generator from the OpenID spec.
    DEFAULT_MOD = 155172898181473697471232257763715539915724801966915404479707795314057629378541917580651227423698188993727816152646631438561595825688188889951272158842675419950341258706556549803580104870537681476726513255747040765857479291291572334510643245094715007229621094194349783925984760375594985848253359305585439638443L
    DEFAULT_GEN = 2
    def fromDefaults(cls):
        # Alternate constructor using the spec's default modulus/generator.
        return cls(cls.DEFAULT_MOD, cls.DEFAULT_GEN)
    fromDefaults = classmethod(fromDefaults)
    def __init__(self, modulus, generator):
        self.modulus = long(modulus)
        self.generator = long(generator)
        # Pick a random private exponent in [1, modulus - 1).
        self._setPrivate(cryptutil.randrange(1, modulus - 1))
    def _setPrivate(self, private):
        """This is here to make testing easier"""
        self.private = private
        # Public value: g^x mod p.
        self.public = pow(self.generator, self.private, self.modulus)
    def usingDefaultValues(self):
        # True when this instance uses the spec's default parameters.
        return (self.modulus == self.DEFAULT_MOD and
                self.generator == self.DEFAULT_GEN)
    def getSharedSecret(self, composite):
        # Shared secret: (other party's public)^x mod p.
        return pow(composite, self.private, self.modulus)
    def xorSecret(self, composite, secret, hash_func):
        # Mask `secret` by XOR-ing it with the hashed shared secret.
        dh_shared = self.getSharedSecret(composite)
        hashed_dh_shared = hash_func(cryptutil.longToBinary(dh_shared))
        return strxor(secret, hashed_dh_shared)
| bsd-3-clause |
wkoathp/glance | glance/tests/unit/test_store_location.py | 19 | 3200 | # Copyright 2011-2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
import mock
from glance.common import exception
from glance.common import store_utils
import glance.location
from glance.tests.unit import base
# Minimal store configuration used by the tests below: file backend as
# default plus dummy Swift credentials.
CONF = {'default_store': 'file',
'swift_store_auth_address': 'localhost:8080',
'swift_store_container': 'glance',
'swift_store_user': 'user',
'swift_store_key': 'key',
'default_swift_reference': 'store_1'
}
class TestStoreLocation(base.StoreClearingUnitTest):
    # Exercises glance.location.StoreLocations: size propagation when a
    # location is added, and rejection of restricted URI schemes.
    class FakeImageProxy(object):
        # Bare stand-in for an image proxy with just the attributes
        # StoreLocations touches.
        size = None
        context = None
        store_api = mock.Mock()
        store_utils = store_utils

    def test_add_location_for_image_without_size(self):
        def fake_get_size_from_backend(uri, context=None):
            # Pretend every backend object is exactly 1 byte.
            return 1

        self.stubs.Set(glance_store, 'get_size_from_backend',
                       fake_get_size_from_backend)

        with mock.patch('glance.location._check_image_location'):
            loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}}
            loc2 = {'url': 'file:///fake2.img.tar.gz', 'metadata': {}}

            # Test for insert location
            image1 = TestStoreLocation.FakeImageProxy()
            locations = glance.location.StoreLocations(image1, [])
            locations.insert(0, loc2)
            self.assertEqual(1, image1.size)

            # Test for set_attr of _locations_proxy
            image2 = TestStoreLocation.FakeImageProxy()
            locations = glance.location.StoreLocations(image2, [loc1])
            locations[0] = loc2
            self.assertIn(loc2, locations)
            self.assertEqual(1, image2.size)

    def test_add_location_with_restricted_sources(self):
        # 'file' and 'filesystem' schemes are restricted: adding them must
        # raise BadStoreUri and leave the collection unchanged.
        loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}}
        loc2 = {'url': 'swift+config:///xxx', 'metadata': {}}
        loc3 = {'url': 'filesystem:///foo.img.tar.gz', 'metadata': {}}

        # Test for insert location
        image1 = TestStoreLocation.FakeImageProxy()
        locations = glance.location.StoreLocations(image1, [])
        self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc1)
        self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc3)
        self.assertNotIn(loc1, locations)
        self.assertNotIn(loc3, locations)

        # Test for set_attr of _locations_proxy
        image2 = TestStoreLocation.FakeImageProxy()
        locations = glance.location.StoreLocations(image2, [loc1])
        self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc2)
        self.assertNotIn(loc2, locations)
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/test/test_robotparser.py | 32 | 6980 | import unittest, StringIO, robotparser
from test import test_support
from urllib2 import urlopen, HTTPError
HAVE_HTTPS = True
try:
from urllib2 import HTTPSHandler
except ImportError:
HAVE_HTTPS = False
class RobotTestCase(unittest.TestCase):
def __init__(self, index, parser, url, good, agent):
unittest.TestCase.__init__(self)
if good:
self.str = "RobotTest(%d, good, %s)" % (index, url)
else:
self.str = "RobotTest(%d, bad, %s)" % (index, url)
self.parser = parser
self.url = url
self.good = good
self.agent = agent
def runTest(self):
if isinstance(self.url, tuple):
agent, url = self.url
else:
url = self.url
agent = self.agent
if self.good:
self.assertTrue(self.parser.can_fetch(agent, url))
else:
self.assertFalse(self.parser.can_fetch(agent, url))
def __str__(self):
return self.str
# Module-level suite populated by the RobotTest() helper below.
tests = unittest.TestSuite()


def RobotTest(index, robots_txt, good_urls, bad_urls,
              agent="test_robotparser"):
    """Parse *robots_txt* and add one RobotTestCase per URL to the suite.

    URLs in *good_urls* are expected to be fetchable, URLs in *bad_urls*
    to be disallowed.  Entries may be (agent, url) tuples to override the
    default *agent* for a single case.
    """
    lines = StringIO.StringIO(robots_txt).readlines()
    parser = robotparser.RobotFileParser()
    parser.parse(lines)
    for url in good_urls:
        tests.addTest(RobotTestCase(index, parser, url, 1, agent))
    for url in bad_urls:
        tests.addTest(RobotTestCase(index, parser, url, 0, agent))
# ---------------------------------------------------------------------------
# Table-driven fixtures: each numbered case defines a robots.txt body plus
# the URLs expected to be allowed (good) and disallowed (bad).
# ---------------------------------------------------------------------------
# Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002)
# 1.
doc = """
User-agent: *
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
Disallow: /tmp/ # these will soon disappear
Disallow: /foo.html
"""
good = ['/','/test.html']
bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html']
RobotTest(1, doc, good, bad)
# 2.
doc = """
# robots.txt for http://www.example.com/
User-agent: *
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
# Cybermapper knows where to go.
User-agent: cybermapper
Disallow:
"""
good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')]
bad = ['/cyberworld/map/index.html']
RobotTest(2, doc, good, bad)
# 3.
doc = """
# go away
User-agent: *
Disallow: /
"""
good = []
bad = ['/cyberworld/map/index.html','/','/tmp/']
RobotTest(3, doc, good, bad)
# Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002)
# 4.
doc = """
User-agent: figtree
Disallow: /tmp
Disallow: /a%3cd.html
Disallow: /a%2fb.html
Disallow: /%7ejoe/index.html
"""
good = [] # XFAIL '/a/b.html'
bad = ['/tmp','/tmp.html','/tmp/a.html',
       '/a%3cd.html','/a%3Cd.html','/a%2fb.html',
       '/~joe/index.html'
       ]
RobotTest(4, doc, good, bad, 'figtree')
# 5. Same rules, but the agent string is a full product token that should
# still match the "figtree" record.
RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04')
# 6.
doc = """
User-agent: *
Disallow: /tmp/
Disallow: /a%3Cd.html
Disallow: /a/b.html
Disallow: /%7ejoe/index.html
"""
good = ['/tmp',] # XFAIL: '/a%2fb.html'
bad = ['/tmp/','/tmp/a.html',
       '/a%3cd.html','/a%3Cd.html',"/a/b.html",
       '/%7Ejoe/index.html']
RobotTest(6, doc, good, bad)
# From bug report #523041
# 7.
doc = """
User-Agent: *
Disallow: /.
"""
good = ['/foo.html']
bad = [] # Bug report says "/" should be denied, but that is not in the RFC
RobotTest(7, doc, good, bad)
# From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364
# 8.
doc = """
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
"""
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
RobotTest(8, doc, good, bad, agent="Googlebot")
# 9. This file is incorrect because "Googlebot" is a substring of
# "Googlebot-Mobile", so test 10 works just like test 9.
doc = """
User-agent: Googlebot
Disallow: /
User-agent: Googlebot-Mobile
Allow: /
"""
good = []
bad = ['/something.jpg']
RobotTest(9, doc, good, bad, agent="Googlebot")
good = []
bad = ['/something.jpg']
RobotTest(10, doc, good, bad, agent="Googlebot-Mobile")
# 11. Get the order correct.
doc = """
User-agent: Googlebot-Mobile
Allow: /
User-agent: Googlebot
Disallow: /
"""
good = []
bad = ['/something.jpg']
RobotTest(11, doc, good, bad, agent="Googlebot")
good = ['/something.jpg']
bad = []
RobotTest(12, doc, good, bad, agent="Googlebot-Mobile")
# 13. Google also got the order wrong in #8. You need to specify the
# URLs from more specific to more general.
doc = """
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
"""
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
RobotTest(13, doc, good, bad, agent="googlebot")
# 14. For issue #6325 (query string support)
doc = """
User-agent: *
Disallow: /some/path?name=value
"""
good = ['/some/path']
bad = ['/some/path?name=value']
RobotTest(14, doc, good, bad)
# 15. For issue #4108 (obey first * entry)
doc = """
User-agent: *
Disallow: /some/path
User-agent: *
Disallow: /another/path
"""
good = ['/another/path']
bad = ['/some/path']
RobotTest(15, doc, good, bad)
# 16. Empty query (issue #17403). Normalizing the url first.
doc = """
User-agent: *
Allow: /some/path?
Disallow: /another/path?
"""
good = ['/some/path?']
bad = ['/another/path?']
RobotTest(16, doc, good, bad)
class NetworkTestCase(unittest.TestCase):
    """Live-network checks; skipped unless the 'network' resource is enabled."""

    def testPasswordProtectedSite(self):
        test_support.requires('network')
        with test_support.transient_internet('mueblesmoraleda.com'):
            url = 'http://mueblesmoraleda.com'
            robots_url = url + "/robots.txt"
            # First check the URL is usable for our purposes, since the
            # test site is a bit flaky.
            try:
                urlopen(robots_url)
            except HTTPError as e:
                if e.code not in {401, 403}:
                    self.skipTest(
                        "%r should return a 401 or 403 HTTP error, not %r"
                        % (robots_url, e.code))
            else:
                self.skipTest(
                    "%r should return a 401 or 403 HTTP error, not succeed"
                    % (robots_url))
            parser = robotparser.RobotFileParser()
            parser.set_url(url)
            try:
                parser.read()
            except IOError:
                self.skipTest('%s is unavailable' % url)
            # An unreadable (401/403) robots.txt must be treated as
            # "disallow everything".
            self.assertEqual(parser.can_fetch("*", robots_url), False)

    @unittest.skipUnless(HAVE_HTTPS, 'need SSL support to download license')
    @test_support.system_must_validate_cert
    def testPythonOrg(self):
        test_support.requires('network')
        with test_support.transient_internet('www.python.org'):
            parser = robotparser.RobotFileParser(
                "https://www.python.org/robots.txt")
            parser.read()
            self.assertTrue(
                parser.can_fetch("*", "https://www.python.org/robots.txt"))
def test_main():
    """Run the table-driven suite first, then the network-dependent cases."""
    for suite in (tests, NetworkTestCase):
        test_support.run_unittest(suite)


if __name__ == '__main__':
    test_support.verbose = 1
    test_main()
| gpl-2.0 |
michael-dev2rights/ansible | lib/ansible/modules/cloud/openstack/os_image.py | 4 | 6999 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
# Ansible metadata describing the maturity and support level of this module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
id:
version_added: "2.4"
description:
- The Id of the image
required: false
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space (in GB) required to boot this image
required: false
default: None
min_ram:
description:
- The minimum ram (in MB) required to boot this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'yes'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
# shade is an optional dependency; its absence is reported via fail_json
# at runtime (in main) rather than as an import error.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
def main():
    """Module entry point: ensure an image is present in or absent from Glance.

    Reads the module parameters declared below, talks to the cloud through
    shade, and exits via module.exit_json / module.fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        name = dict(required=True),
        id = dict(default=None),
        disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
        container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
        owner = dict(default=None),
        min_disk = dict(type='int', default=0),
        min_ram = dict(type='int', default=0),
        is_public = dict(type='bool', default=False),
        filename = dict(default=None),
        ramdisk = dict(default=None),
        kernel = dict(default=None),
        properties = dict(type='dict', default={}),
        state = dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        changed = False
        image = cloud.get_image(name_or_id=module.params['name'])

        if module.params['state'] == 'present':
            if not image:
                # Only pass an explicit image id when the user supplied one.
                kwargs = {}
                if module.params['id'] is not None:
                    kwargs['id'] = module.params['id']
                image = cloud.create_image(
                    name=module.params['name'],
                    filename=module.params['filename'],
                    disk_format=module.params['disk_format'],
                    container_format=module.params['container_format'],
                    wait=module.params['wait'],
                    timeout=module.params['timeout'],
                    is_public=module.params['is_public'],
                    min_disk=module.params['min_disk'],
                    min_ram=module.params['min_ram'],
                    **kwargs
                )
                changed = True
                # Without wait the upload may still be in progress; report
                # immediately and skip the property update below.
                if not module.params['wait']:
                    module.exit_json(changed=changed, image=image, id=image.id)
            # Properties are (re)applied even for a pre-existing image.
            cloud.update_image_properties(
                image=image,
                kernel=module.params['kernel'],
                ramdisk=module.params['ramdisk'],
                **module.params['properties'])
            image = cloud.get_image(name_or_id=image.id)
            module.exit_json(changed=changed, image=image, id=image.id)
        elif module.params['state'] == 'absent':
            if not image:
                changed = False
            else:
                cloud.delete_image(
                    name_or_id=module.params['name'],
                    wait=module.params['wait'],
                    timeout=module.params['timeout'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 |
czpython/django-cms | cms/test_utils/project/placeholderapp/views.py | 4 | 3780 | from django.http import HttpResponse
from django.shortcuts import render
from django.template import RequestContext
from django.template.engine import Engine
from django.views.generic import DetailView
from cms.test_utils.project.placeholderapp.models import (
Example1, MultilingualExample1, CharPksExample)
from cms.utils import get_language_from_request
def example_view(request):
    """Render the placeholder demo page with every Example1 instance."""
    context = {'examples': Example1.objects.all()}
    return render(request, 'placeholderapp.html', context)
def _base_detail(request, instance, template_name='detail.html',
                 item_name="char_1", template_string='',):
    """Shared detail rendering for the example models.

    Renders *instance* with *template_name*, or, when *template_string*
    is non-empty, by compiling that string with the default template
    engine.  When a CMS toolbar is attached to the request, the instance
    is registered on it so frontend editing works.
    """
    context = {}
    context['instance'] = instance
    # Templates expect an *instance* of the model class here, not the class.
    context['instance_class'] = instance.__class__()
    context['item_name'] = item_name
    if hasattr(request, 'toolbar'):
        request.toolbar.set_object(instance)
    if template_string:
        context = RequestContext(request=request, dict_=context)
        engine = Engine.get_default()
        template = engine.from_string(template_string)
        return HttpResponse(template.render(context))
    else:
        return render(request, template_name, context)
def list_view_multi(request):
    """List MultilingualExample1 objects in the request's language."""
    language = get_language_from_request(request)
    context = {
        'examples': MultilingualExample1.objects.language(language).all(),
        'instance_class': MultilingualExample1,
    }
    return render(request, 'list.html', context)
def detail_view_multi(request, pk, template_name='detail_multi.html',
                      item_name="char_1", template_string='',):
    """Detail page for one MultilingualExample1 in the request's language."""
    language = get_language_from_request(request)
    instance = MultilingualExample1.objects.language(language).get(pk=pk)
    return _base_detail(request, instance, template_name, item_name,
                        template_string)
def detail_view_multi_unfiltered(request, pk, template_name='detail_multi.html',
                                 item_name="char_1", template_string='',):
    """Detail page for a MultilingualExample1 without language filtering."""
    instance = MultilingualExample1.objects.get(pk=pk)
    return _base_detail(request, instance, template_name, item_name,
                        template_string)
def list_view(request):
    """List every Example1 instance."""
    context = {
        'examples': Example1.objects.all(),
        'instance_class': Example1,
    }
    return render(request, 'list.html', context)
def detail_view(request, pk, template_name='detail.html', item_name="char_1",
                template_string='',):
    """Detail page for an Example1.

    Staff users with an active toolbar may view unpublished instances;
    everyone else only sees published ones.
    """
    if request.user.is_staff and request.toolbar:
        instance = Example1.objects.get(pk=pk)
    else:
        instance = Example1.objects.get(pk=pk, publish=True)
    return _base_detail(request, instance, template_name, item_name, template_string)
def latest_view(request):
    """Show the detail page of the most recently created Example1."""
    newest = Example1.objects.latest('id')
    return detail_view(request, pk=newest.pk)
def detail_view_char(request, pk, template_name='detail.html', item_name="char_1",
                     template_string='',):
    """Detail page for a CharPksExample looked up by its character pk."""
    instance = CharPksExample.objects.get(pk=pk)
    return _base_detail(request, instance, template_name,
                        item_name, template_string)
class ClassDetail(DetailView):
    """Class-based detail view for Example1.

    Mirrors detail_view(): when ``template_string`` is set, the response is
    rendered from that string instead of ``template_name``.
    """
    model = Example1
    template_name = "detail.html"
    # Optional inline template source; overrides template_name when non-empty.
    template_string = ''

    def render_to_response(self, context, **response_kwargs):
        if self.template_string:
            context = RequestContext(request=self.request, dict_=context)
            engine = Engine.get_default()
            template = engine.from_string(self.template_string)
            return HttpResponse(template.render(context))
        else:
            return super(ClassDetail, self).render_to_response(context, **response_kwargs)

    def get_context_data(self, **kwargs):
        context = super(ClassDetail, self).get_context_data(**kwargs)
        # Templates use this to introspect the model being displayed.
        context['instance_class'] = self.model
        return context
| bsd-3-clause |
mgoral/subconvert | src/subconvert/cli/syncparse.py | 1 | 5412 | #-*- coding: utf-8 -*-
"""
Copyright (C) 2016 Michal Goral.
This file is part of Subconvert
Subconvert is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Subconvert is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Subconvert. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import collections
from subconvert.parsing.Offset import SyncPoint
from subconvert.parsing.FrameTime import FrameTime
from subconvert.utils.SubException import SubException, SubAssert
from subconvert.utils.Locale import _
# (sign, hours, minutes, seconds, milliseconds); sign is +1/-1 for relative
# time specs and None for absolute ones.
_Time = collections.namedtuple('Time', ['sign', 'h', 'm', 's', 'ms'])
class _Request:
    """A single parsed sync instruction (global offset or per-subtitle sync)."""

    class Type:
        # Kind of request: a uniform shift vs. an explicit sync point.
        OFFSET = 1
        SYNC = 2

    def __init__(self):
        self.type_ = None   # one of _Request.Type
        self.sub_no = None  # 0-based subtitle index; negative counts from the end
        self.time = None    # _Time tuple
        self.sign = None    # unused; the sign is carried on self.time

    def to_frametime(self, fps):
        # Collapse the h/m/s/ms fields into seconds, honouring the sign
        # (absolute specs have sign None and are treated as positive).
        ts = self.time
        secs = 3600 * ts.h + 60 * ts.m + ts.s + float(ts.ms)/1000
        sign = self.time.sign if self.time.sign else 1
        return FrameTime(fps, seconds=secs * sign)
def _tokenize_time(timestr):
    """Parse a time spec such as '+1h2m3s4ms' into a _Time tuple.

    Units may appear in any order but each at most once; a leading '+' or
    '-' marks the spec as relative.  Raises SubException/assertion errors
    for empty, duplicated-unit, unknown-unit or partially parsed input.
    """
    timestr = re.sub(r'\s+', '', timestr)
    SubAssert(timestr, _('Sync: time spec cannot be empty'))
    time_args = dict(sign=None, h=0, m=0, s=0, ms=0)
    if timestr[0] in '+-':
        time_args['sign'] = int('%s1' % timestr[0])
        timestr = timestr[1:]
    found_units = set()
    expr = re.compile(r'''(?P<value>\d+)(?P<unit>[a-zA-Z]+)''')
    parsed_len = 0
    for elem in expr.finditer(timestr):
        val = elem.group('value')
        unit = elem.group('unit')
        SubAssert(unit not in found_units,
                  _('Sync: non-unique time units in time spec'))
        found_units.add(unit)
        time_args[unit] = int(val)
        # Track how much of the input the regex consumed so trailing or
        # interleaved junk is detected below.
        parsed_len += (len(unit) + len(val))
    SubAssert(parsed_len == len(timestr),
              _('Sync: some characters not parsed'))
    try:
        # An unknown unit key makes the namedtuple constructor raise.
        return _Time(**time_args)
    except TypeError:
        raise SubException(_('Sync: incorrect time spec units'))
def _tokenize_offset(offset):
    """Build an OFFSET request from a signed (relative) time spec."""
    parsed = _tokenize_time(offset.strip())
    SubAssert(parsed.sign,
              _('Sync: offset must be relative. Did you forget a +/- sign?'))
    request = _Request()
    request.type_ = _Request.Type.OFFSET
    request.time = parsed
    return request
def _tokenize_sync(sub_no, sync):
    """Build a SYNC request from a '<sub_no>: <time spec>' pair."""
    sub_no = sub_no.strip()
    sync = sync.strip()
    SubAssert(sub_no, _('Sync: expected subtitle number'))
    SubAssert(sync, _('Sync: expected time spec'))
    try:
        sub_no = int(sub_no)
    except ValueError:
        raise SubException(_('Sync: incorrect subtitle number: %s' % sub_no))
    SubAssert(sub_no != 0, _('Sync: incorrect subtitle number: %s' % sub_no))
    req = _Request()
    req.type_ = _Request.Type.SYNC
    # User input is 1-based; convert positive numbers to 0-based indices
    # and keep negative ones (counted from the end) unchanged.
    req.sub_no = sub_no if sub_no < 0 else sub_no - 1
    req.time = _tokenize_time(sync)
    return req
def _tokenize_request(s):
    """Split a comma-separated sync spec into a list of _Request objects.

    Each non-empty chunk is either '<time spec>' (an offset) or
    '<sub_no>: <time spec>' (a sync point).
    """
    requests = []
    for chunk in s.split(','):
        chunk = chunk.strip()
        if not chunk:
            continue
        left, sep, remainder = chunk.partition(':')
        if sep:
            requests.append(_tokenize_sync(left, remainder))
        else:
            requests.append(_tokenize_offset(left))
    return requests
def _abs_index(index, list_len):
if index >= 0:
return index
return list_len + index
def _offset_subtitles(req, subs):
    """Shift every subtitle by the request's relative time delta.

    Returns one SyncPoint per subtitle; asserts that no shifted time
    falls below zero.
    """
    points = []
    # fps is taken from the first subtitle; the delta is the same for all.
    ft = req.to_frametime(subs[0].fps)
    for i, sub in enumerate(subs):
        sp = SyncPoint(i, sub.start + ft, sub.end + ft)
        SubAssert(sp.start.fullSeconds >= 0 and sp.end.fullSeconds >= 0,
                  _('Sync: incorrect offset. '
                    'Resulting subtitle time would be lower than 0'))
        points.append(sp)
    return points
def _sync_subtitles(requests, subs):
    """Create SyncPoints for explicit per-subtitle sync requests."""
    points = []
    for req in requests:
        SubAssert(req.type_ == _Request.Type.SYNC,
                  _('Sync: expected sync request'))
        abs_sub_no = _abs_index(req.sub_no, len(subs))
        SubAssert(abs_sub_no >= 0 and abs_sub_no < len(subs),
                  _('Sync: incorrect subtitle number: %d' % req.sub_no))
        sub = subs[abs_sub_no]
        ft = req.to_frametime(sub.fps)
        sp = None
        if req.time.sign is not None:
            # Relative spec: shift both ends of this subtitle by the delta.
            sp = SyncPoint(abs_sub_no, sub.start + ft, sub.end + ft)
            SubAssert(sp.start.fullSeconds >= 0 and sp.end.fullSeconds >= 0,
                      _('Sync: incorrect time spec. '
                        'Resulting subtitle time would be lower than 0'))
        else:
            # Absolute spec: move the start, preserving the original duration.
            delta = sub.end - sub.start
            sp = SyncPoint(abs_sub_no, ft, ft + delta)
        points.append(sp)
    return points
def parse(s, subs):
    """Parses a given string and creates a list of SyncPoints.

    Args:
        s: comma-separated sync spec, e.g. '+5s' or '1: 1m30s, -1: 1h2m'.
        subs: sequence of subtitles the spec refers to.

    Returns:
        A list of SyncPoint objects; empty when *subs* is empty.
    """
    if not subs:
        return []
    requests = _tokenize_request(s)
    # A lone relative offset applies uniformly to every subtitle;
    # anything else is interpreted as a set of per-subtitle sync points.
    if len(requests) == 1 and requests[0].type_ == _Request.Type.OFFSET:
        return _offset_subtitles(requests[0], subs)
    return _sync_subtitles(requests, subs)
| gpl-3.0 |
ederfmartins/spojrec | crawler/util.py | 1 | 1390 | # -*- coding: utf-8 -*-
import sys
from lxml.html import parse
from lxml.html import tostring
from urllib2 import urlopen
from constants import SPOJ_URLS
from crawler.dataExtractor.extractor import extract_problem_data, extract_user_data, extract_submissions_data
from crawler.dataExtractor.signedlistParser import parseSignedlist
def _raw_fetch(url):
    """Download *url* and return the parsed lxml HTML root element."""
    # Progress trace on stderr (Python 2 print statement).
    print >>sys.stderr, url
    page = urlopen(url)
    html = parse(page).getroot()
    return html
def _fetch_user_statistics(spojId, contest, database):
    """Crawl a user's profile page and upsert it into the database.

    The record is stored only when the extracted id matches the
    requested *spojId* (guards against redirects to other profiles).
    """
    url = SPOJ_URLS[contest] + '/users/' + spojId
    html = _raw_fetch(url)
    item = extract_user_data(html)
    if item['_id'] == spojId:
        database.update_user(dict(item))
def _fetch_user_problems(spojId, contest, database):
    """Crawl a user's signed submission list and store it when non-empty.

    Fetches the user's /signedlist/ page, extracts the raw submissions
    payload and persists it only if the signedlist actually parses into
    at least one problem entry.
    """
    url = SPOJ_URLS[contest] + '/status/' + spojId + '/signedlist/'
    html = tostring(_raw_fetch(url))
    item = extract_submissions_data(spojId, html)
    parsedProblems = parseSignedlist(item['data'])
    # Pythonic truthiness check instead of len(...) > 0.
    if parsedProblems:
        database.update_submission_data(dict(item))
def fetch_user(spojId, contest, database, onlySubmitions=False):
    """Crawl a user's submissions and, unless suppressed, their profile."""
    include_profile = not onlySubmitions
    if include_profile:
        _fetch_user_statistics(spojId, contest, database)
    _fetch_user_problems(spojId, contest, database)
def fetch_problem(spojId, contest, database):
    """Crawl one problem page and upsert it into the database."""
    url = SPOJ_URLS[contest] + '/problems/' + spojId
    item = extract_problem_data(_raw_fetch(url), url)
    database.update_problem(dict(item))
| apache-2.0 |
ZhangXinNan/tensorflow | tensorflow/python/estimator/export/export_output.py | 5 | 13561 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different types of export output."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util.tf_export import estimator_export
@estimator_export('estimator.export.ExportOutput')
class ExportOutput(object):
  """Represents an output of a model that can be served.

  These typically correspond to model heads.
  """
  __metaclass__ = abc.ABCMeta

  # Joins tuple keys (multi-head models) into a single flat string key.
  _SEPARATOR_CHAR = '/'

  @abc.abstractmethod
  def as_signature_def(self, receiver_tensors):
    """Generate a SignatureDef proto for inclusion in a MetaGraphDef.

    The SignatureDef will specify outputs as described in this ExportOutput,
    and will use the provided receiver_tensors as inputs.

    Args:
      receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
        input nodes that will be fed.
    """
    pass

  def _check_output_key(self, key, error_label):
    # For multi-head models, the key can be a tuple.
    if isinstance(key, tuple):
      key = self._SEPARATOR_CHAR.join(key)
    if not isinstance(key, six.string_types):
      raise ValueError(
          '{} output key must be a string; got {}.'.format(error_label, key))
    return key

  def _wrap_and_check_outputs(
      self, outputs, single_output_default_name, error_label=None):
    """Wraps raw tensors as dicts and checks type.

    Note that we create a new dict here so that we can overwrite the keys
    if necessary.

    Args:
      outputs: A `Tensor` or a dict of string to `Tensor`.
      single_output_default_name: A string key for use in the output dict
        if the provided `outputs` is a raw tensor.
      error_label: descriptive string for use in error messages. If none,
        single_output_default_name will be used.

    Returns:
      A dict of tensors

    Raises:
      ValueError: if the outputs dict keys are not strings or tuples of strings
        or the values are not Tensors.
    """
    if not isinstance(outputs, dict):
      outputs = {single_output_default_name: outputs}
    output_dict = {}
    for key, value in outputs.items():
      error_name = error_label or single_output_default_name
      key = self._check_output_key(key, error_name)
      if not isinstance(value, ops.Tensor):
        raise ValueError(
            '{} output value must be a Tensor; got {}.'.format(
                error_name, value))
      output_dict[key] = value
    return output_dict
@estimator_export('estimator.export.ClassificationOutput')
class ClassificationOutput(ExportOutput):
  """Represents the output of a classification head.

  Either classes or scores or both must be set.

  The classes `Tensor` must provide string labels, not integer class IDs.

  If only classes is set, it is interpreted as providing top-k results in
  descending order.

  If only scores is set, it is interpreted as providing a score for every class
  in order of class ID.

  If both classes and scores are set, they are interpreted as zipped, so each
  score corresponds to the class at the same index. Clients should not depend
  on the order of the entries.
  """

  def __init__(self, scores=None, classes=None):
    """Constructor for `ClassificationOutput`.

    Args:
      scores: A float `Tensor` giving scores (sometimes but not always
        interpretable as probabilities) for each class. May be `None`, but
        only if `classes` is set. Interpretation varies-- see class doc.
      classes: A string `Tensor` giving predicted class labels. May be `None`,
        but only if `scores` is set. Interpretation varies-- see class doc.

    Raises:
      ValueError: if neither classes nor scores is set, or one of them is not a
        `Tensor` with the correct dtype.
    """
    if (scores is not None
        and not (isinstance(scores, ops.Tensor)
                 and scores.dtype.is_floating)):
      raise ValueError('Classification scores must be a float32 Tensor; '
                       'got {}'.format(scores))
    if (classes is not None
        and not (isinstance(classes, ops.Tensor)
                 and dtypes.as_dtype(classes.dtype) == dtypes.string)):
      raise ValueError('Classification classes must be a string Tensor; '
                       'got {}'.format(classes))
    if scores is None and classes is None:
      raise ValueError('At least one of scores and classes must be set.')
    self._scores = scores
    self._classes = classes

  @property
  def scores(self):
    return self._scores

  @property
  def classes(self):
    return self._classes

  def as_signature_def(self, receiver_tensors):
    # Classification signatures accept exactly one serialized-Example input.
    if len(receiver_tensors) != 1:
      raise ValueError('Classification input must be a single string Tensor; '
                       'got {}'.format(receiver_tensors))
    (_, examples), = receiver_tensors.items()
    if dtypes.as_dtype(examples.dtype) != dtypes.string:
      raise ValueError('Classification input must be a single string Tensor; '
                       'got {}'.format(receiver_tensors))
    return signature_def_utils.classification_signature_def(
        examples, self.classes, self.scores)
@estimator_export('estimator.export.RegressionOutput')
class RegressionOutput(ExportOutput):
  """Represents the output of a regression head."""

  def __init__(self, value):
    """Constructor for `RegressionOutput`.

    Args:
      value: a float `Tensor` giving the predicted values. Required.

    Raises:
      ValueError: if the value is not a `Tensor` with dtype tf.float32.
    """
    is_float_tensor = isinstance(value, ops.Tensor) and value.dtype.is_floating
    if not is_float_tensor:
      raise ValueError('Regression output value must be a float32 Tensor; '
                       'got {}'.format(value))
    self._value = value

  @property
  def value(self):
    return self._value

  def as_signature_def(self, receiver_tensors):
    # Regression signatures accept exactly one serialized-Example input.
    if len(receiver_tensors) != 1:
      raise ValueError('Regression input must be a single string Tensor; '
                       'got {}'.format(receiver_tensors))
    (_, examples), = receiver_tensors.items()
    if dtypes.as_dtype(examples.dtype) != dtypes.string:
      raise ValueError('Regression input must be a single string Tensor; '
                       'got {}'.format(receiver_tensors))
    return signature_def_utils.regression_signature_def(examples, self.value)
@estimator_export('estimator.export.PredictOutput')
class PredictOutput(ExportOutput):
  """Represents the output of a generic prediction head.

  A generic prediction need not be either a classification or a regression.

  Named outputs must be provided as a dict from string to `Tensor`,
  """
  # Key used when a bare Tensor (rather than a dict) is supplied.
  _SINGLE_OUTPUT_DEFAULT_NAME = 'output'

  def __init__(self, outputs):
    """Constructor for PredictOutput.

    Args:
      outputs: A `Tensor` or a dict of string to `Tensor` representing the
        predictions.

    Raises:
      ValueError: if the outputs is not dict, or any of its keys are not
        strings, or any of its values are not `Tensor`s.
    """
    self._outputs = self._wrap_and_check_outputs(
        outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')

  @property
  def outputs(self):
    return self._outputs

  def as_signature_def(self, receiver_tensors):
    return signature_def_utils.predict_signature_def(receiver_tensors,
                                                     self.outputs)
class _SupervisedOutput(ExportOutput):
"""Represents the output of a supervised training or eval process."""
__metaclass__ = abc.ABCMeta
LOSS_NAME = 'loss'
PREDICTIONS_NAME = 'predictions'
METRICS_NAME = 'metrics'
METRIC_VALUE_SUFFIX = 'value'
METRIC_UPDATE_SUFFIX = 'update_op'
_loss = None
_predictions = None
_metrics = None
def __init__(self, loss=None, predictions=None, metrics=None):
"""Constructor for SupervisedOutput (ie, Train or Eval output).
Args:
loss: dict of Tensors or single Tensor representing calculated loss.
predictions: dict of Tensors or single Tensor representing model
predictions.
metrics: dict of (metric_value, update_op) tuples, or a single tuple.
metric_value must be a Tensor, and update_op must be a Tensor or Op.
Raises:
ValueError: if any of the outputs' dict keys are not strings or tuples of
strings or the values are not Tensors (or Operations in the case of
update_op).
"""
if loss is not None:
loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)
self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)
if predictions is not None:
pred_dict = self._wrap_and_check_outputs(
predictions, self.PREDICTIONS_NAME)
self._predictions = self._prefix_output_keys(
pred_dict, self.PREDICTIONS_NAME)
if metrics is not None:
self._metrics = self._wrap_and_check_metrics(metrics)
def _prefix_output_keys(self, output_dict, output_name):
"""Prepend output_name to the output_dict keys if it doesn't exist.
This produces predictable prefixes for the pre-determined outputs
of SupervisedOutput.
Args:
output_dict: dict of string to Tensor, assumed valid.
output_name: prefix string to prepend to existing keys.
Returns:
dict with updated keys and existing values.
"""
new_outputs = {}
for key, val in output_dict.items():
key = self._prefix_key(key, output_name)
new_outputs[key] = val
return new_outputs
def _prefix_key(self, key, output_name):
if key.find(output_name) != 0:
key = output_name + self._SEPARATOR_CHAR + key
return key
def _wrap_and_check_metrics(self, metrics):
"""Handle the saving of metrics.
Metrics is either a tuple of (value, update_op), or a dict of such tuples.
Here, we separate out the tuples and create a dict with names to tensors.
Args:
metrics: dict of (metric_value, update_op) tuples, or a single tuple.
Returns:
dict of output_names to tensors
Raises:
ValueError: if the dict key is not a string, or the metric values or ops
are not tensors.
"""
if not isinstance(metrics, dict):
metrics = {self.METRICS_NAME: metrics}
outputs = {}
for key, (metric_val, metric_op) in metrics.items():
key = self._check_output_key(key, self.METRICS_NAME)
key = self._prefix_key(key, self.METRICS_NAME)
val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX
op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX
if not isinstance(metric_val, ops.Tensor):
raise ValueError(
'{} output value must be a Tensor; got {}.'.format(
key, metric_val))
if (not isinstance(metric_op, ops.Tensor) and
not isinstance(metric_op, ops.Operation)):
raise ValueError(
'{} update_op must be a Tensor or Operation; got {}.'.format(
key, metric_op))
# We must wrap any ops in a Tensor before export, as the SignatureDef
# proto expects tensors only. See b/109740581
metric_op_tensor = metric_op
if isinstance(metric_op, ops.Operation):
with ops.control_dependencies([metric_op]):
metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
outputs[val_name] = metric_val
outputs[op_name] = metric_op_tensor
return outputs
@property
def loss(self):
  """Returns the stored loss value."""
  return self._loss

@property
def predictions(self):
  """Returns the stored predictions value."""
  return self._predictions

@property
def metrics(self):
  """Returns the stored metrics dict."""
  return self._metrics
@abc.abstractmethod
def _get_signature_def_fn(self):
  """Returns a function that produces a SignatureDef given desired outputs."""
  # Subclasses pick the train- or eval-specific signature_def_utils helper.
  pass
def as_signature_def(self, receiver_tensors):
  """Builds a SignatureDef from the receiver tensors and stored outputs."""
  build_signature = self._get_signature_def_fn()
  return build_signature(
      receiver_tensors, self.loss, self.predictions, self.metrics)
class TrainOutput(_SupervisedOutput):
  """Represents the output of a supervised training process.

  This class generates the appropriate signature def for exporting
  training output by type-checking and wrapping loss, predictions, and metrics
  values.
  """

  def _get_signature_def_fn(self):
    # Select the train-specific SignatureDef builder.
    return signature_def_utils.supervised_train_signature_def
class EvalOutput(_SupervisedOutput):
  """Represents the output of a supervised eval process.

  This class generates the appropriate signature def for exporting
  eval output by type-checking and wrapping loss, predictions, and metrics
  values.
  """

  def _get_signature_def_fn(self):
    # Select the eval-specific SignatureDef builder.
    return signature_def_utils.supervised_eval_signature_def
| apache-2.0 |
kriberg/stationspinner | stationspinner/corporation/views.py | 1 | 1853 | from django.core.cache import cache
from rest_framework import viewsets, views
from rest_framework.response import Response
from stationspinner.corporation.serializers import CorporationSheetSerializer, \
CorporationSheetListSerializer
from stationspinner.corporation.models import CorporationSheet, Asset
from stationspinner.libs.drf_extensions import CapsulerPermission
class CorporationSheetViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints exposing the requesting capsuler's enabled corporations."""
    serializer_class = CorporationSheetSerializer
    model = CorporationSheet
    permission_classes = [CapsulerPermission]

    def list(self, request):
        # The list view uses the lighter list serializer instead of the
        # full detail serializer configured on the viewset.
        queryset = self.get_queryset()
        serializer = CorporationSheetListSerializer(queryset,
                                                    many=True,
                                                    context={'request': request})
        return Response(serializer.data)

    def get_queryset(self):
        return CorporationSheet.objects.filter(owner=self.request.user,
                                               enabled=True)
class AssetLocationsView(views.APIView):
    """Returns top-level asset locations for one of the capsuler's corporations.

    Query params:
        corporationID: pk of a corporation owned by the requesting user (required).
        regionID: optional region filter.

    Results are cached for 1800 seconds per (corporation, region) pair.
    Returns an empty list when the corporation is missing or not owned.
    """
    permission_classes = [CapsulerPermission]

    def get(self, request, format=None):
        corporationID = request.query_params.get('corporationID', None)
        regionID = request.query_params.get('regionID', None)
        if not corporationID:
            return Response([])
        try:
            corporation = CorporationSheet.objects.get(owner=request.user,
                                                       pk=corporationID)
        except CorporationSheet.DoesNotExist:
            # Unknown corporation, or one not owned by this user.
            return Response([])
        # Bug fix: the key previously mixed in the bound method object
        # `corporation.pk.__hash__` (never called) instead of the pk value.
        key = hash(('asset_locations', corporation.pk, regionID))
        asset_locations = cache.get(key, None)
        # Compare against None (not truthiness) so a cached empty result
        # does not trigger a recomputation on every request.
        if asset_locations is None:
            asset_locations = Asset.objects.get_top_level_locations(
                corporation.pk, regionID)
            cache.set(key, asset_locations, 1800)
        return Response(asset_locations)
4Quant/tensorflow | tensorflow/tensorboard/backend/float_wrapper.py | 27 | 1776 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module providing a function for serializing JSON values with Infinity.
Python provides no way to override how json.dumps serializes
Infinity/-Infinity/NaN; if allow_nan is true, it encodes them as
Infinity/-Infinity/NaN, in violation of the JSON spec and in violation of what
JSON.parse accepts. If it's false, it throws a ValueError. Neither subclassing
JSONEncoder nor passing a function in the |default| keyword argument overrides
this.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
def WrapSpecialFloats(obj):
  """Replaces all instances of Infinity/-Infinity/NaN with strings.

  Recurses into lists, tuples, and dicts (both keys and values); tuples come
  back as lists. Any other value is returned unchanged.
  """
  # Guard clauses: each special float maps to its string spelling.
  if obj == float('inf'):
    return 'Infinity'
  if obj == float('-inf'):
    return '-Infinity'
  # NaN never compares equal to anything, so it needs an explicit check.
  if isinstance(obj, float) and math.isnan(obj):
    return 'NaN'
  if isinstance(obj, (list, tuple)):
    return [WrapSpecialFloats(item) for item in obj]
  if isinstance(obj, dict):
    return dict((WrapSpecialFloats(k), WrapSpecialFloats(v))
                for k, v in obj.items())
  return obj
| apache-2.0 |
seanfisk/buzzword-bingo-server | django/contrib/sessions/backends/cache.py | 268 | 1881 | from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import cache
class SessionStore(SessionBase):
    """
    Session store that keeps session data in the configured Django cache.
    """
    def __init__(self, session_key=None):
        self._cache = cache
        super(SessionStore, self).__init__(session_key)

    def load(self):
        # A cache miss means the key expired or never existed; start fresh.
        data = self._cache.get(self.session_key)
        if data is None:
            self.create()
            return {}
        return data

    def create(self):
        # Because a cache can fail silently (e.g. memcache), we don't know if
        # we are failing to create a new session because of a key collision or
        # because the cache is missing. So we try for a (large) number of times
        # and then raise an exception. That's the risk you shoulder if using
        # cache backing.
        for attempt in xrange(10000):
            self.session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue
            self.modified = True
            return
        raise RuntimeError("Unable to create a new session key.")

    def save(self, must_create=False):
        # cache.add only stores when the key is absent, which is how key
        # collisions are detected during create().
        set_func = self._cache.add if must_create else self._cache.set
        stored = set_func(self.session_key,
                          self._get_session(no_load=must_create),
                          self.get_expiry_age())
        if must_create and not stored:
            raise CreateError

    def exists(self, session_key):
        return True if self._cache.has_key(session_key) else False

    def delete(self, session_key=None):
        # With no explicit key, fall back to this store's own session key.
        if session_key is None:
            if self._session_key is None:
                return
            session_key = self._session_key
        self._cache.delete(session_key)
| bsd-3-clause |
cdgallahue/atomic-turbine | web/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py | 354 | 83387 | from __future__ import absolute_import, division, unicode_literals
import string
EOF = None
E = {
"null-character":
"Null character in input stream, replaced with U+FFFD.",
"invalid-codepoint":
"Invalid codepoint in stream.",
"incorrectly-placed-solidus":
"Solidus (/) incorrectly placed in tag.",
"incorrect-cr-newline-entity":
"Incorrect CR newline entity, replaced with LF.",
"illegal-windows-1252-entity":
"Entity used with illegal number (windows-1252 reference).",
"cant-convert-numeric-entity":
"Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x).",
"illegal-codepoint-for-numeric-entity":
"Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x.",
"numeric-entity-without-semicolon":
"Numeric entity didn't end with ';'.",
"expected-numeric-entity-but-got-eof":
"Numeric entity expected. Got end of file instead.",
"expected-numeric-entity":
"Numeric entity expected but none found.",
"named-entity-without-semicolon":
"Named entity didn't end with ';'.",
"expected-named-entity":
"Named entity expected. Got none.",
"attributes-in-end-tag":
"End tag contains unexpected attributes.",
'self-closing-flag-on-end-tag':
"End tag contains unexpected self-closing flag.",
"expected-tag-name-but-got-right-bracket":
"Expected tag name. Got '>' instead.",
"expected-tag-name-but-got-question-mark":
"Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)",
"expected-tag-name":
"Expected tag name. Got something else instead",
"expected-closing-tag-but-got-right-bracket":
"Expected closing tag. Got '>' instead. Ignoring '</>'.",
"expected-closing-tag-but-got-eof":
"Expected closing tag. Unexpected end of file.",
"expected-closing-tag-but-got-char":
"Expected closing tag. Unexpected character '%(data)s' found.",
"eof-in-tag-name":
"Unexpected end of file in the tag name.",
"expected-attribute-name-but-got-eof":
"Unexpected end of file. Expected attribute name instead.",
"eof-in-attribute-name":
"Unexpected end of file in attribute name.",
"invalid-character-in-attribute-name":
"Invalid character in attribute name",
"duplicate-attribute":
"Dropped duplicate attribute on tag.",
"expected-end-of-tag-name-but-got-eof":
"Unexpected end of file. Expected = or end of tag.",
"expected-attribute-value-but-got-eof":
"Unexpected end of file. Expected attribute value.",
"expected-attribute-value-but-got-right-bracket":
"Expected attribute value. Got '>' instead.",
'equals-in-unquoted-attribute-value':
"Unexpected = in unquoted attribute",
'unexpected-character-in-unquoted-attribute-value':
"Unexpected character in unquoted attribute",
"invalid-character-after-attribute-name":
"Unexpected character after attribute name.",
"unexpected-character-after-attribute-value":
"Unexpected character after attribute value.",
"eof-in-attribute-value-double-quote":
"Unexpected end of file in attribute value (\").",
"eof-in-attribute-value-single-quote":
"Unexpected end of file in attribute value (').",
"eof-in-attribute-value-no-quotes":
"Unexpected end of file in attribute value.",
"unexpected-EOF-after-solidus-in-tag":
"Unexpected end of file in tag. Expected >",
"unexpected-character-after-solidus-in-tag":
"Unexpected character after / in tag. Expected >",
"expected-dashes-or-doctype":
"Expected '--' or 'DOCTYPE'. Not found.",
"unexpected-bang-after-double-dash-in-comment":
"Unexpected ! after -- in comment",
"unexpected-space-after-double-dash-in-comment":
"Unexpected space after -- in comment",
"incorrect-comment":
"Incorrect comment.",
"eof-in-comment":
"Unexpected end of file in comment.",
"eof-in-comment-end-dash":
"Unexpected end of file in comment (-)",
"unexpected-dash-after-double-dash-in-comment":
"Unexpected '-' after '--' found in comment.",
"eof-in-comment-double-dash":
"Unexpected end of file in comment (--).",
"eof-in-comment-end-space-state":
"Unexpected end of file in comment.",
"eof-in-comment-end-bang-state":
"Unexpected end of file in comment.",
"unexpected-char-in-comment":
"Unexpected character in comment found.",
"need-space-after-doctype":
"No space after literal string 'DOCTYPE'.",
"expected-doctype-name-but-got-right-bracket":
"Unexpected > character. Expected DOCTYPE name.",
"expected-doctype-name-but-got-eof":
"Unexpected end of file. Expected DOCTYPE name.",
"eof-in-doctype-name":
"Unexpected end of file in DOCTYPE name.",
"eof-in-doctype":
"Unexpected end of file in DOCTYPE.",
"expected-space-or-right-bracket-in-doctype":
"Expected space or '>'. Got '%(data)s'",
"unexpected-end-of-doctype":
"Unexpected end of DOCTYPE.",
"unexpected-char-in-doctype":
"Unexpected character in DOCTYPE.",
"eof-in-innerhtml":
"XXX innerHTML EOF",
"unexpected-doctype":
"Unexpected DOCTYPE. Ignored.",
"non-html-root":
"html needs to be the first start tag.",
"expected-doctype-but-got-eof":
"Unexpected End of file. Expected DOCTYPE.",
"unknown-doctype":
"Erroneous DOCTYPE.",
"expected-doctype-but-got-chars":
"Unexpected non-space characters. Expected DOCTYPE.",
"expected-doctype-but-got-start-tag":
"Unexpected start tag (%(name)s). Expected DOCTYPE.",
"expected-doctype-but-got-end-tag":
"Unexpected end tag (%(name)s). Expected DOCTYPE.",
"end-tag-after-implied-root":
"Unexpected end tag (%(name)s) after the (implied) root element.",
"expected-named-closing-tag-but-got-eof":
"Unexpected end of file. Expected end tag (%(name)s).",
"two-heads-are-not-better-than-one":
"Unexpected start tag head in existing head. Ignored.",
"unexpected-end-tag":
"Unexpected end tag (%(name)s). Ignored.",
"unexpected-start-tag-out-of-my-head":
"Unexpected start tag (%(name)s) that can be in head. Moved.",
"unexpected-start-tag":
"Unexpected start tag (%(name)s).",
"missing-end-tag":
"Missing end tag (%(name)s).",
"missing-end-tags":
"Missing end tags (%(name)s).",
"unexpected-start-tag-implies-end-tag":
"Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s).",
"unexpected-start-tag-treated-as":
"Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
"deprecated-tag":
"Unexpected start tag %(name)s. Don't use it!",
"unexpected-start-tag-ignored":
"Unexpected start tag %(name)s. Ignored.",
"expected-one-end-tag-but-got-another":
"Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s).",
"end-tag-too-early":
"End tag (%(name)s) seen too early. Expected other end tag.",
"end-tag-too-early-named":
"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
"end-tag-too-early-ignored":
"End tag (%(name)s) seen too early. Ignored.",
"adoption-agency-1.1":
"End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm.",
"adoption-agency-1.2":
"End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm.",
"adoption-agency-1.3":
"End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm.",
"adoption-agency-4.4":
"End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm.",
"unexpected-end-tag-treated-as":
"Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
"no-end-tag":
"This element (%(name)s) has no end tag.",
"unexpected-implied-end-tag-in-table":
"Unexpected implied end tag (%(name)s) in the table phase.",
"unexpected-implied-end-tag-in-table-body":
"Unexpected implied end tag (%(name)s) in the table body phase.",
"unexpected-char-implies-table-voodoo":
"Unexpected non-space characters in "
"table context caused voodoo mode.",
"unexpected-hidden-input-in-table":
"Unexpected input with type hidden in table context.",
"unexpected-form-in-table":
"Unexpected form in table context.",
"unexpected-start-tag-implies-table-voodoo":
"Unexpected start tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-end-tag-implies-table-voodoo":
"Unexpected end tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-cell-in-table-body":
"Unexpected table cell start tag (%(name)s) "
"in the table body phase.",
"unexpected-cell-end-tag":
"Got table cell end tag (%(name)s) "
"while required end tags are missing.",
"unexpected-end-tag-in-table-body":
"Unexpected end tag (%(name)s) in the table body phase. Ignored.",
"unexpected-implied-end-tag-in-table-row":
"Unexpected implied end tag (%(name)s) in the table row phase.",
"unexpected-end-tag-in-table-row":
"Unexpected end tag (%(name)s) in the table row phase. Ignored.",
"unexpected-select-in-select":
"Unexpected select start tag in the select phase "
"treated as select end tag.",
"unexpected-input-in-select":
"Unexpected input start tag in the select phase.",
"unexpected-start-tag-in-select":
"Unexpected start tag token (%(name)s in the select phase. "
"Ignored.",
"unexpected-end-tag-in-select":
"Unexpected end tag (%(name)s) in the select phase. Ignored.",
"unexpected-table-element-start-tag-in-select-in-table":
"Unexpected table element start tag (%(name)s) in the select in table phase.",
"unexpected-table-element-end-tag-in-select-in-table":
"Unexpected table element end tag (%(name)s) in the select in table phase.",
"unexpected-char-after-body":
"Unexpected non-space characters in the after body phase.",
"unexpected-start-tag-after-body":
"Unexpected start tag token (%(name)s)"
" in the after body phase.",
"unexpected-end-tag-after-body":
"Unexpected end tag token (%(name)s)"
" in the after body phase.",
"unexpected-char-in-frameset":
"Unexpected characters in the frameset phase. Characters ignored.",
"unexpected-start-tag-in-frameset":
"Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-frameset-in-frameset-innerhtml":
"Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML).",
"unexpected-end-tag-in-frameset":
"Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-char-after-frameset":
"Unexpected non-space characters in the "
"after frameset phase. Ignored.",
"unexpected-start-tag-after-frameset":
"Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-frameset":
"Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-body-innerhtml":
"Unexpected end tag after body(innerHtml)",
"expected-eof-but-got-char":
"Unexpected non-space characters. Expected end of file.",
"expected-eof-but-got-start-tag":
"Unexpected start tag (%(name)s)"
". Expected end of file.",
"expected-eof-but-got-end-tag":
"Unexpected end tag (%(name)s)"
". Expected end of file.",
"eof-in-table":
"Unexpected end of file. Expected table content.",
"eof-in-select":
"Unexpected end of file. Expected select content.",
"eof-in-frameset":
"Unexpected end of file. Expected frameset content.",
"eof-in-script-in-script":
"Unexpected end of file. Expected script content.",
"eof-in-foreign-lands":
"Unexpected end of file. Expected foreign content",
"non-void-element-with-trailing-solidus":
"Trailing solidus not allowed on element %(name)s",
"unexpected-html-element-in-foreign-content":
"Element %(name)s not allowed in a non-html context",
"unexpected-end-tag-before-html":
"Unexpected end tag (%(name)s) before html.",
"unexpected-inhead-noscript-tag":
"Element %(name)s not allowed in a inhead-noscript context",
"eof-in-head-noscript":
"Unexpected end of file. Expected inhead-noscript content",
"char-in-head-noscript":
"Unexpected non-space character. Expected inhead-noscript content",
"XXX-undefined-error":
"Undefined error (this sucks and should be fixed)",
}
# Canonical namespace URIs, keyed by short name, used throughout the
# tokenizer, tree builder, and serializer.
namespaces = {
    "html": "http://www.w3.org/1999/xhtml",
    "mathml": "http://www.w3.org/1998/Math/MathML",
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
    "xml": "http://www.w3.org/XML/1998/namespace",
    "xmlns": "http://www.w3.org/2000/xmlns/"
}
# (namespace, tag) pairs that establish a new scope boundary during tree
# construction ("have an element in scope" checks stop at these).
scopingElements = frozenset([
    (namespaces["html"], "applet"),
    (namespaces["html"], "caption"),
    (namespaces["html"], "html"),
    (namespaces["html"], "marquee"),
    (namespaces["html"], "object"),
    (namespaces["html"], "table"),
    (namespaces["html"], "td"),
    (namespaces["html"], "th"),
    (namespaces["mathml"], "mi"),
    (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"),
    (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext"),
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title"),
])
# (namespace, tag) pairs tracked by the list of active formatting elements
# (subject to the adoption agency algorithm).
formattingElements = frozenset([
    (namespaces["html"], "a"),
    (namespaces["html"], "b"),
    (namespaces["html"], "big"),
    (namespaces["html"], "code"),
    (namespaces["html"], "em"),
    (namespaces["html"], "font"),
    (namespaces["html"], "i"),
    (namespaces["html"], "nobr"),
    (namespaces["html"], "s"),
    (namespaces["html"], "small"),
    (namespaces["html"], "strike"),
    (namespaces["html"], "strong"),
    (namespaces["html"], "tt"),
    (namespaces["html"], "u")
])
specialElements = frozenset([
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
])
# (namespace, tag) pairs that are HTML integration points: children of these
# foreign elements are parsed as HTML content.
htmlIntegrationPointElements = frozenset([
    # Fixed typo: was "annotaion-xml", so MathML annotation-xml elements were
    # never recognized as HTML integration points.
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title")
])
# (namespace, tag) pairs that are MathML text integration points: character
# and most start-tag tokens inside them are handled as HTML.
mathmlTextIntegrationPointElements = frozenset([
    (namespaces["mathml"], "mi"),
    (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"),
    (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext")
])
adjustSVGAttributes = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
# Lowercased MathML attribute names mapped to their mixed-case spellings.
adjustMathMLAttributes = {"definitionurl": "definitionURL"}

# Namespaced foreign attributes: qualified name -> (prefix, local name,
# namespace URI), applied when adjusting attributes on foreign elements.
adjustForeignAttributes = {
    "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
    "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
    "xlink:href": ("xlink", "href", namespaces["xlink"]),
    "xlink:role": ("xlink", "role", namespaces["xlink"]),
    "xlink:show": ("xlink", "show", namespaces["xlink"]),
    "xlink:title": ("xlink", "title", namespaces["xlink"]),
    "xlink:type": ("xlink", "type", namespaces["xlink"]),
    "xml:base": ("xml", "base", namespaces["xml"]),
    "xml:lang": ("xml", "lang", namespaces["xml"]),
    "xml:space": ("xml", "space", namespaces["xml"]),
    "xmlns": (None, "xmlns", namespaces["xmlns"]),
    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}

# Inverse of the above: (namespace URI, local name) -> qualified name,
# used when serializing back out.
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
                                  adjustForeignAttributes.items()])
# The five HTML space characters (tab, LF, FF, space, CR).
spaceCharacters = frozenset([
    "\t",
    "\n",
    "\u000C",
    " ",
    "\r"
])

# Elements whose open state switches the tree builder into a table
# insertion mode (foster parenting applies inside them).
tableInsertModeElements = frozenset([
    "table",
    "tbody",
    "tfoot",
    "thead",
    "tr"
])

# ASCII character classes used by the tokenizer.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

# Translation table (for str.translate) mapping ASCII uppercase code points
# to their lowercase equivalents.
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
                         for c in string.ascii_uppercase])

# Heading elements need to be ordered
headingElements = (
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6"
)
# Void elements: never have content and take no end tag.
voidElements = frozenset([
    "base",
    "command",
    "event-source",
    "link",
    "meta",
    "hr",
    "br",
    "img",
    "embed",
    "param",
    "area",
    "col",
    "input",
    "source",
    "track"
])

# Elements whose content is escapable raw text (character references are
# still recognized inside them).
cdataElements = frozenset(['title', 'textarea'])

# Elements whose content is raw text: everything up to the matching end tag
# is treated as text.
rcdataElements = frozenset([
    'style',
    'script',
    'xmp',
    'iframe',
    'noembed',
    'noframes',
    'noscript'
])
# Boolean attributes per element name; the "" key lists attributes that are
# boolean on every element. Used by the serializer to minimize attributes.
booleanAttributes = {
    "": frozenset(["irrelevant"]),
    "style": frozenset(["scoped"]),
    "img": frozenset(["ismap"]),
    "audio": frozenset(["autoplay", "controls"]),
    "video": frozenset(["autoplay", "controls"]),
    "script": frozenset(["defer", "async"]),
    "details": frozenset(["open"]),
    "datagrid": frozenset(["multiple", "disabled"]),
    "command": frozenset(["hidden", "disabled", "checked", "default"]),
    "hr": frozenset(["noshade"]),
    "menu": frozenset(["autosubmit"]),
    "fieldset": frozenset(["disabled", "readonly"]),
    "option": frozenset(["disabled", "readonly", "selected"]),
    "optgroup": frozenset(["disabled", "readonly"]),
    "button": frozenset(["disabled", "autofocus"]),
    "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
    "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
    "output": frozenset(["disabled", "readonly"]),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
# The five entities predefined by XML itself (semicolons included).
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
# Replacement characters for byte values 0x80-0x9F (and 0x00/0x0D).
# The values in that range are the Windows-1252 interpretations of what
# would otherwise be C1 control characters; 0x00 maps to U+FFFD
# (REPLACEMENT CHARACTER).  Presumably used when resolving numeric
# character references per the HTML5 spec -- confirm at call sites.
replacementCharacters = {
    0x0: "\uFFFD",
    0x0d: "\u000D",
    0x80: "\u20AC",
    0x81: "\u0081",
    0x82: "\u201A",
    0x83: "\u0192",
    0x84: "\u201E",
    0x85: "\u2026",
    0x86: "\u2020",
    0x87: "\u2021",
    0x88: "\u02C6",
    0x89: "\u2030",
    0x8A: "\u0160",
    0x8B: "\u2039",
    0x8C: "\u0152",
    0x8D: "\u008D",
    0x8E: "\u017D",
    0x8F: "\u008F",
    0x90: "\u0090",
    0x91: "\u2018",
    0x92: "\u2019",
    0x93: "\u201C",
    0x94: "\u201D",
    0x95: "\u2022",
    0x96: "\u2013",
    0x97: "\u2014",
    0x98: "\u02DC",
    0x99: "\u2122",
    0x9A: "\u0161",
    0x9B: "\u203A",
    0x9C: "\u0153",
    0x9D: "\u009D",
    0x9E: "\u017E",
    0x9F: "\u0178",
}
# Integer codes identifying the kinds of tokens the tokenizer emits.
tokenTypes = {
    "Doctype": 0,
    "Characters": 1,
    "SpaceCharacters": 2,
    "StartTag": 3,
    "EndTag": 4,
    "EmptyTag": 5,
    "Comment": 6,
    "ParseError": 7
}

# Token types that carry a tag name; frozenset for O(1) membership tests.
tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
                           tokenTypes["EmptyTag"]])

# Reverse mapping of `namespaces` (namespace URI -> prefix), with the
# MathML URI explicitly mapped to the "math" prefix.
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning category used by html5lib; the name suggests it is issued
    when an operation may discard information -- verify at emit sites."""
    pass
class ReparseException(Exception):
    """Control-flow exception; presumably raised when the parser must
    restart parsing (e.g. after detecting a new encoding) -- verify at
    raise sites."""
    pass
| mit |
subutai/nupic.research | tests/unit/frameworks/vernon/self_supervised_learning_test.py | 2 | 3768 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision.datasets.fakedata import FakeData
from nupic.research.frameworks.pytorch.self_supervised_utils import EncoderClassifier
from nupic.research.frameworks.vernon import SelfSupervisedExperiment
class AutoEncoder(torch.nn.Module):
    """Minimal linear autoencoder for flattened 1x28x28 inputs.

    ``encode`` projects a flattened batch down to ``hidden_dim``
    features; ``forward`` reconstructs the input and restores the
    (N, 1, 28, 28) image layout.
    """

    def __init__(self, input_dim=784, hidden_dim=20):
        super().__init__()
        # Encoder and decoder are each a single linear map; no activation.
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, input_dim)

    def forward(self, x):
        # Decode the latent code and restore the image shape.
        return self.fc2(self.encode(x)).view(-1, 1, 28, 28)

    def encode(self, x):
        # Collapse (N, C, H, W) -> (N, C*H*W) before the projection.
        return self.fc1(x.flatten(start_dim=1))
class LinearClassifier(torch.nn.Module):
    """Single linear layer mapping feature vectors to class logits."""

    def __init__(self, input_dim=20, num_classes=10):
        super().__init__()
        self.fc = nn.Linear(input_dim, num_classes)

    def forward(self, x):
        return self.fc(x)
# Arguments for torchvision's FakeData: 1000 random 1x28x28 "images"
# spread over 10 classes, converted to tensors.
fake_data_args = dict(
    size=1000, image_size=(1, 28, 28), num_classes=10, transform=transforms.ToTensor()
)

# Experiment configuration consumed by SelfSupervisedExperiment in the
# test below.  The same fake dataset serves the unsupervised, supervised
# and validation phases.
self_supervised_config = dict(
    experiment_class=SelfSupervisedExperiment,
    num_classes=10,
    # Dataset
    dataset_class=FakeData,
    dataset_args=dict(
        unsupervised=fake_data_args,
        supervised=fake_data_args,
        validation=fake_data_args,
    ),
    # Number of epochs
    epochs=5,
    epochs_to_validate=[2, 4],
    supervised_training_epochs_per_validation=1,
    batch_size=32,
    batch_size_supervised=32,
    # Model class. Must inherit from "torch.nn.Module"
    model_class=AutoEncoder,
    # Model class arguments passed to the constructor
    model_args=dict(),
    optimizer_class=torch.optim.Adam,
    optimizer_args=dict(lr=0.001),
    # Configuration for the supervised classifier head trained on top of
    # the (frozen or jointly trained -- see experiment class) encoder.
    classifier_config=dict(
        model_class=LinearClassifier,
        model_args=dict(),
        optimizer_class=torch.optim.SGD,
        optimizer_args=dict(lr=0.001),
        loss_function=torch.nn.functional.cross_entropy,
    ),
    # Reconstruction loss for the self-supervised (autoencoding) phase.
    loss_function=torch.nn.functional.mse_loss,
)
class SelfSupervisedLearningTest(unittest.TestCase):
    """
    This is a test class for the `SelfSupervisedExperiment` class.
    """

    def test_self_supervised_experiment(self):
        # Setup experiment and initialize model.
        exp = self_supervised_config["experiment_class"]()
        exp.setup_experiment(self_supervised_config)
        # setup_experiment is expected to wrap the autoencoder and the
        # linear head into a single EncoderClassifier with `encoder` and
        # `classifier` attributes.
        self.assertIsInstance(exp.model, EncoderClassifier)
        self.assertTrue(hasattr(exp.model, "classifier"))
        self.assertTrue(hasattr(exp.model, "encoder"))
        # Loop through some pseudo epochs; only checks that training
        # runs without raising.
        for _ in range(5):
            exp.run_epoch()


if __name__ == "__main__":
    unittest.main(verbosity=2)
| agpl-3.0 |
fo0nikens/shadowsocks | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
# Handle to the libsodium shared library; populated lazily by
# load_libsodium() on first use.
libsodium = None
loaded = False
# Initial size of the shared ctypes output buffer; grown on demand in
# SodiumCrypto.update().
buf_size = 2048
# cipher block size in bytes, for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
    """Locate libsodium, declare the C prototypes for the salsa20 and
    chacha20 xor functions, and allocate the shared output buffer.

    Sets the module-level ``loaded`` flag so callers only invoke this
    once.  Raises if the library cannot be found.
    """
    global loaded, libsodium, buf
    libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
                                  'libsodium')
    if libsodium is None:
        raise Exception('libsodium not found')
    # crypto_stream_*_xor_ic signature:
    #   (out, in, inlen, nonce, initial block counter, key)
    libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
    libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                       c_ulonglong,
                                                       c_char_p, c_ulonglong,
                                                       c_char_p)
    libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
    libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                        c_ulonglong,
                                                        c_char_p, c_ulonglong,
                                                        c_char_p)
    buf = create_string_buffer(buf_size)
    loaded = True
class SodiumCrypto(object):
    """Stream cipher (salsa20/chacha20) backed by libsodium.

    Encryption and decryption are the same xor operation, so one class
    serves both directions.  A running byte counter keeps successive
    ``update()`` calls aligned to the cipher's 64-byte block boundary.
    """

    def __init__(self, cipher_name, key, iv, op):
        # `op` is accepted for interface compatibility but never stored:
        # the xor stream is its own inverse.
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        # Pre-built ctypes pointers, reused on every update() call.
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        """Transform `data` through the keystream and return the result."""
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        # Grow the shared output buffer geometrically when too small.
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        # ic (initial counter) is in blocks; self.counter is in bytes.
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
# cipher name -> (key size in bytes, iv/nonce size in bytes, crypto class)
ciphers = {
    'salsa20': (32, 8, SodiumCrypto),
    'chacha20': (32, 8, SodiumCrypto),
}


def test_salsa20():
    # Round-trip check: encrypt with one instance and decrypt with an
    # identically keyed one (behaviour delegated to util.run_cipher).
    cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)


def test_chacha20():
    # Same round-trip check for chacha20.
    cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)


if __name__ == '__main__':
    test_chacha20()
    test_salsa20()
betoesquivel/fil2014 | filenv/lib/python2.7/site-packages/haystack/fields.py | 10 | 13072 | from __future__ import unicode_literals
import re
from django.utils import datetime_safe
from django.utils import six
from django.template import loader, Context
from haystack.exceptions import SearchFieldError
class NOT_PROVIDED:
    """Sentinel distinguishing "no default supplied" from a default of
    ``None`` (see ``SearchField.has_default``)."""
    pass
# Matches "YYYY-MM-DD HH:MM:SS" (space or "T" separator), capturing each
# component by name and ignoring any trailing fraction/timezone.  Raw
# string literal avoids invalid escape sequences like "\d", which emit
# DeprecationWarning (SyntaxWarning on Python 3.12+) in non-raw strings.
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the SearchFields variants.
class SearchField(object):
    """The base implementation of a search field."""
    # Backend-facing type identifier; concrete subclasses override this.
    field_type = None

    def __init__(self, model_attr=None, use_template=False, template_name=None,
                 document=False, indexed=True, stored=True, faceted=False,
                 default=NOT_PROVIDED, null=False, index_fieldname=None,
                 facet_class=None, boost=1.0, weight=None):
        # Track what the index thinks this field is called.
        self.instance_name = None
        self.model_attr = model_attr
        self.use_template = use_template
        self.template_name = template_name
        self.document = document
        self.indexed = indexed
        self.stored = stored
        self.faceted = faceted
        self._default = default
        self.null = null
        self.index_fieldname = index_fieldname
        # A truthy `weight` overrides `boost` (they are aliases).
        self.boost = weight or boost
        self.is_multivalued = False
        # We supply the facet_class for making it easy to create a faceted
        # field based off of this field.
        self.facet_class = facet_class
        if self.facet_class is None:
            self.facet_class = FacetCharField
        self.set_instance_name(None)

    def set_instance_name(self, instance_name):
        # Record the attribute name this field was bound to on its index;
        # the index fieldname falls back to it when not set explicitly.
        self.instance_name = instance_name
        if self.index_fieldname is None:
            self.index_fieldname = self.instance_name

    def has_default(self):
        """Returns a boolean of whether this field has a default value."""
        return self._default is not NOT_PROVIDED

    @property
    def default(self):
        """Returns the default value for the field."""
        # Callables are invoked so defaults like `datetime.now` stay fresh.
        if callable(self._default):
            return self._default()
        return self._default

    def prepare(self, obj):
        """
        Takes data from the provided object and prepares it for storage in the
        index.
        """
        # Give priority to a template.
        if self.use_template:
            return self.prepare_template(obj)
        elif self.model_attr is not None:
            # Check for `__` in the field for looking through the relation.
            attrs = self.model_attr.split('__')
            current_object = obj
            # Walk the attribute chain one hop at a time (Django-style
            # `a__b__c` lookups).
            for attr in attrs:
                if not hasattr(current_object, attr):
                    raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(obj), attr))
                current_object = getattr(current_object, attr, None)
                if current_object is None:
                    if self.has_default():
                        current_object = self._default
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    elif self.null:
                        current_object = None
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    else:
                        raise SearchFieldError("The model '%s' has an empty model_attr '%s' and doesn't allow a default or null value." % (repr(obj), attr))
            # Callables (e.g. model methods) are invoked for their value.
            if callable(current_object):
                return current_object()
            return current_object
        # No template and no model_attr: fall back to the default, if any.
        if self.has_default():
            return self.default
        else:
            return None

    def prepare_template(self, obj):
        """
        Flattens an object for indexing.
        This loads a template
        (``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
        returns the result of rendering that template. ``object`` will be in
        its context.
        """
        if self.instance_name is None and self.template_name is None:
            raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
        if self.template_name is not None:
            # An explicit template (or list of candidates) wins.
            template_names = self.template_name
            if not isinstance(template_names, (list, tuple)):
                template_names = [template_names]
        else:
            # Derive the conventional template path from the model's meta.
            template_names = ['search/indexes/%s/%s_%s.txt' % (obj._meta.app_label, obj._meta.module_name, self.instance_name)]
        t = loader.select_template(template_names)
        return t.render(Context({'object': obj}))

    def convert(self, value):
        """
        Handles conversion between the data found and the type of the field.
        Extending classes should override this method and provide correct
        data coercion.
        """
        return value
class CharField(SearchField):
    """A free-form text field; prepared values are coerced to unicode."""

    field_type = 'string'

    def __init__(self, **kwargs):
        # Default the faceted counterpart unless the caller chose one.
        facet_class = kwargs.get('facet_class')
        kwargs['facet_class'] = FacetCharField if facet_class is None else facet_class
        super(CharField, self).__init__(**kwargs)

    def prepare(self, obj):
        prepared = super(CharField, self).prepare(obj)
        return self.convert(prepared)

    def convert(self, value):
        return None if value is None else six.text_type(value)
class LocationField(SearchField):
    """A geographic point field, stored as a "lat,lng" string."""

    field_type = 'location'

    def prepare(self, obj):
        # Imported here (not at module level), presumably to avoid a hard
        # dependency on GeoDjango when location fields are unused.
        from haystack.utils.geo import ensure_point
        value = super(LocationField, self).prepare(obj)
        if value is None:
            return None
        pnt = ensure_point(value)
        # Note: get_coords() yields (lng, lat); the stored string is
        # "lat,lng" -- the reversed order here is deliberate.
        pnt_lng, pnt_lat = pnt.get_coords()
        return "%s,%s" % (pnt_lat, pnt_lng)

    def convert(self, value):
        from haystack.utils.geo import ensure_point, Point
        if value is None:
            return None
        # Already a geometry object: normalize to a Point and return.
        if hasattr(value, 'geom_type'):
            value = ensure_point(value)
            return value
        # Accepted input shapes: "lat,lng" string, (lng, lat) sequence
        # (GeoJSON order), or a {"lat": ..., "lon": ...} mapping.
        if isinstance(value, six.string_types):
            lat, lng = value.split(',')
        elif isinstance(value, (list, tuple)):
            # GeoJSON-alike
            lat, lng = value[1], value[0]
        elif isinstance(value, dict):
            lat = value.get('lat', 0)
            lng = value.get('lon', 0)
        value = Point(float(lng), float(lat))
        return value
class NgramField(CharField):
    """A text field indexed as n-grams; faceting is not supported."""

    field_type = 'ngram'

    def __init__(self, **kwargs):
        wants_facets = kwargs.get('faceted') is True
        if wants_facets:
            raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
        super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
    """Like NgramField, but only grams anchored at the token edge."""
    field_type = 'edge_ngram'
class IntegerField(SearchField):
    """A field whose prepared values are coerced to ``int``."""

    field_type = 'integer'

    def __init__(self, **kwargs):
        facet_class = kwargs.get('facet_class')
        kwargs['facet_class'] = FacetIntegerField if facet_class is None else facet_class
        super(IntegerField, self).__init__(**kwargs)

    def prepare(self, obj):
        prepared = super(IntegerField, self).prepare(obj)
        return self.convert(prepared)

    def convert(self, value):
        return None if value is None else int(value)
class FloatField(SearchField):
    """A field whose prepared values are coerced to ``float``."""

    field_type = 'float'

    def __init__(self, **kwargs):
        facet_class = kwargs.get('facet_class')
        kwargs['facet_class'] = FacetFloatField if facet_class is None else facet_class
        super(FloatField, self).__init__(**kwargs)

    def prepare(self, obj):
        prepared = super(FloatField, self).prepare(obj)
        return self.convert(prepared)

    def convert(self, value):
        return None if value is None else float(value)
class DecimalField(SearchField):
    """A decimal field; values are serialized as strings (note the
    'string' field_type), preserving exact decimal representation."""

    field_type = 'string'

    def __init__(self, **kwargs):
        facet_class = kwargs.get('facet_class')
        kwargs['facet_class'] = FacetDecimalField if facet_class is None else facet_class
        super(DecimalField, self).__init__(**kwargs)

    def prepare(self, obj):
        prepared = super(DecimalField, self).prepare(obj)
        return self.convert(prepared)

    def convert(self, value):
        return None if value is None else six.text_type(value)
class BooleanField(SearchField):
    """A field whose prepared values are coerced to ``bool``."""

    field_type = 'boolean'

    def __init__(self, **kwargs):
        facet_class = kwargs.get('facet_class')
        kwargs['facet_class'] = FacetBooleanField if facet_class is None else facet_class
        super(BooleanField, self).__init__(**kwargs)

    def prepare(self, obj):
        prepared = super(BooleanField, self).prepare(obj)
        return self.convert(prepared)

    def convert(self, value):
        return None if value is None else bool(value)
class DateField(SearchField):
    """A date field; string inputs are parsed with DATETIME_REGEX."""

    field_type = 'date'

    def __init__(self, **kwargs):
        if kwargs.get('facet_class') is None:
            kwargs['facet_class'] = FacetDateField
        super(DateField, self).__init__(**kwargs)

    def convert(self, value):
        if value is None:
            return None
        # Strings are parsed; the time portion (if any) is discarded.
        # Non-string values pass through unchanged.
        if isinstance(value, six.string_types):
            match = DATETIME_REGEX.search(value)
            if match:
                data = match.groupdict()
                return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
            else:
                raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
        return value
class DateTimeField(SearchField):
    """A datetime field; string inputs are parsed with DATETIME_REGEX."""

    field_type = 'datetime'

    def __init__(self, **kwargs):
        if kwargs.get('facet_class') is None:
            kwargs['facet_class'] = FacetDateTimeField
        super(DateTimeField, self).__init__(**kwargs)

    def convert(self, value):
        if value is None:
            return None
        # Strings are parsed into naive datetimes (no timezone handling
        # here); non-string values pass through unchanged.
        if isinstance(value, six.string_types):
            match = DATETIME_REGEX.search(value)
            if match:
                data = match.groupdict()
                return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
            else:
                raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
        return value
class MultiValueField(SearchField):
    """A field holding multiple values; prepared data is coerced to a
    list.  Template-based preparation is rejected because a rendered
    template yields a single flat string."""

    field_type = 'string'

    def __init__(self, **kwargs):
        facet_class = kwargs.get('facet_class')
        kwargs['facet_class'] = FacetMultiValueField if facet_class is None else facet_class
        if kwargs.get('use_template') is True:
            raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
        super(MultiValueField, self).__init__(**kwargs)
        self.is_multivalued = True

    def prepare(self, obj):
        prepared = super(MultiValueField, self).prepare(obj)
        return self.convert(prepared)

    def convert(self, value):
        return None if value is None else list(value)
class FacetField(SearchField):
    """
    ``FacetField`` is slightly different than the other fields because it can
    work in conjunction with other fields as its data source.
    Accepts an optional ``facet_for`` kwarg, which should be the field name
    (not ``index_fieldname``) of the field it should pull data from.
    """
    instance_name = None

    def __init__(self, **kwargs):
        # Validate/strip facet-specific kwargs before delegating upward.
        handled_kwargs = self.handle_facet_parameters(kwargs)
        super(FacetField, self).__init__(**handled_kwargs)

    def handle_facet_parameters(self, kwargs):
        # Options that make no sense for a facet field are rejected
        # outright rather than silently ignored.
        if kwargs.get('faceted', False):
            raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
        if not kwargs.get('null', True):
            raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
        if not kwargs.get('indexed', True):
            raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
        if kwargs.get('facet_class'):
            raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
        self.facet_for = None
        self.facet_class = None
        # Make sure the field is nullable.
        kwargs['null'] = True
        # `facet_for` is consumed here; it must not reach SearchField.
        if 'facet_for' in kwargs:
            self.facet_for = kwargs['facet_for']
            del(kwargs['facet_for'])
        return kwargs

    def get_facet_for_name(self):
        # The source field's name, defaulting to this field's own name.
        return self.facet_for or self.instance_name
# Concrete faceted variants: each combines FacetField's kwarg handling
# (first in the MRO) with the conversion behaviour of the matching
# regular field.
class FacetCharField(FacetField, CharField):
    pass


class FacetIntegerField(FacetField, IntegerField):
    pass


class FacetFloatField(FacetField, FloatField):
    pass


class FacetDecimalField(FacetField, DecimalField):
    pass


class FacetBooleanField(FacetField, BooleanField):
    pass


class FacetDateField(FacetField, DateField):
    pass


class FacetDateTimeField(FacetField, DateTimeField):
    pass


class FacetMultiValueField(FacetField, MultiValueField):
    pass
| mit |
matmutant/sl4a | python/src/Lib/ntpath.py | 60 | 17131 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
# Platform tweaks: Windows CE has no C:\ drive convention, and the OS/2
# (VACPP) build prefers forward slashes as the alternate separator.
if 'ce' in sys.builtin_module_names:
    defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
    # OS/2 w/ VACPP
    altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
    """Normalize case of pathname.

    Makes all characters lowercase and all slashes into backslashes."""
    backslashed = s.replace("/", "\\")
    return backslashed.lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
    """Test whether a path is absolute"""
    rest = splitdrive(s)[1]
    # An empty remainder slices to '', which is not a member of the tuple,
    # so no separate emptiness check is required.
    return rest[:1] in ('/', '\\')
# Join two (or more) paths.
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0  # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1
        elif isabs(b):
            # This probably wipes out path so far. However, it's more
            # complicated if path begins with a drive letter:
            # 1. join('c:', '/a') == 'c:/a'
            # 2. join('c:/', '/a') == 'c:/a'
            # But
            # 3. join('c:/a', '/b') == '/b'
            # 4. join('c:', 'd:/') = 'd:/'
            # 5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1
            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                                   path[-1] not in "/\\"):
                # case 3
                b_wins = 1
        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            # (path is non-empty here, otherwise b_wins would be set.)
            assert len(path) > 0
            if path[-1] in "/\\":
                if b and b[0] in "/\\":
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                # Drive-relative: 'c:' + 'a' must stay 'c:a', not 'c:\\a'.
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'
    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into drive and path specifiers. Returns a 2-tuple
    "(drive,path)"; either part may be empty"""
    # A drive is exactly "<letter>:"; only the colon position is checked.
    if len(p) >= 2 and p[1] == ':':
        return p[:2], p[2:]
    return '', p
# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.
    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes). unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    if p[1:2] == ':':
        return '', p # Drive letter present
    firstTwo = p[0:2]
    if firstTwo == '//' or firstTwo == '\\\\':
        # is a UNC path:
        # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
        # \\machine\mountpoint\directories...
        #           directory ^^^^^^^^^^^^^^^
        # normcase() maps '/' to '\\' so a single find('\\') covers both.
        normp = normcase(p)
        index = normp.find('\\', 2)
        if index == -1:
            ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
            return ("", p)
        index = normp.find('\\', index + 1)
        if index == -1:
            # '\\host\mount' with no trailing component: whole path is UNC.
            index = len(p)
        return p[:index], p[index:]
    return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname.
    Return tuple (head, tail) where tail is everything after the final slash.
    Either part may be empty."""
    d, p = splitdrive(p)
    # set i to index beyond p's last slash
    i = len(p)
    while i and p[i-1] not in '/\\':
        i = i - 1
    head, tail = p[:i], p[i:]  # now tail has no slashes
    # remove trailing slashes from head, unless it's all slashes
    head2 = head
    while head2 and head2[-1] in '/\\':
        head2 = head2[:-1]
    # If stripping removed everything, head was pure slashes (a root):
    # keep it, so split('\\\\') stays ('\\\\', '').
    head = head2 or head
    return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Delegates to the shared implementation in genericpath, parameterized with
# this module's separator characters; the docstring is inherited from it.
def splitext(p):
    return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Returns the final component of a pathname"""
    _head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Returns the directory component of a pathname"""
    head, _tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for symbolic link.
    On WindowsNT/95 and OS/2 always returns false
    """
    # Symlinks are not supported by these platforms' stat, so this is a
    # constant predicate.
    return False

# With no symlink support, a path "lexists" exactly when it exists.
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)"""
    unc, rest = splitunc(path)
    if unc:
        # UNC share root: '//host/mount' optionally followed by one slash.
        return rest in ("", "/", "\\")
    p = splitdrive(path)[1]
    # Root of a drive is exactly one (back)slash after the drive spec.
    return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.
    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""
    # Python 2 only: warn that os.path.walk is removed in Python 3.
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.")
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip the whole subtree.
        return
    # Call back before recursing so func may prune `names` in place.
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        if isdir(name):
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # i ends up just past the '~user' prefix (first separator or end).
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # Home-directory lookup order: HOME, USERPROFILE, HOMEDRIVE+HOMEPATH.
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        # No usable environment variables: return the path unchanged.
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: #~user
        # Replace the last component of the current user's home with `user`,
        # assuming all home directories are siblings.
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verifed in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged."""
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    # Single left-to-right scan; `res` accumulates output while `path` is
    # repeatedly re-sliced past consumed text and `index` reset into it.
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                # Unterminated quote: copy the rest verbatim.
                res = res + path
                index = pathlen - 1
        elif c == '%':  # variable or '%%'
            if path[index + 1:index + 2] == '%':
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # No closing '%': keep the literal text.
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '%' + var + '%'
        elif c == '$':  # variable or '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # No closing '}': keep the literal text.
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # Bare $name: consume the longest run of varchars.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    # Step back so the terminating character is re-examined.
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # We need to be careful here. If the prefix is empty, and the path starts
    # with a backslash, it could either be an absolute path on the current
    # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
    # is therefore imperative NOT to collapse multiple backslashes blindly in
    # that case.
    # The code below preserves multiple backslashes when there is no drive
    # letter. This means that the invalid filename \\\a\b is preserved
    # unchanged, where a\\\b is normalised to a\b. It's not clear that there
    # is any better behaviour for such edge cases.
    if prefix == '':
        # No drive letter - preserve initial backslashes
        while path[:1] == "\\":
            prefix = prefix + "\\"
            path = path[1:]
    else:
        # We have a drive letter - collapse initial backslashes
        if path.startswith("\\"):
            prefix = prefix + "\\"
            path = path.lstrip("\\")
    # Walk the components, dropping '.'/'' and resolving '..' where possible.
    comps = path.split("\\")
    i = 0
    while i < len(comps):
        if comps[i] in ('.', ''):
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                # Cancel '..' against the preceding (real) component.
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                # '..' at the root goes nowhere: drop it.
                del comps[i]
            else:
                # Leading '..' in a relative path must be kept.
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + "\\".join(comps)
# Return an absolute path.
# Pick an abspath implementation: use the native Win32 resolver when the
# `nt` builtin module is available, otherwise a portable approximation.
try:
    from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
    def abspath(path):
        """Return the absolute version of a path."""
        if not isabs(path):
            path = join(os.getcwd(), path)
        return normpath(path)
else: # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path: # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass # Bad path - return unchanged.
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
# getwindowsversion()[3] is the platform id; 2 == VER_PLATFORM_WIN32_NT.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_list = abspath(start).split(sep)
    path_list = abspath(path).split(sep)
    # Component [0] is the drive or UNC root; comparisons are case-insensitive.
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = splitunc(path)
        unc_start, rest = splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                                                % (path_list[0], start_list[0]))
    # Work out how much of the filepath is shared by start and path.
    for i in range(min(len(start_list), len(path_list))):
        if start_list[i].lower() != path_list[i].lower():
            break
    else:
        # Loop completed without a mismatch: the shorter list is a prefix.
        i += 1
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| apache-2.0 |
shortbloke/home_assistant_config | custom_components/badnest/sensor.py | 1 | 3535 | import logging
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS
)
_LOGGER = logging.getLogger(__name__)
PROTECT_SENSOR_TYPES = [
"co_status",
"smoke_status",
"battery_health_state"
]
async def async_setup_platform(hass,
                               config,
                               async_add_entities,
                               discovery_info=None):
    """Set up the badnest sensor platform.

    Creates one entity per Nest temperature sensor and, for each Nest
    Protect, one entity per status value in PROTECT_SENSOR_TYPES.
    """
    # The shared API client is stored in hass.data by the integration setup.
    api = hass.data[DOMAIN]['api']
    temperature_sensors = []
    _LOGGER.info("Adding temperature sensors")
    for sensor in api['temperature_sensors']:
        _LOGGER.info(f"Adding nest temp sensor uuid: {sensor}")
        temperature_sensors.append(NestTemperatureSensor(sensor, api))
    async_add_entities(temperature_sensors)
    protect_sensors = []
    _LOGGER.info("Adding protect sensors")
    for sensor in api['protects']:
        _LOGGER.info(f"Adding nest protect sensor uuid: {sensor}")
        for sensor_type in PROTECT_SENSOR_TYPES:
            protect_sensors.append(NestProtectSensor(sensor, sensor_type, api))
    async_add_entities(protect_sensors)
class NestTemperatureSensor(Entity):
    """Home Assistant entity exposing a Nest temperature sensor."""

    def __init__(self, device_id, api):
        """Initialize the sensor."""
        self._name = "Nest Temperature Sensor"
        self._unit_of_measurement = TEMP_CELSIUS
        self.device_id = device_id
        self.device = api

    @property
    def unique_id(self):
        """Return an unique ID."""
        return self.device_id

    @property
    def name(self):
        """Return the name of the sensor."""
        info = self.device.device_data[self.device_id]
        return info['name']

    @property
    def state(self):
        """Return the current temperature reading."""
        info = self.device.device_data[self.device_id]
        return info['temperature']

    @property
    def device_class(self):
        """Return the device class of this entity."""
        return DEVICE_CLASS_TEMPERATURE

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def update(self):
        """Refresh the cached device data from the Nest API."""
        self.device.update()

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        info = self.device.device_data[self.device_id]
        return {ATTR_BATTERY_LEVEL: info['battery_level']}
class NestProtectSensor(Entity):
    """Home Assistant entity exposing one status value of a Nest Protect."""

    def __init__(self, device_id, sensor_type, api):
        """Initialize the sensor."""
        self._name = "Nest Protect Sensor"
        self.device_id = device_id
        self._sensor_type = sensor_type
        self.device = api

    @property
    def unique_id(self):
        """Return an unique ID."""
        return '_'.join((self.device_id, self._sensor_type))

    @property
    def name(self):
        """Return the name of the sensor."""
        info = self.device.device_data[self.device_id]
        return info['name'] + f' {self._sensor_type}'

    @property
    def state(self):
        """Return the state of the sensor."""
        info = self.device.device_data[self.device_id]
        return info[self._sensor_type]

    def update(self):
        """Refresh the cached device data from the Nest API."""
        self.device.update()
| mit |
louyihua/edx-platform | common/djangoapps/util/tests/test_db.py | 7 | 8019 | """Tests for util.db module."""
import ddt
import threading
import time
import unittest
from unittest import skipIf
from django.contrib.auth.models import User
from django.core.management import call_command
from django.conf import settings
from django.db import connection, IntegrityError
from django.db.transaction import atomic, TransactionManagementError
from django.test import TestCase, TransactionTestCase
from util.db import (
commit_on_success, enable_named_outer_atomic, outer_atomic, generate_int_id, NoOpMigrationModules
)
def do_nothing():
    """No-op stand-in for a view body in the transaction-decorator tests."""
    return None
@ddt.ddt
class TransactionManagersTestCase(TransactionTestCase):
    """
    Tests commit_on_success and outer_atomic.
    Note: This TestCase only works with MySQL.
    To test do: "./manage.py lms --settings=test_with_mysql test util.tests.test_db"
    """
    # Each row: (decorator under test, expected exception type in thread 1,
    # expected `created` flag in thread 1, expected `created` flag in
    # thread 2).  With the default isolation level the delayed thread hits
    # an IntegrityError; with READ COMMITTED it sees the other thread's row.
    @ddt.data(
        (outer_atomic(), IntegrityError, None, True),
        (outer_atomic(read_committed=True), type(None), False, True),
        (commit_on_success(), IntegrityError, None, True),
        (commit_on_success(read_committed=True), type(None), False, True),
    )
    @ddt.unpack
    def test_concurrent_requests(self, transaction_decorator, exception_class, created_in_1, created_in_2):
        """
        Test that when isolation level is set to READ COMMITTED get_or_create()
        for the same row in concurrent requests does not raise an IntegrityError.
        """
        if connection.vendor != 'mysql':
            raise unittest.SkipTest('Only works on MySQL.')
        class RequestThread(threading.Thread):
            """ A thread which runs a dummy view."""
            def __init__(self, delay, **kwargs):
                super(RequestThread, self).__init__(**kwargs)
                # Seconds to sleep between the initial lookup and the
                # get_or_create(), to force a deterministic interleaving.
                self.delay = delay
                self.status = {}
            @transaction_decorator
            def run(self):
                """A dummy view."""
                try:
                    try:
                        User.objects.get(username='student', email='student@edx.org')
                    except User.DoesNotExist:
                        pass
                    else:
                        raise AssertionError('Did not raise User.DoesNotExist.')
                    if self.delay > 0:
                        time.sleep(self.delay)
                    __, created = User.objects.get_or_create(username='student', email='student@edx.org')
                except Exception as exception: # pylint: disable=broad-except
                    self.status['exception'] = exception
                else:
                    self.status['created'] = created
        # Thread 1 sleeps inside its transaction, so thread 2 always
        # creates the row (and commits) first.
        thread1 = RequestThread(delay=1)
        thread2 = RequestThread(delay=0)
        thread1.start()
        thread2.start()
        thread2.join()
        thread1.join()
        self.assertIsInstance(thread1.status.get('exception'), exception_class)
        self.assertEqual(thread1.status.get('created'), created_in_1)
        self.assertIsNone(thread2.status.get('exception'))
        self.assertEqual(thread2.status.get('created'), created_in_2)
    def test_outer_atomic_nesting(self):
        """
        Test that outer_atomic raises an error if it is nested inside
        another atomic.
        """
        if connection.vendor != 'mysql':
            raise unittest.SkipTest('Only works on MySQL.')
        # Legal combinations: outer_atomic at the top level, and plain
        # atomic nested anywhere.
        outer_atomic()(do_nothing)()
        with atomic():
            atomic()(do_nothing)()
        with outer_atomic():
            atomic()(do_nothing)()
        # Illegal: outer_atomic nested inside any atomic block.
        with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
            with atomic():
                outer_atomic()(do_nothing)()
        with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
            with outer_atomic():
                outer_atomic()(do_nothing)()
    def test_commit_on_success_nesting(self):
        """
        Test that commit_on_success raises an error if it is nested inside
        atomic or if the isolation level is changed when it is nested
        inside another commit_on_success.
        """
        # pylint: disable=not-callable
        if connection.vendor != 'mysql':
            raise unittest.SkipTest('Only works on MySQL.')
        commit_on_success(read_committed=True)(do_nothing)()
        # Changing isolation level inside an enclosing commit_on_success
        # is rejected, as is nesting inside a plain atomic block.
        with self.assertRaisesRegexp(TransactionManagementError, 'Cannot change isolation level when nested.'):
            with commit_on_success():
                commit_on_success(read_committed=True)(do_nothing)()
        with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
            with atomic():
                commit_on_success(read_committed=True)(do_nothing)()
    def test_named_outer_atomic_nesting(self):
        """
        Test that a named outer_atomic raises an error only if nested in
        enable_named_outer_atomic and inside another atomic.
        """
        if connection.vendor != 'mysql':
            raise unittest.SkipTest('Only works on MySQL.')
        # Outside enable_named_outer_atomic, named outer_atomics behave
        # like plain atomics and may be nested freely.
        outer_atomic(name='abc')(do_nothing)()
        with atomic():
            outer_atomic(name='abc')(do_nothing)()
        with enable_named_outer_atomic('abc'):
            outer_atomic(name='abc')(do_nothing)() # Not nested.
            with atomic():
                outer_atomic(name='pqr')(do_nothing)() # Not enabled.
            with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
                with atomic():
                    outer_atomic(name='abc')(do_nothing)()
        with enable_named_outer_atomic('abc', 'def'):
            outer_atomic(name='def')(do_nothing)() # Not nested.
            with atomic():
                outer_atomic(name='pqr')(do_nothing)() # Not enabled.
            with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
                with atomic():
                    outer_atomic(name='def')(do_nothing)()
            with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
                with outer_atomic():
                    outer_atomic(name='def')(do_nothing)()
            with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
                with atomic():
                    outer_atomic(name='abc')(do_nothing)()
            with self.assertRaisesRegexp(TransactionManagementError, 'Cannot be inside an atomic block.'):
                with outer_atomic():
                    outer_atomic(name='abc')(do_nothing)()
@ddt.ddt
class GenerateIntIdTestCase(TestCase):
    """Tests for `generate_int_id`"""

    @ddt.data(10)
    def test_no_used_ids(self, times):
        """
        Verify that we get a random integer within the specified range
        when there are no used ids.
        """
        lower, upper = 1, times
        for _ in range(times):
            self.assertIn(generate_int_id(lower, upper),
                          range(lower, upper + 1))

    @ddt.data(10)
    def test_used_ids(self, times):
        """
        Verify that we get a random integer within the specified range
        but not in a list of used ids.
        """
        lower, upper = 1, times
        used_ids = {2, 4, 6, 8}
        allowed = set(range(lower, upper + 1)) - used_ids
        for _ in range(times):
            self.assertIn(generate_int_id(lower, upper, used_ids), allowed)
class MigrationTests(TestCase):
    """
    Tests for migrations.
    """
    @skipIf(isinstance(settings.MIGRATION_MODULES, NoOpMigrationModules), 'Skip in case of NoOpMigrationModules')
    def test_migrations_are_in_sync(self):
        """
        Tests that the migration files are in sync with the models.
        If this fails, you need to run the Django command makemigrations.
        """
        # 'makemigrations -e' exits with a non-zero status when model
        # changes are missing migrations, which surfaces as SystemExit.
        with self.assertRaises(SystemExit):
            call_command('makemigrations', '-e')
| agpl-3.0 |
caesar2164/edx-platform | openedx/core/djangoapps/contentserver/admin.py | 27 | 1455 | """
Django admin page for CourseAssetCacheTtlConfig, which allows you to configure the TTL
that gets used when sending cachability headers back with request course assets.
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from .models import CourseAssetCacheTtlConfig, CdnUserAgentsConfig
class CourseAssetCacheTtlConfigAdmin(ConfigurationModelAdmin):
    """
    Basic configuration for cache TTL.
    """
    # Single configurable value: the TTL sent in cache headers.
    list_display = [
        'cache_ttl'
    ]
    def get_list_display(self, request):
        """
        Restore default list_display behavior.
        ConfigurationModelAdmin overrides this, but in a way that doesn't
        respect the ordering. This lets us customize it the usual Django admin
        way.
        """
        return self.list_display
class CdnUserAgentsConfigAdmin(ConfigurationModelAdmin):
    """
    Basic configuration for CDN user agent whitelist.
    """
    # Single configurable value: the whitelisted CDN user-agent strings.
    list_display = [
        'cdn_user_agents'
    ]
    def get_list_display(self, request):
        """
        Restore default list_display behavior.
        ConfigurationModelAdmin overrides this, but in a way that doesn't
        respect the ordering. This lets us customize it the usual Django admin
        way.
        """
        return self.list_display
return self.list_display
admin.site.register(CourseAssetCacheTtlConfig, CourseAssetCacheTtlConfigAdmin)
admin.site.register(CdnUserAgentsConfig, CdnUserAgentsConfigAdmin)
| agpl-3.0 |
40223145c2g18/40223145 | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/dummy/connection.py | 707 | 3049 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Public API mirrors multiprocessing.connection.
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from queue import Queue
# Only one (anonymous, in-process) address family exists in this backend.
families = [None]
class Listener(object):
    """In-process stand-in for multiprocessing.connection.Listener.

    Instead of a socket, the listener's "address" is a Queue on which
    connecting clients deposit their (out, in) queue pairs.
    """

    def __init__(self, address=None, family=None, backlog=1):
        self._backlog_queue = Queue(backlog)

    def accept(self):
        """Block until a client connects, then wrap its queue pair."""
        return Connection(*self._backlog_queue.get())

    def close(self):
        """Drop the backlog queue; no further connections can be accepted."""
        self._backlog_queue = None

    @property
    def address(self):
        return self._backlog_queue

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
def Client(address):
    """Connect to a Listener: hand it a fresh queue pair, keep our ends."""
    inbound, outbound = Queue(), Queue()
    # From the listener's perspective our outbound queue is its input.
    address.put((outbound, inbound))
    return Connection(inbound, outbound)
def Pipe(duplex=True):
    """Return two Connections wired back-to-back through a pair of queues."""
    left, right = Queue(), Queue()
    return Connection(left, right), Connection(right, left)
class Connection(object):
    """Duplex endpoint backed by a pair of queues.

    ``_in`` carries inbound messages and ``_out`` outbound ones; send/recv
    are bound directly to the queues' put/get (no pickling is involved, as
    everything stays in-process).
    """

    def __init__(self, _in, _out):
        self._out = _out
        self._in = _in
        self.send = self.send_bytes = _out.put
        self.recv = self.recv_bytes = _in.get

    def poll(self, timeout=0.0):
        """Return whether a message is available within ``timeout`` seconds."""
        if self._in.qsize() > 0:
            return True
        if timeout <= 0.0:
            return False
        # Wait on the queue's internal condition; the context manager
        # acquires and releases the underlying lock.
        with self._in.not_empty:
            self._in.not_empty.wait(timeout)
        return self._in.qsize() > 0

    def close(self):
        """No-op: queues require no cleanup."""
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
| gpl-3.0 |
crateio/crate.web | crate/web/packages/admin.py | 1 | 3016 | from django.contrib import admin
from crate.web.packages.models import Package, Release, ReleaseFile, TroveClassifier, PackageURI
from crate.web.packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ChangeLog
from crate.web.packages.models import DownloadDelta, ReadTheDocsPackageSlug
class PackageURIAdmin(admin.TabularInline):
    # Inline editor for a package's URIs, embedded in PackageAdmin.
    model = PackageURI
    extra = 0
class PackageAdmin(admin.ModelAdmin):
    # Top-level package admin; the package's URIs are edited inline.
    inlines = [PackageURIAdmin]
    list_display = ["name", "created", "modified", "downloads_synced_on"]
    list_filter = ["created", "modified", "downloads_synced_on"]
    search_fields = ["name"]
class ReleaseRequireInline(admin.TabularInline):
    # Inline editor for a release's "requires" entries (used by ReleaseAdmin).
    model = ReleaseRequire
    extra = 0
class ReleaseProvideInline(admin.TabularInline):
    # Inline editor for a release's "provides" entries (used by ReleaseAdmin).
    model = ReleaseProvide
    extra = 0
class ReleaseObsoleteInline(admin.TabularInline):
    # Inline editor for a release's "obsoletes" entries (used by ReleaseAdmin).
    model = ReleaseObsolete
    extra = 0
class ReleaseFileInline(admin.TabularInline):
    # Inline editor for a release's distribution files (used by ReleaseAdmin).
    model = ReleaseFile
    extra = 0
class ReleaseURIInline(admin.TabularInline):
    # Inline editor for a release's URIs (used by ReleaseAdmin).
    model = ReleaseURI
    extra = 0
class ReleaseAdmin(admin.ModelAdmin):
    # Release admin with all dependent metadata edited inline.
    inlines = [ReleaseURIInline, ReleaseFileInline, ReleaseRequireInline, ReleaseProvideInline, ReleaseObsoleteInline]
    list_display = ["__unicode__", "package", "version", "summary", "author", "author_email", "maintainer", "maintainer_email", "created", "modified"]
    list_filter = ["created", "modified", "hidden"]
    search_fields = ["package__name", "version", "summary", "author", "author_email", "maintainer", "maintainer_email"]
    # raw_id avoids rendering a select box over the (large) package table.
    raw_id_fields = ["package"]
class TroveClassifierAdmin(admin.ModelAdmin):
    # Simple browse/search admin for trove classifier strings.
    list_display = ["trove"]
    search_fields = ["trove"]
class ReleaseFileAdmin(admin.ModelAdmin):
    # Standalone admin for release files (also editable inline on Release).
    list_display = ["release", "type", "python_version", "downloads", "comment", "created", "modified"]
    list_filter = ["type", "created", "modified"]
    search_fields = ["release__package__name", "filename", "digest"]
    raw_id_fields = ["release"]
class DownloadDeltaAdmin(admin.ModelAdmin):
    # Per-day download-count deltas for a release file.
    list_display = ["file", "date", "delta"]
    list_filter = ["date"]
    search_fields = ["file__release__package__name", "file__filename"]
    raw_id_fields = ["file"]
class ChangeLogAdmin(admin.ModelAdmin):
    # Audit log of package/release changes.
    list_display = ["package", "release", "type", "created", "modified"]
    list_filter = ["type", "created", "modified"]
    search_fields = ["package__name"]
    raw_id_fields = ["package", "release"]
class ReadTheDocsPackageSlugAdmin(admin.ModelAdmin):
    # Mapping of a package to its Read the Docs project slug.
    list_display = ["package", "slug"]
    search_fields = ["package__name", "slug"]
    raw_id_fields = ["package"]
# Register the model admins.  The Release detail models (requires/provides/
# obsoletes/URIs) are managed through ReleaseAdmin's inlines rather than
# registered on their own.
admin.site.register(Package, PackageAdmin)
admin.site.register(Release, ReleaseAdmin)
admin.site.register(ReleaseFile, ReleaseFileAdmin)
admin.site.register(TroveClassifier, TroveClassifierAdmin)
admin.site.register(DownloadDelta, DownloadDeltaAdmin)
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(ReadTheDocsPackageSlug, ReadTheDocsPackageSlugAdmin)
| bsd-2-clause |
Nic30/hwtHls | hwtHls/hls.py | 1 | 9330 | from hwt.code import If
from hwt.hdl.assignment import Assignment
from hwt.hdl.operator import Operator
from hwt.hdl.types.defs import BIT
from hwt.hdl.types.struct import HStruct
from hwt.hdl.value import HValue
from hwt.synthesizer.interfaceLevel.unitImplHelpers import getSignalName
from hwt.synthesizer.rtlLevel.netlist import RtlNetlist
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.unit import Unit
from typing import Union
from hwtHls.codeOps import HlsRead, HlsWrite, HlsOperation, \
HlsConst, AbstractHlsOp, HlsIO
from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals
class HLS_Error(Exception):
    """Base exception for errors raised during high-level synthesis."""
    pass
def link_nodes(parent, child):
    """Record a data dependency: ``child`` consumes the value of ``parent``."""
    parent.usedBy.append(child)
    child.dependsOn.append(parent)
def operator2Hls(operator: Operator, hls, nodeToHlsNode: dict) -> HlsOperation:
    """
    Recursively convert an operator and its inputs to HLS representation.

    :param nodeToHlsNode: memoization dict mapping RTL objects to HLS nodes
    :return: instance of HlsOperation representing this operator
    """
    try:
        return nodeToHlsNode[operator]
    # was already discovered
    except KeyError:
        pass
    # create HlsOperation node for this operator and register it
    # (node width is taken from the first operand's type)
    op_node = HlsOperation(hls,
                           operator.operator,
                           operator.operands[0]._dtype.bit_length())
    nodeToHlsNode[operator] = op_node
    # walk all inputs and connect them as my parent
    for op in operator.operands:
        op = hdlObj2Hls(op, hls, nodeToHlsNode)
        if op is not None:
            link_nodes(op, op_node)
    return op_node
def mux2Hls(obj: RtlSignal, hls, nodeToHlsNode: dict):
    """
    Recursively convert a signal which is the output of a
    multiplexer/demultiplexer to HLS nodes.

    :param nodeToHlsNode: memoization dict mapping RTL objects to HLS nodes
    """
    # NOTE(review): HlsMux is not imported at module level in this file;
    # as written this raises NameError when called — it must be added to
    # the `hwtHls.codeOps` import.
    try:
        return nodeToHlsNode[obj]
    # was already discovered
    except KeyError:
        pass
    if obj.hasGenericName:
        name = "mux_"
    else:
        name = obj.name
    _obj = HlsMux(hls, obj._dtype.bit_length(), name=name)
    nodeToHlsNode[obj] = _obj
    # add condition to dependencies of this MUX operator
    # (assumes all drivers share the condition of the first one — TODO confirm)
    c = hdlObj2Hls(obj.drivers[0].cond, hls, nodeToHlsNode)
    link_nodes(c, _obj)
    for a in obj.drivers:
        assert isinstance(a, Assignment), a
        if a.indexes:
            raise NotImplementedError()
        src = hdlObj2Hls(a.src, hls, nodeToHlsNode)
        link_nodes(src, _obj)
    return _obj
def driver2Hls(obj, hls, nodeToHlsNode: dict) -> AbstractHlsOp:
    """
    Convert the driver of a signal (read/write/operator/assignment)
    to its HLS node, registering it in ``nodeToHlsNode``.
    """
    if isinstance(obj, HlsRead):
        # Reads are already HLS nodes; just register them.
        nodeToHlsNode[obj] = obj
        return obj
    elif isinstance(obj, HlsWrite):
        nodeToHlsNode[obj] = obj
        if obj.cond or obj.indexes:
            # Conditional/indexed writes are not supported yet.
            raise NotImplementedError()
        # Return the node producing the written value.
        return hdlObj2Hls(obj.src, hls, nodeToHlsNode)
    elif isinstance(obj, Operator):
        return operator2Hls(obj, hls, nodeToHlsNode)
    elif isinstance(obj, Assignment):
        if obj.parentStm is not None or obj.indexes:
            raise NotImplementedError()
        src = hdlObj2Hls(obj.src, hls, nodeToHlsNode)
        # The destination's endpoint node must already be registered.
        dst = nodeToHlsNode[obj.dst.endpoints[0]]
        link_nodes(src, dst)
        return src
    elif isinstance(obj, If):
        for o in obj._outputs:
            # render if as tree of muxes
            raise NotImplementedError(obj)
    else:
        raise NotImplementedError(obj)
def hdlObj2Hls(obj: Union[RtlSignal, HValue],
               hls, nodeToHlsNode: dict) -> AbstractHlsOp:
    """
    Convert RtlObject to HlsObject, register it and link it with its parent.

    :note: parent is the node which provides values to this operation
    :return: the converted HLS node, or None for an undriven HlsIO port
        (callers such as operator2Hls skip linking in that case)
    """
    if isinstance(obj, HValue) or obj._const:
        # constants become standalone HlsConst nodes; the node itself is the
        # cache key because equal constant values may appear multiple times
        _obj = HlsConst(obj)
        nodeToHlsNode[_obj] = _obj
        return _obj
    dcnt = len(obj.drivers)
    if dcnt > 1:
        # multiple drivers -> a multiplexer has to be built
        # [TODO] mux X indexed assignments
        return mux2Hls(obj, hls, nodeToHlsNode)
    elif dcnt == 1:
        # parent is just RtlSignal, we needs operation
        # it is drivern from
        return driver2Hls(obj.drivers[0], hls, nodeToHlsNode)
    else:
        # undriven signal is only legal for HLS IO ports; implicitly
        # returns None, which callers treat as "nothing to link"
        assert isinstance(obj, HlsIO), obj
def reconnect_endpoint_list(signals, oldEp, newEp):
    """
    For every RtlSignal in ``signals`` replace endpoint ``oldEp`` with
    ``newEp``; non-signal items are left untouched.
    """
    for sig in signals:
        if not isinstance(sig, RtlSignal):
            continue
        endpoints = sig.endpoints
        try:
            endpoints.remove(oldEp)
        except KeyError:
            # oldEp was not registered on this signal; nothing to drop
            pass
        endpoints.append(newEp)
class Hls():
    """
    High level synthesiser context.
    Convert sequential code to RTL.

    :ivar parentUnit: parent unit where RTL should be instantiated
    :ivar platform: platform with configuration of this HLS context
    :ivar freq: target frequency for RTL
    :ivar maxLatency: optional maximum allowed latency of circuit
    :ivar resources: optional resource constraints
    :ivar inputs: list of HlsRead in this context
    :ivar outputs: list of HlsWrite in this context
    :ivar io: dict HlsIO:Interface
    :ivar ctx: RtlNetlist (container of RTL signals for this HLS context)
    """
    def __init__(self, parentUnit: Unit,
                 freq, maxLatency=None, resource_constrain=None):
        """
        :param parentUnit: Unit instance the generated RTL is placed into
        :param freq: target clock frequency; clk_period is derived as 1/freq
        :param maxLatency: optional latency bound (stored for the scheduler)
        :param resource_constrain: optional resource constraint passed to
            the scheduler during synthesise()
        """
        self.parentUnit = parentUnit
        self.platform = parentUnit._target_platform
        if self.platform is None:
            raise Exception("HLS requires platform to be specified")
        # clock period for the requested frequency
        self.clk_period = 1 / int(freq)
        self.maxLatency = maxLatency
        self.resource_constrain = resource_constrain
        self.inputs = []
        self.outputs = []
        self._io = {}
        self.ctx = RtlNetlist()
        # scheduler and allocator implementations come from the platform
        self.scheduler = self.platform.scheduler(self)
        self.allocator = self.platform.allocator(self)
        # let the platform perform any additional initialisation
        self.platform.onHlsInit(self)
    def var(self, name, dtype=BIT, def_val=None):
        """
        Universal HLS code variable

        For an HStruct dtype a field container is built with one variable
        per named field; otherwise a plain signal is created in self.ctx.
        """
        if isinstance(dtype, HStruct):
            if def_val is not None:
                raise NotImplementedError()
            container = dtype.fromPy(None)
            for f in dtype.fields:
                # fields without a name are padding and get no variable
                if f.name is not None:
                    # NOTE(review): Hls defines no _var method; presumably
                    # this should call self.var recursively -- confirm
                    r = self._var("%s_%s" % (name, f.name), f.dtype)
                    setattr(container, f.name, r)
            return container
        return self.ctx.sig(name, dtype=dtype, def_val=def_val)
    def convert_indexed_io_assignments_to_HlsWrite(self):
        """
        Replace indexed Assignment statements which target an HlsIO with
        HlsWrite nodes, moving endpoint links of the index signals from the
        old assignment to the new write.
        """
        to_destroy = []
        statements = self.ctx.statements
        for stm in statements:
            if isinstance(stm, Assignment)\
                    and stm.indexes\
                    and isinstance(stm.dst, HlsIO):
                a = stm
                to_destroy.append(a)
                w = HlsWrite(self, a.src, a.dst)
                w.indexes = a.indexes
                # the index signals now feed the HlsWrite instead of the
                # original Assignment
                reconnect_endpoint_list(w.indexes, a, w)
        for a in to_destroy:
            statements.remove(a)
    def _discoverAllNodes(self):
        """
        Walk signals and extract operations as AbstractHlsOp
        (convert from representation with signals
        to directed graph of operations)
        """
        self.convert_indexed_io_assignments_to_HlsWrite()
        removeUnconnectedSignals(self.ctx)
        # materialise HlsRead nodes for read-only IO
        # (HlsWrite nodes for outputs were created earlier)
        for io, ioIntf in self._io.items():
            if io.drivers:
                if io.endpoints:
                    # R/W
                    raise NotImplementedError()
                else:
                    # WriteOnly, HlsWrite already created
                    pass
            elif io.endpoints:
                # NOTE(review): io.drivers is falsy on this branch, so the
                # inner R/W check below can never trigger -- confirm
                if io.drivers:
                    # R/W
                    raise NotImplementedError()
                else:
                    # ReadOnly
                    r = HlsRead(self, ioIntf)
                    io.drivers.append(r)
                    io.origin = r
            else:
                raise HLS_Error("Unused IO", io, ioIntf)
        # used as seen set
        nodeToHlsNode = {}
        # walk CFG of HDL objects from outputs to inputs and convert it to CFG
        # of HLS nodes
        # [TODO] write can be to same destination,
        # if there is such a situation MUX has to be created
        for out in self.outputs:
            nodeToHlsNode[out] = out
        for out in self.outputs:
            driver = out.src
            driver = hdlObj2Hls(driver, self, nodeToHlsNode)
            link_nodes(driver, out)
        # list of discovered nodes
        nodes = list(nodeToHlsNode.values())
        return nodes
    def synthesise(self):
        """
        Convert code template to circuit (netlist of Hdl objects)
        """
        self.nodes = self._discoverAllNodes()
        for n in self.nodes:
            # choose a concrete hardware realization for every node before
            # scheduling/allocation
            n.resolve_realization()
        self.scheduler.schedule(self.resource_constrain)
        self.allocator.allocate()
    def io(self, io):
        """
        Convert signal/interface to IO

        Creates an HlsIO signal in this context and remembers the mapping
        HlsIO -> original interface in self._io.
        """
        name = "hls_io_" + getSignalName(io)
        dtype = io._dtype
        _io = HlsIO(self, name, dtype)
        _io.hasGenericName = True
        self.ctx.signals.add(_io)
        self._io[_io] = io
        return _io
    def __enter__(self):
        # temporary overload _sig method to use var from HLS
        self._unit_sig = self.parentUnit._sig
        self.parentUnit._sig = self.var
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # restore the original _sig; synthesise only on a clean exit
        self.parentUnit._sig = self._unit_sig
        if exc_type is None:
            self.synthesise()
| mit |
WSDC-NITWarangal/gunicorn | gunicorn/http/_sendfile.py | 86 | 2256 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import sys
# ctypes may fail to load under an SELinux execmem denial; surface that as
# an ImportError so callers fall back to a non-sendfile code path.
try:
    import ctypes
    import ctypes.util
except MemoryError:
    # selinux execmem denial
    # https://bugzilla.redhat.com/show_bug.cgi?id=488396
    raise ImportError
# NOTE(review): sys.platform carries version suffixes on BSDs
# (e.g. 'freebsd9'), so the exact-match test below may reject them -- verify
SUPPORTED_PLATFORMS = (
    'darwin',
    'freebsd',
    'dragonfly',
    'linux2')
if sys.version_info < (2, 6) or \
        sys.platform not in SUPPORTED_PLATFORMS:
    raise ImportError("sendfile isn't supported on this platform")
# resolve the platform libc's sendfile symbol once at import time
_libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
_sendfile = _libc.sendfile
def sendfile(fdout, fdin, offset, nbytes):
    """Thin ctypes wrapper over the platform's sendfile(2).

    :param fdout: destination (socket) file descriptor
    :param fdin: source file descriptor
    :param offset: byte offset in the input file to start from
    :param nbytes: number of bytes to send
    :return: number of bytes actually sent
    :raise OSError: on sendfile failure (except a partial EAGAIN send,
        whose byte count is returned instead)
    """
    if sys.platform == 'darwin':
        # macOS signature: sendfile(fd, s, offset, &len, hdtr, flags);
        # len is an in/out parameter updated with the bytes sent.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_voidp,
                              ctypes.c_int]
        _nbytes = ctypes.c_uint64(nbytes)
        result = _sendfile(fdin, fdout, offset, _nbytes, None, 0)
        if result == -1:
            e = ctypes.get_errno()
            # NOTE(review): c_uint64.value is never None, so this condition
            # reduces to e == errno.EAGAIN -- confirm intent
            if e == errno.EAGAIN and _nbytes.value is not None:
                return _nbytes.value
            raise OSError(e, os.strerror(e))
        return _nbytes.value
    elif sys.platform in ('freebsd', 'dragonfly',):
        # FreeBSD/DragonFly signature: sendfile(fd, s, offset, nbytes,
        # hdtr, &sbytes, flags); sbytes receives the bytes sent.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_uint64,
                              ctypes.c_uint64, ctypes.c_voidp,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_int]
        _sbytes = ctypes.c_uint64()
        result = _sendfile(fdin, fdout, offset, nbytes, None, _sbytes, 0)
        if result == -1:
            e = ctypes.get_errno()
            # partial write before EAGAIN: report what was actually sent
            if e == errno.EAGAIN and _sbytes.value is not None:
                return _sbytes.value
            raise OSError(e, os.strerror(e))
        return _sbytes.value
    else:
        # Linux signature: sendfile(out_fd, in_fd, *offset, count);
        # returns the number of bytes written.
        _sendfile.argtypes = [ctypes.c_int, ctypes.c_int,
                              ctypes.POINTER(ctypes.c_uint64), ctypes.c_size_t]
        _offset = ctypes.c_uint64(offset)
        sent = _sendfile(fdout, fdin, _offset, nbytes)
        if sent == -1:
            e = ctypes.get_errno()
            raise OSError(e, os.strerror(e))
        return sent
| mit |
Pikecillo/genna | external/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/dc_20001112.py | 1 | 1722 | #David Carlisle <davidc@nag.co.uk> plays nifty tricks with xsl:key
from Xml.Xslt import test_harness
sheet_1 = """<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0"
>
<xsl:output method="xml" indent="yes"/>
<xsl:key name="items-by-itemid"
match="item"
use="concat(generate-id(..), @itemid)"
/>
<xsl:template match="itemlist">
<xsl:variable name="x" select="generate-id(.)"/>
<xsl:for-each select="item[count(. |
key('items-by-itemid',
concat($x, @itemid))[1]) = 1]">
<xsl:sort select="@itemid" />
<tr>
<td><xsl:value-of select="@itemid"/></td>
<td><xsl:value-of select="sum(key('items-by-itemid',
concat($x, @itemid))/@units)"/></td>
</tr>
</xsl:for-each>
</xsl:template>
<xsl:template match='text()'/>
</xsl:stylesheet>"""
source_1 = """<x>
<itemlist>
<item itemid="Z101" units="1"/>
<item itemid="Z102" units="2"/>
<item itemid="Z101" units="4"/>
</itemlist>
<itemlist>
<item itemid="y101" units="1"/>
<item itemid="y102" units="2"/>
<item itemid="y102" units="3"/>
<item itemid="y101" units="4"/>
<item itemid="y101" units="5"/>
</itemlist>
</x>"""
expected_1 = """<?xml version="1.0" encoding="UTF-8"?>
<tr>
<td>Z101</td>
<td>5</td>
</tr>
<tr>
<td>Z102</td>
<td>2</td>
</tr>
<tr>
<td>y101</td>
<td>10</td>
</tr>
<tr>
<td>y102</td>
<td>5</td>
</tr>"""
def Test(tester):
    """Transform source_1 with sheet_1 and compare against expected_1."""
    src_info = test_harness.FileInfo(string=source_1)
    sheet_info = test_harness.FileInfo(string=sheet_1)
    test_harness.XsltTest(tester, src_info, [sheet_info], expected_1)
    return
| gpl-2.0 |
xodus7/tensorflow | tensorflow/contrib/quantize/python/fold_batch_norms.py | 2 | 39652 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to fold batch norm into preceding convolution or FC layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
def FoldBatchNorms(graph, is_training, freeze_batch_norm_delay=None):
  """Finds batch norm layers and folds them into preceding layers.

  Folding only affects the following layers: Conv2D, fully connected,
  depthwise convolution.

  Args:
    graph: Graph to walk and modify.
    is_training: Bool, true if training.
    freeze_batch_norm_delay: How many steps to wait before freezing moving
      mean and variance and using them for batch normalization. This value
      is used only when is_training is True.

  Raises:
    ValueError: When batch norm folding fails.
  """
  # Handle the fused FusedBatchNorm pattern first, then the legacy
  # unfused (batchnorm_1) pattern.
  _FoldFusedBatchNorms(
      graph,
      is_training=is_training,
      freeze_batch_norm_delay=freeze_batch_norm_delay)
  _FoldUnfusedBatchNorms(
      graph,
      is_training=is_training,
      freeze_batch_norm_delay=freeze_batch_norm_delay)
def _FoldFusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
  """Finds fused batch norm layers and folds them into preceding layers.

  Folding only affects the following layers: Conv2D, fully connected,
  depthwise convolution.

  Args:
    graph: Graph to walk and modify.
    is_training: Bool, true if training.
    freeze_batch_norm_delay: How many steps to wait before freezing moving mean
      and variance and using them for batch normalization.

  Raises:
    ValueError: When batch norm folding fails.
  """
  for match in _FindFusedBatchNorms(graph):
    scope, sep, _ = match.layer_op.name.rpartition('/')
    # Make sure new ops are added to `graph` and put on the same device as
    # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
    # named `scope`. Otherwise, TF creates a unique scope whose name starts with
    # `scope`.
    with graph.as_default(), graph.name_scope(scope + sep):
      with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):
        # new weights = old weights * gamma / sqrt(variance + epsilon)
        # new biases = -mean * gamma / sqrt(variance + epsilon) + beta
        multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(
            match.variance_tensor + match.bn_op.get_attr('epsilon'))
        bias_tensor = math_ops.subtract(
            match.beta_tensor,
            match.mean_tensor * multiplier_tensor,
            name='bias')
        correction_scale, correction_recip, correction_offset = None, None, None
        if is_training:
          # During training, correction factors keep the quantized weights
          # stable while switching from batch statistics to moving averages.
          correction_scale, correction_recip, correction_offset = (
              _ComputeBatchNormCorrections(
                  context='',
                  match=match,
                  freeze_batch_norm_delay=freeze_batch_norm_delay,
                  fused_batch_norm=True))
        # The shape of depthwise weights is different, so we need to reshape the
        # multiplier_tensor to ensure that the scaled_weight_tensor has the
        # expected shape.
        weights = match.weight_tensor
        if match.layer_op.type == 'DepthwiseConv2dNative':
          new_shape = [
              match.weight_tensor.get_shape().as_list()[2],
              match.weight_tensor.get_shape().as_list()[3]
          ]
          multiplier_tensor = array_ops.reshape(
              multiplier_tensor, new_shape, name='scale_reshape')
          if correction_scale is not None:
            correction_scale = array_ops.reshape(
                correction_scale, new_shape, name='correction_reshape')
        if correction_scale is not None:
          weights = math_ops.multiply(
              correction_scale, weights, name='correction_mult')
        scaled_weight_tensor = math_ops.multiply(
            weights, multiplier_tensor, name='mul_fold')
        # Re-create the layer with the folded (scaled) weights.
        new_layer_tensor = _CloneWithNewOperands(
            match.layer_op, match.input_tensor, scaled_weight_tensor,
            match.batch_to_space_op)
        if correction_recip is not None:
          new_layer_tensor = math_ops.multiply(
              correction_recip, new_layer_tensor, name='post_conv_mul')
          new_layer_tensor = math_ops.add(new_layer_tensor, (correction_offset),
                                          'correction_add')
        bias_add_tensor = math_ops.add(
            new_layer_tensor, bias_tensor, name='add_fold')
        # Point every consumer of the original batch norm output at the
        # folded output.
        nodes_modified_count = common.RerouteTensor(bias_add_tensor,
                                                    match.output_tensor)
        if nodes_modified_count == 0:
          raise ValueError('Folding batch norms failed, %s had no outputs.' %
                           match.output_tensor.name)
def _FindFusedBatchNorms(graph):
  """Finds all ops and tensors related to found FusedBatchNorms.

  Args:
    graph: Graph to inspect.

  Yields:
    _BatchNormMatch objects, one per matched FusedBatchNorm layer.
  """
  # Wildcard sub-patterns for the inputs of the layer and the batch norm.
  input_pattern = graph_matcher.OpTypePattern('*')
  # In practice, the weight pattern can match a Variable or a SpaceToBatchND
  # operation that follows a variable for atrous convolutions.
  weight_pattern = graph_matcher.OpTypePattern('*')
  gamma_pattern = graph_matcher.OpTypePattern('*')
  beta_pattern = graph_matcher.OpTypePattern('*')
  mean_pattern = graph_matcher.OpTypePattern('*')
  variance_pattern = graph_matcher.OpTypePattern('*')
  moving_average_pattern = graph_matcher.OpTypePattern('*')
  bn_decay_pattern = graph_matcher.OpTypePattern('*')
  layer_pattern = graph_matcher.OpTypePattern(
      'Conv2D|DepthwiseConv2dNative|MatMul',
      inputs=[input_pattern, weight_pattern])
  batch_to_space_pattern = graph_matcher.OpTypePattern(
      'BatchToSpaceND',
      inputs=[
          layer_pattern,
          graph_matcher.OpTypePattern('*'),
          graph_matcher.OpTypePattern('*')
      ])
  layer_output_pattern = graph_matcher.OneofPattern(
      [layer_pattern, batch_to_space_pattern])
  # MatMul has a Reshape between it and FusedBatchNorm.
  matmul_reshape_pattern = graph_matcher.OpTypePattern(
      'Reshape',
      inputs=[layer_output_pattern,
              graph_matcher.OpTypePattern('*')])
  batch_norm_pattern = graph_matcher.OpTypePattern(
      'FusedBatchNorm',
      inputs=[
          graph_matcher.OneofPattern(
              [matmul_reshape_pattern, layer_output_pattern]), gamma_pattern,
          beta_pattern, mean_pattern, variance_pattern
      ])
  matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(
      'Reshape', inputs=[batch_norm_pattern,
                         graph_matcher.OpTypePattern('*')])
  bn_matcher = graph_matcher.GraphMatcher(
      graph_matcher.OneofPattern(
          [matmul_bn_output_reshape_pattern, batch_norm_pattern]))
  # Secondary matcher used to locate the moving-average update subgraph
  # (moving_x - batch_x) * decay, needed only in training mode.
  moving_average_sub_pattern = graph_matcher.OpTypePattern(
      'Sub', inputs=[moving_average_pattern, batch_norm_pattern])
  moving_average_mul_pattern = graph_matcher.OpTypePattern(
      'Mul', inputs=[moving_average_sub_pattern, bn_decay_pattern])
  moving_avg_mul_matcher = graph_matcher.GraphMatcher(
      moving_average_mul_pattern)
  for match_result in bn_matcher.match_graph(graph):
    moving_mean_tensor = None
    moving_variance_tensor = None
    bn_decay_mean_tensor = None
    bn_decay_var_tensor = None
    batch_to_space_op = None
    layer_op = match_result.get_op(layer_pattern)
    layer_tensor = match_result.get_tensor(layer_pattern)
    bn_op = match_result.get_op(batch_norm_pattern)
    batch_epsilon = bn_op.get_attr('epsilon')
    # In the MatMul case, the output of batch norm is reshaped back into a
    # 2D tensor, so the output_tensor is the output of the Reshape op.
    output_tensor = bn_op.outputs[0]
    if layer_op.type == 'MatMul':
      output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)
      # If the matcher didn't match matmul_bn_output_reshape, there will be
      # another match for this 'MatMul' later, so we can skip this one.
      if output_reshape_op is None:
        continue
      output_tensor = output_reshape_op.outputs[0]
    # Ensure that the output tensor has consumers, otherwise this is a dangling
    # node and not a match.
    if not output_tensor.consumers():
      continue
    batch_to_space_op = match_result.get_op(batch_to_space_pattern)
    input_tensor = match_result.get_tensor(input_pattern)
    weight_tensor = match_result.get_tensor(weight_pattern)
    gamma_tensor = match_result.get_tensor(gamma_pattern)
    beta_tensor = match_result.get_tensor(beta_pattern)
    # FusedBatchNorm in training is different from that in inference. It takes
    # empty 'mean' and empty 'variance', and produces the mean and the variance
    # of the batch. Therefore, when is_training is true, mean_tensor and
    # variance_tensor point to 1st and 2nd (0-based) output of bn_op,
    # respectively; when is_training is false, they point to bn_op's inputs.
    is_training = bn_op.get_attr('is_training')
    if is_training:
      # FusedBatchNormGrad doesn't compute gradients of the batch_mean and
      # batch_variance outputs, so we need to substitute our own custom
      # gradient.
      # TODO(suharshs, raghuramank): Find a way to avoid needing this hack.
      # pylint: disable=protected-access
      bn_op._set_attr(
          '_gradient_op_type',
          attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))
      # pylint: enable=protected-access
      mean_tensor = bn_op.outputs[1]
      # The batch variance used during forward and backward prop is biased,
      # i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average
      # calculation, the variance is corrected by the term N/N-1 (Bessel's
      # correction). The variance tensor read from FuseBatchNorm has Bessel's
      # correction applied, so we undo it here.
      scope, sep, _ = bn_op.name.rpartition('/')
      g = ops.get_default_graph()
      with g.as_default(), g.name_scope(scope + sep):
        n = math_ops.cast(
            array_ops.size(layer_tensor) / array_ops.size(mean_tensor),
            dtypes.float32)
        variance_tensor = math_ops.multiply(
            bn_op.outputs[2], (n - 1) / n, name='Undo_Bessel_Correction')
      # TODO(suharshs): Find a way to get rid of this inner match.
      for mul_match_result in moving_avg_mul_matcher.match_graph(graph):
        sub_op = mul_match_result.get_op(moving_average_sub_pattern)
        if sub_op.inputs[1].name == bn_op.outputs[1].name:
          # During training: Batch Mean is bn_op.outputs[1]
          moving_mean_tensor = sub_op.inputs[0]
          bn_decay_mean_tensor = mul_match_result.get_tensor(bn_decay_pattern)
        if sub_op.inputs[1].name == bn_op.outputs[2].name:
          # During training: Batch Var is bn_op.outputs[2]
          moving_variance_tensor = sub_op.inputs[0]
          bn_decay_var_tensor = mul_match_result.get_tensor(bn_decay_pattern)
    else:
      mean_tensor = match_result.get_tensor(mean_pattern)
      variance_tensor = match_result.get_tensor(variance_pattern)
    yield _BatchNormMatch(
        layer_op=layer_op,
        bn_op=bn_op,
        output_tensor=output_tensor,
        input_tensor=input_tensor,
        weight_tensor=weight_tensor,
        gamma_tensor=gamma_tensor,
        beta_tensor=beta_tensor,
        mean_tensor=mean_tensor,
        variance_tensor=variance_tensor,
        moving_mean_tensor=moving_mean_tensor,
        moving_variance_tensor=moving_variance_tensor,
        bn_decay_mean_tensor=bn_decay_mean_tensor,
        bn_decay_var_tensor=bn_decay_var_tensor,
        batch_epsilon=batch_epsilon,
        batch_to_space_op=batch_to_space_op)
def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay,
                                 fused_batch_norm):
  """Computes batch norm correction params.

  Before batch normalization is frozen:
  We use batch statistics for batch norm.
    correction_scale = sigma_b/sigma_mv
    correction_recip = 1/correction_scale
    correction_offset = 0

  After batch normalization is frozen:
    correction_scale = sigma_b/sigma_mv
    correction_recip = 1
    correction_offset = gamma*(mu_b/sigma_b-mu_mv/sigma_mv).

  Batch norm is frozen if global_step > bn_freeze_delay.
  The corrections ensure that:
  a) The weights are quantized after scaling by gamma/sigma_mv. This enables
  smoother training as the scaling on the weights changes slowly, rather than
  jump across mini-batches
  b) Changing the values of the corrections allows for one to switch between
  using batch statistics to using moving mean and average, without requiring
  changes to batch_norm

  Args:
    context: The scope under which we look for batch norm params
    match: Object containing required batch norm tensors for correction
      computation.
    freeze_batch_norm_delay: Delay in steps at which computation switches
      from regular batch norm to frozen mean and variance.
    fused_batch_norm: Bool, true if fused batch norm is used. Currently
      unused by the computation itself; kept for interface compatibility.

  Returns:
    A tuple of correction_scale, correction_recip, correction_offset
  """
  g = ops.get_default_graph()
  prefix = '' if not context else context + '/'
  with g.name_scope(prefix + 'batch_norm_correction'):
    recip_sigma_mv = math_ops.rsqrt(
        match.moving_variance_tensor + match.batch_epsilon)
    recip_sigma = math_ops.rsqrt(match.variance_tensor + match.batch_epsilon)
    correction_scale = math_ops.divide(
        recip_sigma_mv, recip_sigma, name='scale_compute')
    correction_scale = array_ops.identity(
        correction_scale, name='correction_scale')
    correction_recip = math_ops.reciprocal(
        correction_scale, name='reciprocal_compute')
    correction_offset = math_ops.multiply(
        match.gamma_tensor,
        match.mean_tensor * recip_sigma -
        match.moving_mean_tensor * recip_sigma_mv,
        name='offset_compute')
    if freeze_batch_norm_delay is not None:
      use_mv_avg = math_ops.greater_equal(
          common.CreateOrGetQuantizationStep(),
          freeze_batch_norm_delay,
          name='use_moving_average')
    else:
      use_mv_avg = False
    bn_decay_zero = 0.0
    # Snapshot the consumers of each decay tensor BEFORE building the
    # smart_cond outputs, so RerouteTensor does not rewrite the inputs of
    # the conditionals we are about to create.
    # (A previously duplicated assignment of bn_decay_var_consumers from
    # bn_decay_mean_tensor was dead code and has been removed.)
    bn_decay_mean_consumers = list(match.bn_decay_mean_tensor.consumers())
    bn_decay_mean_out = utils.smart_cond(
        use_mv_avg,
        lambda: bn_decay_zero,
        lambda: match.bn_decay_mean_tensor,
        name='freeze_moving_mean')
    common.RerouteTensor(
        bn_decay_mean_out,
        match.bn_decay_mean_tensor,
        can_modify=bn_decay_mean_consumers)
    bn_decay_var_consumers = list(match.bn_decay_var_tensor.consumers())
    bn_decay_var_out = utils.smart_cond(
        use_mv_avg,
        lambda: bn_decay_zero,
        lambda: match.bn_decay_var_tensor,
        name='freeze_moving_var')
    common.RerouteTensor(
        bn_decay_var_out,
        match.bn_decay_var_tensor,
        can_modify=bn_decay_var_consumers)
    # After freezing, stop rescaling activations (recip -> 1) and instead
    # shift them by the offset that reconciles batch and moving statistics.
    correction_recip = utils.smart_cond(
        use_mv_avg,
        lambda: array_ops.ones(correction_scale.shape),
        lambda: correction_recip,
        name='correction_recip')
    correction_offset = utils.smart_cond(
        use_mv_avg,
        lambda: correction_offset,
        lambda: array_ops.zeros(correction_offset.shape),
        name='correction_offset')
  return correction_scale, correction_recip, correction_offset
def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor,
                          batch_to_space_op):
  """Clones layer_op with input_tensor and weight_tensor as new inputs.

  Args:
    layer_op: Operation to clone ('Conv2D', 'MatMul' or
      'DepthwiseConv2dNative').
    input_tensor: Tensor used as the layer input.
    weight_tensor: Tensor used as the layer weights (typically the
      batch-norm-scaled weights).
    batch_to_space_op: Trailing BatchToSpaceND op for atrous convolutions,
      or None.

  Returns:
    Output tensor of the cloned layer.

  Raises:
    ValueError: if layer_op is of an unsupported type.
  """
  new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
  if layer_op.type == 'Conv2D':
    return nn_ops.conv2d(
        input_tensor,
        weight_tensor,
        strides=layer_op.get_attr('strides'),
        padding=layer_op.get_attr('padding'),
        use_cudnn_on_gpu=layer_op.get_attr('use_cudnn_on_gpu'),
        data_format=layer_op.get_attr('data_format'),
        name=new_layer_name)
  elif layer_op.type == 'MatMul':
    return math_ops.matmul(
        input_tensor,
        weight_tensor,
        transpose_a=layer_op.get_attr('transpose_a'),
        transpose_b=layer_op.get_attr('transpose_b'),
        name=new_layer_name)
  elif layer_op.type == 'DepthwiseConv2dNative':
    conv = nn.depthwise_conv2d(
        input_tensor,
        weight_tensor,
        rate=layer_op.get_attr('dilations'),
        strides=layer_op.get_attr('strides'),
        padding=layer_op.get_attr('padding'),
        name=new_layer_name)
    # Copy the batch to space operation if we have a atrous convolution.
    if batch_to_space_op:
      # NOTE(review): batch_to_space_op is re-read from the layer's first
      # consumer here rather than using the passed argument -- confirm.
      batch_to_space_op = layer_op.outputs[0].consumers()[0]
      # TODO(suharshs): It's hard to make this name match with the unfused name.
      # Restructure this code to not rely on scope at all.
      new_batch_to_space_name = batch_to_space_op.name.split('/')[-1] + '_Fold'
      conv = array_ops.batch_to_space_nd(
          conv,
          batch_to_space_op.inputs[1],
          batch_to_space_op.inputs[2],
          name=new_batch_to_space_name)
    return conv
  else:
    raise ValueError('Cannot handle operation of type: %s' % layer_op.type)
@ops.RegisterGradient('FoldFusedBatchNormGrad')
def _FoldFusedBatchNormGrad(op, unused_grad_y, grad_mean, grad_var, unused_1,
                            unused_2):
  """Custom gradient for FusedBatchNorm's batch_mean/batch_variance outputs.

  Propagates grad_mean and grad_var back to the input x (the standard
  FusedBatchNormGrad does not compute these); gradients for the remaining
  inputs are None.
  """
  x = op.inputs[0]
  # n = number of input elements per statistic (reduction count).
  n = math_ops.cast(
      array_ops.size(x) / array_ops.size(grad_mean), dtypes.float32)
  # d(mean)/dx contributes grad_mean/n to every element; d(var)/dx uses the
  # (n - 1) denominator, matching the Bessel-corrected variance output.
  dmean_dx = grad_mean / n
  dvar_dx = 2 * grad_var * (x - op.outputs[1]) / (n - 1)
  return (dmean_dx + dvar_dx), None, None, None, None
def _FoldUnfusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
  """Finds unfused batch norm layers and folds them into preceding layers.

  Folding only affects the following layers: Conv2D, fully connected,
  depthwise convolution.

  Args:
    graph: Graph to walk and modify.
    is_training: Bool, True if training.
    freeze_batch_norm_delay: How many steps to wait before freezing moving mean
      and variance and using them for batch normalization.

  Raises:
    ValueError: When batch norm folding fails.
  """
  input_to_ops_map = input_to_ops.InputToOps(graph)
  for bn in common.BatchNormGroups(graph):
    has_scaling = _HasScaling(graph, input_to_ops_map, bn)
    # Skip batch norms whose output is dangling (no consumers).
    if not _IsValidUnfusedBatchNorm(graph, bn):
      continue
    # The mangling code intimately depends on BatchNorm node's internals.
    original_op, folded_op = _CreateFoldedOp(
        graph,
        bn,
        has_scaling=has_scaling,
        freeze_batch_norm_delay=freeze_batch_norm_delay,
        is_training=is_training)
    # If the batch norm feeds a Relu-like activation, reroute only that
    # activation's input to the folded output.
    activation = common.GetEndpointActivationOp(graph, bn)
    if activation:
      nodes_modified_count = common.RerouteTensor(
          folded_op.outputs[0], original_op.outputs[0], can_modify=[activation])
      if nodes_modified_count != 1:
        raise ValueError('Unexpected inputs to op: %s' % activation.name)
      continue
    # Treat consumer ops in bypass modules differently since they have Add
    # operations instead of Relu* above.
    add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)
    add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')
    nodes_modified_count = common.RerouteTensor(
        folded_op.outputs[0], original_op.outputs[0], can_modify=[add_bypass])
    if nodes_modified_count != 1:
      raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)
def _IsValidUnfusedBatchNorm(graph, context):
"""Checks that the output of the unfused batch norm has consumers."""
add_shift = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/add_1')
# Ensure that the output tensor of batch norm has consumers, otherwise this
# is a dangling node and not a match.
return bool(add_shift.outputs[0].consumers())
def _FindMatchingTensor(graph, match_pattern, scope):
"""Finds best match of ops matching match_pattern with scope.
Example: _FindMatchingTensor(graph,'/BatchNorm/moments/Squeeze',
'MobilenetV1/MobilenetV1/Conv2d_0/') returns:
Tensor('MobilenetV1/Conv2d_0/BatchNorm/moments/Squeeze')
Args:
graph: Graph to inspect.
match_pattern: Part of the name of the op that we need to match, should
be present in the op's name
scope: The scope of the op. All the elements of the scope need not be
present in the op's name.
Returns:
Tensor from graph that provides the best match to the match_pattern and
scope
"""
oplist = graph.get_operations()
split_context = set(scope.split('/'))
match_dict = {}
for op in oplist:
if op.name.endswith(match_pattern):
split_name = op.name.split('/')
num_matches = len(set(split_name) & split_context)
if num_matches > 0:
match_dict[op.name] = num_matches
# match_dict contains matching op names from graph with values being
# number of matches to scope. We pick the key with the most matches
if match_dict:
max_key = max(match_dict, key=match_dict.get)
return graph.get_tensor_by_name(max_key + ':0')
else:
return None
def _GetBatchNormParams(graph, context, has_scaling):
  """Extracts relevant tensors for folding batch norms.

  Args:
    graph: Graph to inspect.
    context: The scope under which we look for batch norm params
    has_scaling: Bool that specifies if scaling is done as part of batch norm.

  Returns:
    _BatchNormMatch containing all required batch norm parameters.

  Raises:
    ValueError: When no mean tensor (batch or moving) can be located.
  """
  gamma_tensor = None
  batch_mean_tensor = None
  batch_variance_tensor = None
  moving_mean_tensor = None
  moving_variance_tensor = None
  batch_epsilon = None
  bn_decay_mean_tensor = None
  bn_decay_var_tensor = None
  # TODO(raghuramank) This code relies on string matching and needs to be
  # updated if unfused batch norm continues to be widely used
  # Matching variable names is brittle and relies on scoping
  # conventions. Fused batch norm folding is more robust. Support for unfused
  # batch norms will be deprecated as we move forward. Fused batch norms allow
  # for faster training and should be used whenever possible.
  # context contains part of the names of the tensors we are interested in:
  # For MobilenetV1, the context has repetitions:
  # MobilenetV1/MobilenetV1/Conv2d_3_depthwise
  # when the moving_mean tensor has the name:
  # MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean/read
  # To pick the correct variable name, it is necessary to ignore the repeating
  # header.
  # For MobilenetV2, this problem does not exist:
  # The context is: MobilenetV2/expanded_conv_3/depthwise
  # and the names of the tensors start with a single MobilenetV2
  # The moving mean for example, has the name:
  # MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
  # We identify the best match for an op by checking for
  # 1. The suffix of the op is exactly matched
  # 2. Maximum number of matches with the context.The matching
  # score is given by the number of parts of context (split by /) that
  # are present in the parts of the tensor name (again split by /).
  # For example: scope= MobilenetV2/MobilenetV2/expanded_conv_3 and
  # op.name =  MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
  # will have 2 matches,scope with a different conv layer will have one match.
  op_suffix_mean = '/BatchNorm/moments/Squeeze'
  op_suffix_variance = '/BatchNorm/moments/Squeeze_1'
  op_suffix_epsilon = '/BatchNorm/batchnorm_1/add/y'
  op_suffix_bn_decay_mean = '/BatchNorm/AssignMovingAvg/decay'
  op_suffix_bn_decay_var = '/BatchNorm/AssignMovingAvg_1/decay'
  # Resource variables expose their values through ReadVariableOp nodes,
  # which changes the op-name suffixes we must look for.
  if variable_scope.get_variable_scope().use_resource:
    op_suffix_gamma = '/BatchNorm/gamma/Read/ReadVariableOp'
    op_suffix_moving_variance = (
        '/BatchNorm/moving_variance/Read/ReadVariableOp')
    op_suffix_moving_mean = ('/BatchNorm/moving_mean/Read/ReadVariableOp')
  else:
    op_suffix_gamma = '/BatchNorm/gamma'
    op_suffix_moving_variance = '/BatchNorm/moving_variance/read'
    op_suffix_moving_mean = '/BatchNorm/moving_mean/read'
  # Parse through list of ops to find relevant ops
  batch_mean_tensor = _FindMatchingTensor(graph, op_suffix_mean, context)
  batch_variance_tensor = _FindMatchingTensor(graph, op_suffix_variance,
                                              context)
  moving_mean_tensor = _FindMatchingTensor(graph, op_suffix_moving_mean,
                                           context)
  moving_variance_tensor = _FindMatchingTensor(graph, op_suffix_moving_variance,
                                               context)
  batch_epsilon = _FindMatchingTensor(graph, op_suffix_epsilon, context)
  bn_decay_mean_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_mean,
                                             context)
  bn_decay_var_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_var,
                                            context)
  if batch_mean_tensor is None and moving_mean_tensor is None:
    # Bug fix: the exception was previously constructed but never raised,
    # letting folding continue with missing tensors and fail later.
    raise ValueError('Error folding unfused batch norms')
  if has_scaling:
    gamma_tensor = _FindMatchingTensor(graph, op_suffix_gamma, context)
  else:
    # Without scaling the effective gamma is a tensor of ones.
    gamma_tensor = array_ops.ones(moving_mean_tensor.shape)
  return _BatchNormMatch(
      layer_op=None,
      bn_op=None,
      output_tensor=None,
      input_tensor=None,
      weight_tensor=None,
      gamma_tensor=gamma_tensor,
      beta_tensor=None,
      mean_tensor=batch_mean_tensor,
      variance_tensor=batch_variance_tensor,
      moving_mean_tensor=moving_mean_tensor,
      moving_variance_tensor=moving_variance_tensor,
      bn_decay_mean_tensor=bn_decay_mean_tensor,
      bn_decay_var_tensor=bn_decay_var_tensor,
      batch_epsilon=batch_epsilon,
      batch_to_space_op=None)
def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
                    is_training):
  """Folds in batch norm layer into preceding convolution or FC layer.

  Creates 3 new nodes, connects their inputs and adds them to the graph:
  mul is cloned into mul_fold, Conv2D or MatMul, or DepthwiseConv2d is cloned
  into respective *_Fold, add is cloned into add_fold.

  Args:
    graph: Graph to modify.
    context: String, batch norm context, i.e. node into which BatchNorm is
      nested.
    has_scaling: Whether the batch norm has scaling enabled.
    freeze_batch_norm_delay: How many steps to wait before freezing moving mean
      and variance and using them for batch normalization.
    is_training: Bool, true if training.

  Raises:
    ValueError: When operation type is not supported, or input and output tensor
      shapes mismatch for created operations: mul_fold, add_fold.

  Returns:
    A pair of Operations, the first is the original consumer node of the batch
    norm (../BatchNorm/batchnorm_1/add_1), the second is the consumer node of
    the folded graph (add_fold).
  """
  # With scaling the scale factor reaches the output via 'mul_1' (gamma is
  # multiplied in 'mul'); without scaling the rsqrt output feeds 'mul'.
  mul_scale_name = 'mul_1' if has_scaling else 'mul'
  mul_scale = graph.get_operation_by_name(context +
                                          '/BatchNorm/batchnorm_1/' +
                                          mul_scale_name)
  op_below = mul_scale.inputs[0].op
  # Skip over the BatchToSpace operation in the case of atrous convolutions.
  batch_to_space_op = None
  if op_below.type == 'BatchToSpaceND':
    batch_to_space_op = op_below
    op_below = op_below.inputs[0].op
  weights = op_below.inputs[1]
  match = _GetBatchNormParams(
      graph=graph, context=context, has_scaling=has_scaling)
  correction_scale, correction_recip, correction_offset = None, None, None
  if is_training:
    # During training, correction factors blend batch statistics with the
    # (eventually frozen) moving averages; see _ComputeBatchNormCorrections.
    correction_scale, correction_recip, correction_offset = (
        _ComputeBatchNormCorrections(
            context=context,
            match=match,
            freeze_batch_norm_delay=freeze_batch_norm_delay,
            fused_batch_norm=False))
  # Special handling for weights of depthwise convolution.
  if op_below.type == 'DepthwiseConv2dNative':
    # Reshape the per-channel scale so it broadcasts over the last two
    # dimensions of the depthwise filter.
    new_shape = [
        weights.get_shape().as_list()[2],
        weights.get_shape().as_list()[3]
    ]
    scale_name = 'mul' if has_scaling else 'Rsqrt'
    scale = graph.get_operation_by_name(
        context + '/BatchNorm/batchnorm_1/' + scale_name)
    scale = array_ops.reshape(scale.outputs[0], new_shape,
                              context + '/scale_reshape')
    if correction_scale is not None:
      correction_scale = array_ops.reshape(correction_scale, new_shape,
                                           context + '/correction_reshape')
      with ops.device(mul_scale.device):
        weights = math_ops.multiply(correction_scale, weights,
                                    context + '/correction_mult')
    mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights),
                                                           (1, scale)])
  elif op_below.type in ['Conv2D', 'MatMul']:
    if correction_scale is not None:
      with ops.device(mul_scale.device):
        weights = math_ops.multiply(correction_scale, weights,
                                    context + '/correction_mult')
    mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights)])
  else:
    raise ValueError('Cannot handle operation of type: %s' % op_below.type)
  _AssertShapesMatch('mul_fold', mul_fold.inputs[0], mul_fold.outputs[0])
  # Clone the conv/FC layer so it consumes the folded (scaled) weights.
  conv_or_fc_folded = _CloneOp(op_below, op_below.name + '_Fold',
                               [(1, mul_fold.outputs[0])])
  add_shift = graph.get_operation_by_name(
      context + '/BatchNorm/batchnorm_1/add_1')
  corrected_output = conv_or_fc_folded.outputs[0]
  # Copy the batch to space operation if we have a atrous convolution.
  if batch_to_space_op:
    corrected_output = array_ops.batch_to_space_nd(
        corrected_output,
        batch_to_space_op.inputs[1],
        batch_to_space_op.inputs[2],
        name=batch_to_space_op.name + '_Fold')
  if correction_offset is not None:
    # Undo the weight scaling after the layer and add the bias correction so
    # the folded output matches the unfolded batch norm during training.
    with ops.device(conv_or_fc_folded.device):
      corrected_output = math_ops.multiply(correction_recip, corrected_output,
                                           context + '/post_conv_mul')
      corrected_output = math_ops.add(corrected_output, (correction_offset),
                                      context + '/correction_add')
  add_fold = _CloneOp(add_shift, context + '/add_fold', [(0, corrected_output)])
  _AssertShapesMatch('add_fold', add_fold.inputs[0], add_fold.outputs[0])
  return add_shift, add_fold
def _CloneOp(op, new_name, new_inputs):
  """Clones a given op, replaces its name and some of its inputs.

  Args:
    op: Operation to modify.
    new_name: String, a new name to set on cloned op.
    new_inputs: A list of tuples (idx, tensor), each input with corresponding
      index will be replaced by the given Tensor in the cloned op.

  Returns:
    Operation, the cloned op.

  Raises:
    TypeError: When Operation type is not supported.
    ValueError: When input shapes are incompatible.
  """
  # Start from the op's current inputs, then patch in the replacements.
  patched_inputs = list(op.inputs)
  for idx, tensor in new_inputs:
    patched_inputs[idx] = tensor
  return _OP_CLONER.Clone(op, patched_inputs, new_name)
class _OpCloner(object):
  """Helper class that clones tf.Operations based on their type."""

  def __init__(self):
    # Dispatch table: operation type name -> bound clone handler.
    self.op_type_to_action = {
        'Mul': self._CloneMul,
        'Add': self._CloneAdd,
        'Conv2D': self._CloneConv2d,
        'DepthwiseConv2dNative': self._CloneDepthwiseConv2d,
        'MatMul': self._CloneMatMul,
    }

  def _CloneMul(self, op, inputs, new_name):
    """Clones an elementwise Mul; the original op carries no extra attrs."""
    del op  # Unused.
    lhs, rhs = inputs[0], inputs[1]
    return math_ops.multiply(lhs, rhs, name=new_name).op

  def _CloneAdd(self, op, inputs, new_name):
    """Clones an elementwise Add; the original op carries no extra attrs."""
    del op  # Unused.
    lhs, rhs = inputs[0], inputs[1]
    return math_ops.add(lhs, rhs, name=new_name).op

  def _CloneConv2d(self, op, inputs, new_name):
    """Clones a Conv2D op, copying its attributes onto the new inputs."""
    activation, filters = inputs[0], inputs[1]
    self._AssertConvShapes(op.name, activation, filters)
    return nn_ops.conv2d(
        activation,
        filters,
        strides=op.get_attr('strides'),
        padding=op.get_attr('padding'),
        use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'),
        data_format=op.get_attr('data_format'),
        name=new_name).op

  def _CloneDepthwiseConv2d(self, op, inputs, new_name):
    """Clones a DepthwiseConv2dNative op with its stride/padding attrs."""
    activation, filters = inputs[0], inputs[1]
    self._AssertConvShapes(op.name, activation, filters)
    return nn.depthwise_conv2d(
        activation,
        filters,
        strides=op.get_attr('strides'),
        padding=op.get_attr('padding'),
        name=new_name).op

  def _CloneMatMul(self, op, inputs, new_name):
    """Clones a MatMul op, preserving its transpose attributes."""
    lhs, rhs = inputs[0], inputs[1]
    self._AssertFCShapes(op.name, lhs, rhs)
    return math_ops.matmul(
        lhs,
        rhs,
        transpose_a=op.get_attr('transpose_a'),
        transpose_b=op.get_attr('transpose_b'),
        name=new_name).op

  def Clone(self, op, inputs, new_name):
    """Dispatches to the clone handler registered for op.type.

    Raises:
      TypeError: When the operation type has no registered handler.
    """
    action = self.op_type_to_action.get(op.type)
    if action is None:
      raise TypeError('Unsupported operation type: %s' % op.type)
    return action(op, inputs, new_name)

  def _AssertConvShapes(self, op_name, input_tensor, weights):
    """Makes sure that convolution inputs have compatible shapes.

    Args:
      op_name: Operation name, only used in error message.
      input_tensor: Input that is convolved.
      weights: Weights of the convolution filter.

    Raises:
      ValueError: When input shapes are incompatible.
    """
    input_shape = input_tensor.get_shape()
    weights_shape = weights.get_shape()
    # Both must be rank 4 and the input channel counts must line up.
    compatible = (len(input_shape) == 4 and len(weights_shape) == 4 and
                  input_shape[3] == weights_shape[2])
    if not compatible:
      raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
                       (op_name, input_shape, weights_shape))

  def _AssertFCShapes(self, op_name, weights, input_tensor):
    """Makes sure that FC layer inputs have compatible shapes.

    Args:
      op_name: Operation name, only used in error message.
      weights: Weights used in FC layer.
      input_tensor: Input into FC layer.

    Raises:
      ValueError: When input shapes are incompatible.
    """
    weights_shape = weights.get_shape()
    input_shape = input_tensor.get_shape()
    # Both must be rank 2 and the inner matmul dimensions must line up.
    compatible = (len(weights_shape) == 2 and len(input_shape) == 2 and
                  weights_shape[1] == input_shape[0])
    if not compatible:
      raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
                       (op_name, weights_shape, input_shape))
# Module-level singleton dispatcher used by _CloneOp to clone supported ops.
_OP_CLONER = _OpCloner()
def _AssertShapesMatch(op_name, in_tensor, out_tensor):
  """Makes sure that shapes of input and output tensors are compatible.

  Args:
    op_name: String, operation name, only used in error message.
    in_tensor: Tensor, input tensor.
    out_tensor: Tensor, output tensor.

  Raises:
    ValueError: When input and output tensors have different shapes.
  """
  shape_in = in_tensor.get_shape()
  shape_out = out_tensor.get_shape()
  if shape_in.is_compatible_with(shape_out):
    return
  raise ValueError('%s should not change tensor shape: input %s, '
                   'output %s' % (op_name, shape_in, shape_out))
def _HasScaling(graph, input_to_ops_map, bn):
  r"""Checks if batch norm has scaling enabled.

  Difference between batch norm with scaling and without is that with scaling:

  Rsqrt -> mul -> mul_1
              \-> mul_2

  where
  mul multiplies gamma by inverse square root of EMA of batch variance,
  mul_1 multiplies output of mul with output from the base operation
  (convolution, FC or depthwise convolution),
  mul_2 multiplies output of mul with EMA of batch mean,
  and without scaling:

  Rsqrt -> mul
       \-> mul_1

  where
  mul multiplies the inverse square root of EMA of batch variance with output
  from the base operation,
  mul_1 multiplies inverse square root of EMA of batch variance with EMA
  of batch mean.

  Args:
    graph: Graph to inspect.
    input_to_ops_map: InputToOps object containing mapping from tensor's name
      to ops that take it as input.
    bn: Batch norm layer prefix string.

  Returns:
    A boolean indicating whether this batch norm layer has scaling enabled.
  """
  rsqrt_op = graph.get_operation_by_name(bn + '/BatchNorm/batchnorm_1/Rsqrt')
  consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
  # With scaling, Rsqrt feeds exactly one Mul (gamma * rsqrt); without
  # scaling it feeds two Muls directly.
  mul_consumers = [consumer for consumer in consumers
                   if consumer.type == 'Mul']
  return len(mul_consumers) == 1
class _BatchNormMatch(object):
  """Contains all information related to a found Fused/UnfusedBatchNorm.

  Behaves as an immutable record: the constructor stores each field on a
  private attribute, and a read-only property exposes each one.
  """

  def __init__(self, layer_op, bn_op, output_tensor, input_tensor,
               weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
               variance_tensor, moving_mean_tensor, moving_variance_tensor,
               bn_decay_mean_tensor, bn_decay_var_tensor, batch_epsilon,
               batch_to_space_op):
    self._layer_op = layer_op
    self._bn_op = bn_op
    self._output_tensor = output_tensor
    self._input_tensor = input_tensor
    self._weight_tensor = weight_tensor
    self._gamma_tensor = gamma_tensor
    self._beta_tensor = beta_tensor
    self._mean_tensor = mean_tensor
    self._variance_tensor = variance_tensor
    self._moving_mean_tensor = moving_mean_tensor
    self._moving_variance_tensor = moving_variance_tensor
    self._bn_decay_mean_tensor = bn_decay_mean_tensor
    self._bn_decay_var_tensor = bn_decay_var_tensor
    self._batch_epsilon = batch_epsilon
    self._batch_to_space_op = batch_to_space_op

  # Read-only accessors over the private fields assigned in __init__.
  layer_op = property(lambda self: self._layer_op)
  bn_op = property(lambda self: self._bn_op)
  output_tensor = property(lambda self: self._output_tensor)
  input_tensor = property(lambda self: self._input_tensor)
  weight_tensor = property(lambda self: self._weight_tensor)
  gamma_tensor = property(lambda self: self._gamma_tensor)
  beta_tensor = property(lambda self: self._beta_tensor)
  mean_tensor = property(lambda self: self._mean_tensor)
  variance_tensor = property(lambda self: self._variance_tensor)
  moving_mean_tensor = property(lambda self: self._moving_mean_tensor)
  moving_variance_tensor = property(lambda self: self._moving_variance_tensor)
  batch_epsilon = property(lambda self: self._batch_epsilon)
  bn_decay_mean_tensor = property(lambda self: self._bn_decay_mean_tensor)
  bn_decay_var_tensor = property(lambda self: self._bn_decay_var_tensor)
  batch_to_space_op = property(lambda self: self._batch_to_space_op)
| apache-2.0 |
kuri65536/python-for-android | python-modules/twisted/twisted/test/test_persisted.py | 60 | 8648 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
# System Imports
import sys
from twisted.trial import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.persisted import styles, aot, crefutil
class VersionTestCase(unittest.TestCase):
    """Tests for the styles.Versioned pickle-upgrade machinery."""
    def testNullVersionUpgrade(self):
        """An instance pickled before its class was versioned is upgraded
        to version 1 by the next styles.doUpgrade() pass."""
        # Classes are declared global so pickle can resolve them by name.
        global NullVersioned
        # Pickle an instance of the pre-versioning class definition...
        class NullVersioned:
            ok = 0
        pkcl = pickle.dumps(NullVersioned())
        # ...then rebind the same global name to a Versioned subclass, so
        # pickle.loads resolves the old instance to the new class.
        class NullVersioned(styles.Versioned):
            persistenceVersion = 1
            def upgradeToVersion1(self):
                self.ok = 1
        mnv = pickle.loads(pkcl)
        styles.doUpgrade()
        assert mnv.ok, "initial upgrade not run!"
    def testVersionUpgrade(self):
        """Skipping versions runs every intermediate upgradeToVersionN hook
        exactly once, and persistenceForgets attributes are not pickled."""
        global MyVersioned
        class MyVersioned(styles.Versioned):
            persistenceVersion = 2
            persistenceForgets = ['garbagedata']
            v3 = 0
            v4 = 0
            def __init__(self):
                self.somedata = 'xxx'
                # Deliberately unpicklable; must be dropped via persistenceForgets.
                self.garbagedata = lambda q: 'cant persist'
            def upgradeToVersion3(self):
                self.v3 += 1
            def upgradeToVersion4(self):
                self.v4 += 1
        mv = MyVersioned()
        assert not (mv.v3 or mv.v4), "hasn't been upgraded yet"
        pickl = pickle.dumps(mv)
        MyVersioned.persistenceVersion = 4
        obj = pickle.loads(pickl)
        styles.doUpgrade()
        assert obj.v3, "didn't do version 3 upgrade"
        assert obj.v4, "didn't do version 4 upgrade"
        # A second round-trip at the current version must not re-upgrade.
        pickl = pickle.dumps(obj)
        obj = pickle.loads(pickl)
        styles.doUpgrade()
        assert obj.v3 == 1, "upgraded unnecessarily"
        assert obj.v4 == 1, "upgraded unnecessarily"
    def testNonIdentityHash(self):
        """Upgrades are applied per-instance even when distinct instances
        share the same hash value."""
        global ClassWithCustomHash
        class ClassWithCustomHash(styles.Versioned):
            def __init__(self, unique, hash):
                self.unique = unique
                self.hash = hash
            def __hash__(self):
                return self.hash
        v1 = ClassWithCustomHash('v1', 0)
        v2 = ClassWithCustomHash('v2', 0)
        pkl = pickle.dumps((v1, v2))
        del v1, v2
        # Version the class only after pickling, so both instances need an
        # upgrade when loaded.
        ClassWithCustomHash.persistenceVersion = 1
        ClassWithCustomHash.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
        v1, v2 = pickle.loads(pkl)
        styles.doUpgrade()
        self.assertEquals(v1.unique, 'v1')
        self.assertEquals(v2.unique, 'v2')
        self.failUnless(v1.upgraded)
        self.failUnless(v2.upgraded)
    def testUpgradeDeserializesObjectsRequiringUpgrade(self):
        """An upgrade hook that itself unpickles another Versioned object
        (and calls doUpgrade reentrantly) still upgrades that object."""
        global ToyClassA, ToyClassB
        class ToyClassA(styles.Versioned):
            pass
        class ToyClassB(styles.Versioned):
            pass
        x = ToyClassA()
        y = ToyClassB()
        pklA, pklB = pickle.dumps(x), pickle.dumps(y)
        del x, y
        ToyClassA.persistenceVersion = 1
        def upgradeToVersion1(self):
            # Deserializing B here triggers a nested upgrade pass.
            self.y = pickle.loads(pklB)
            styles.doUpgrade()
        ToyClassA.upgradeToVersion1 = upgradeToVersion1
        ToyClassB.persistenceVersion = 1
        ToyClassB.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
        x = pickle.loads(pklA)
        styles.doUpgrade()
        self.failUnless(x.y.upgraded)
class MyEphemeral(styles.Ephemeral):
    """Ephemeral subclass fixture; its state should be lost on unpickling."""
    def __init__(self, x):
        self.x = x
class EphemeralTestCase(unittest.TestCase):
    """Tests for styles.Ephemeral pickling behaviour."""
    def testEphemeral(self):
        """An Ephemeral subclass pickles, but unpickles as a bare
        styles.Ephemeral instance with all of its state discarded."""
        o = MyEphemeral(3)
        self.assertEquals(o.__class__, MyEphemeral)
        self.assertEquals(o.x, 3)
        pickl = pickle.dumps(o)
        o = pickle.loads(pickl)
        # The round-trip deliberately loses both the subclass and the state.
        self.assertEquals(o.__class__, styles.Ephemeral)
        self.assert_(not hasattr(o, 'x'))
class Pickleable:
    """Trivial picklable fixture; its bound method getX is itself pickled
    in PicklingTestCase."""
    def __init__(self, x):
        self.x = x
    def getX(self):
        return self.x
class A:
    """
    Dummy fixture class; an instance of it carries a bound method of B in
    the AOT method-identity tests.
    """
    def amethod(self):
        pass
class B:
    """
    Dummy fixture class; its bmethod is attached to an A instance in the
    AOT method-identity tests.
    """
    def bmethod(self):
        pass
def funktion():
    """Module-level function fixture, jellied by reference in AOTTestCase."""
    pass
class PicklingTestCase(unittest.TestCase):
    """Test pickling of extra object types.

    twisted.persisted.styles registers picklers for otherwise unpicklable
    objects (modules, class/bound methods, StringIO instances); these tests
    verify that each of them survives a pickle round-trip.
    """
    def testModule(self):
        """A module round-trips through pickle to the identical module."""
        pickl = pickle.dumps(styles)
        o = pickle.loads(pickl)
        self.assertEquals(o, styles)
    def testClassMethod(self):
        """An unbound class method round-trips to the same method object."""
        pickl = pickle.dumps(Pickleable.getX)
        o = pickle.loads(pickl)
        self.assertEquals(o, Pickleable.getX)
    def testInstanceMethod(self):
        """A bound method round-trips and still reads its instance state."""
        obj = Pickleable(4)
        pickl = pickle.dumps(obj.getX)
        o = pickle.loads(pickl)
        self.assertEquals(o(), 4)
        self.assertEquals(type(o), type(obj.getX))
    def testStringIO(self):
        """A StringIO round-trips with its buffered contents intact."""
        f = StringIO.StringIO()
        f.write("abc")
        pickl = pickle.dumps(f)
        o = pickle.loads(pickl)
        self.assertEquals(type(o), type(f))
        # Bug fix: previously this asserted f.getvalue() -- the *original*
        # buffer, which is trivially "abc" -- so a round-trip that dropped
        # the contents went undetected. Check the unpickled copy instead.
        self.assertEquals(o.getvalue(), "abc")
class EvilSourceror:
    """Fixture whose attributes form a reference cycle back onto itself."""
    def __init__(self, x):
        # self.a is self, so self.a.b creates a cycle onto the instance;
        # attribute c ends up holding x (reachable through the cycle).
        self.a = self
        self.a.b = self
        self.a.b.c = x
class NonDictState:
    """Fixture whose pickle state is a bare value rather than a __dict__."""
    def __getstate__(self):
        return self.state
    def __setstate__(self, state):
        self.state = state
class AOTTestCase(unittest.TestCase):
    """Tests for twisted.persisted.aot source-code serialization (jelly)."""
    def testSimpleTypes(self):
        """All simple builtin types survive a jelly/unjelly round-trip."""
        obj = (1, 2.0, 3j, True, slice(1, 2, 3), 'hello', u'world', sys.maxint + 1, None, Ellipsis)
        rtObj = aot.unjellyFromSource(aot.jellyToSource(obj))
        self.assertEquals(obj, rtObj)
    def testMethodSelfIdentity(self):
        """A bound method unjellies with im_self of the matching class."""
        a = A()
        b = B()
        a.bmethod = b.bmethod
        b.a = a
        im_ = aot.unjellyFromSource(aot.jellyToSource(b)).a.bmethod
        self.assertEquals(im_.im_class, im_.im_self.__class__)
    def test_methodNotSelfIdentity(self):
        """
        If a class changes after an instance has been created,
        L{aot.unjellyFromSource} should raise a C{TypeError} when trying to
        unjelly the instance.
        """
        a = A()
        b = B()
        a.bmethod = b.bmethod
        b.a = a
        # Remove the method from the class so unjellying cannot rebind it.
        savedbmethod = B.bmethod
        del B.bmethod
        try:
            self.assertRaises(TypeError, aot.unjellyFromSource,
                              aot.jellyToSource(b))
        finally:
            B.bmethod = savedbmethod
    def test_unsupportedType(self):
        """
        L{aot.jellyToSource} should raise a C{TypeError} when trying to jelly
        an unknown type.
        """
        # Fall back to the old 'sets' module on Pythons without builtin set.
        try:
            set
        except:
            from sets import Set as set
        self.assertRaises(TypeError, aot.jellyToSource, set())
    def testBasicIdentity(self):
        """Shared references and cycles within a nested structure are
        preserved across a jelly/unjelly round-trip."""
        # Anyone wanting to make this datastructure more complex, and thus this
        # test more comprehensive, is welcome to do so.
        aj = aot.AOTJellier().jellyToAO
        d = {'hello': 'world', "method": aj}
        l = [1, 2, 3,
             "he\tllo\n\n\"x world!",
             u"goodbye \n\t\u1010 world!",
             1, 1.0, 100 ** 100l, unittest, aot.AOTJellier, d,
             funktion
             ]
        t = tuple(l)
        # The list now contains itself and two references to the same tuple.
        l.append(l)
        l.append(t)
        l.append(t)
        uj = aot.unjellyFromSource(aot.jellyToSource([l, l]))
        assert uj[0] is uj[1]
        assert uj[1][0:5] == l[0:5]
    def testNonDictState(self):
        """__getstate__/__setstate__ state that is not a dict round-trips."""
        a = NonDictState()
        a.state = "meringue!"
        assert aot.unjellyFromSource(aot.jellyToSource(a)).state == a.state
    def testCopyReg(self):
        """Types registered via copy_reg (StringIO) round-trip through AOT."""
        s = "foo_bar"
        sio = StringIO.StringIO()
        sio.write(s)
        uj = aot.unjellyFromSource(aot.jellyToSource(sio))
        # print repr(uj.__dict__)
        assert uj.getvalue() == s
    def testFunkyReferences(self):
        """Self-referential object graphs keep their identity structure."""
        o = EvilSourceror(EvilSourceror([]))
        j1 = aot.jellyToAOT(o)
        oj = aot.unjellyFromAOT(j1)
        assert oj.a is oj
        assert oj.a.b is oj.b
        assert oj.c is not oj.c.c
class CrefUtilTestCase(unittest.TestCase):
    """
    Tests for L{crefutil}.
    """
    def test_dictUnknownKey(self):
        """
        L{crefutil._DictKeyAndValue} only support keys C{0} and C{1}.
        """
        d = crefutil._DictKeyAndValue({})
        # Key 2 is outside the supported {0: key, 1: value} slots.
        self.assertRaises(RuntimeError, d.__setitem__, 2, 3)
    def test_deferSetMultipleTimes(self):
        """
        L{crefutil._Defer} can be assigned a key only one time.
        """
        d = crefutil._Defer()
        d[0] = 1
        # A second assignment to the same slot must be rejected.
        self.assertRaises(RuntimeError, d.__setitem__, 0, 1)
# NOTE(review): trial discovers TestCase subclasses automatically; this legacy
# list omits AOTTestCase and CrefUtilTestCase -- presumably harmless, but
# confirm nothing still consumes it before relying on its contents.
testCases = [VersionTestCase, EphemeralTestCase, PicklingTestCase]
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.