repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
tavaresdong/courses-notes | ucb_cs61A/lab/lab05/tests/height_depth.py | 3 | 1037 | test = {
'name': 'Tree Height & Depth',
'points': 0,
'suites': [
{
'cases': [
{
'answer': '3',
'choices': [
'2',
'3',
'4',
'5'
],
'hidden': False,
'locked': False,
'question': r"""
What is the height of this tree?
7
/ | \
2 1 19
/ \ \
3 11 20
/ \ /
2 8 15
"""
},
{
'answer': '2',
'choices': [
'2',
'3',
'4',
'5'
],
'hidden': False,
'locked': False,
'question': r"""
What is the depth of the node containing 3?
7
/ | \
2 1 19
/ \ \
3 11 20
/ \ /
2 8 15
"""
}
],
'scored': False,
'type': 'concept'
}
]
} | mit |
StuntsPT/Structure_threader | setup.py | 1 | 3703 | #!/usr/bin/python3
# Copyright 2016-2021 Francisco Pina Martins <f.pinamartins@gmail.com>
# This file is part of structure_threader.
# structure_threader is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# structure_threader is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with structure_threader. If not, see <http://www.gnu.org/licenses/>.
import sys
try:
from setuptools import setup
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
class NotSupportedException(BaseException):
    """Raised when the running interpreter cannot be supported."""
    pass
if sys.version_info.major < 3:
    # structure_threader is Python 3 only; fail fast at install time.
    raise NotSupportedException("Only Python 3.x Supported")
def platform_detection(install_binaries=True):
    """
    Detect the platform and adapt the binaries location.

    Returns a setuptools ``data_files`` entry mapping the ``bin`` target
    to the bundled binaries for the current platform, or ``None`` when
    binaries were not requested or the platform is unsupported.
    """
    if install_binaries is not True:
        return None
    # Map sys.platform onto the directory shipping each platform's builds.
    platform_bins = {
        "linux": "structure_threader/bins/linux",
        "darwin": "structure_threader/bins/osx",
    }
    if sys.platform not in platform_bins:
        return None
    bin_dir = platform_bins[sys.platform]
    binaries = ["%s/%s" % (bin_dir, prog)
                for prog in ("fastStructure", "structure", "MavericK")]
    return [('bin', binaries)]
# Set some variables (PKGBUILD inspired)
DATA_FILES = platform_detection()
try:
    # Ship the ALStructure R wrapper alongside the platform binaries.
    DATA_FILES[0][1].append("structure_threader/wrappers/alstructure_wrapper.R")
except TypeError:
    # platform_detection() returned None (binaries skipped or platform
    # unsupported): install only the R wrapper script.
    DATA_FILES = [('bin',
                   ["structure_threader/wrappers/alstructure_wrapper.R"])]
VERSION = "1.3.10"
URL = "https://gitlab.com/StuntsPT/Structure_threader"
setup(
    name="structure_threader",
    version=VERSION,
    packages=["structure_threader",
              "structure_threader.evanno",
              "structure_threader.plotter",
              "structure_threader.sanity_checks",
              "structure_threader.colorer",
              "structure_threader.wrappers",
              "structure_threader.skeletons"],
    install_requires=["plotly>=4.1.1",
                      "colorlover",
                      "numpy>=1.12.1",
                      "matplotlib"],
    description=("A program to parallelize runs of 'Structure', "
                 "'fastStructure' and 'MavericK'."),
    url=URL,
    download_url="{0}/-/archive/{1}/Structure_threader-{1}.tar.gz".format(URL, VERSION),
    author="Francisco Pina-Martins",
    author_email="f.pinamartins@gmail.com",
    license="GPL3",
    classifiers=["Intended Audience :: Science/Research",
                 "License :: OSI Approved :: GNU General Public License v3 ("
                 "GPLv3)",
                 "Natural Language :: English",
                 "Operating System :: POSIX :: Linux",
                 "Topic :: Scientific/Engineering :: Bio-Informatics",
                 "Programming Language :: Python :: 3 :: Only",
                 "Programming Language :: Python :: 3.4",
                 "Programming Language :: Python :: 3.5",
                 "Programming Language :: Python :: 3.6",
                 "Programming Language :: Python :: 3.7"],
    # Platform binaries and/or the R wrapper, computed above.
    data_files=DATA_FILES,
    entry_points={
        "console_scripts": [
            "structure_threader = structure_threader.structure_threader:main",
        ]
    },
)
| gpl-3.0 |
jeenalee/servo | tests/wpt/css-tests/css-fonts-3_dev/html/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
# first Private Use Area codepoint; test glyphs are mapped upwards from here
baseCodepoint = 0xe000
# -------
# Features
# -------
# read the feature list: one tab-separated line per OpenType feature tag
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
    line = line.strip()
    # skip blank lines and comments
    if not line:
        continue
    if line.startswith("#"):
        continue
    # parse
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Append a single outline glyph to the CFF table structures in place."""
    # The new glyph's ID is the next free slot in the charset.
    glyphID = len(topDict.charset)
    charStringsIndex.append(
        T2CharString(program=program, private=private, globalSubrs=globalSubrs))
    charStrings.charStrings[glyphName] = glyphID
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build gsubtest-lookup1.otf: a test font whose GSUB table uses
    lookup type 1 (single substitution) to swap each tag's .pass and
    .fail glyphs.  NOTE: Python 2 syntax (old-style raise below).
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build gsubtest-lookup3.otf: a test font whose GSUB table uses
    lookup type 3 (alternate substitution); each tag gets .default plus
    three .altN glyphs.  NOTE: Python 2 syntax (old-style raise below).
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Write gsubtest-features.js mapping each feature tag to its base
    codepoint in the generated fonts (4 codepoints reserved per tag).
    """
    features = sorted(mapping)
    outStr = []
    outStr.append("")
    outStr.append("/* This file is autogenerated by makegsubfonts.py */")
    outStr.append("")
    outStr.append("/* ")
    outStr.append(" Features defined in gsubtest fonts with associated base")
    outStr.append(" codepoints for each feature:")
    outStr.append("")
    outStr.append(" cp = codepoint for feature featX")
    outStr.append("")
    outStr.append(" cp default PASS")
    outStr.append(" cp featX=1 FAIL")
    outStr.append(" cp featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+1 default FAIL")
    outStr.append(" cp+1 featX=1 PASS")
    outStr.append(" cp+1 featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+2 default FAIL")
    outStr.append(" cp+2 featX=1 FAIL")
    outStr.append(" cp+2 featX=2 PASS")
    outStr.append("")
    outStr.append("*/")
    outStr.append("")
    outStr.append("var gFeatures = {");
    cp = baseCodepoint
    taglist = []
    for tag in features:
        taglist.append("\"%s\": 0x%x" % (tag, cp))
        # each tag owns a block of 4 codepoints (default + 3 alternates)
        cp += 4
    outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
    outStr.append("};");
    outStr.append("");
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(outStr))
    f.close()
# build fonts
# NOTE: Python 2 print statements; this script must be run with python2.
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
blindroot/django | tests/proxy_models/tests.py | 12 | 16067 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, management
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from django.urls import reverse
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MultiUserProxy, MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug,
ProxyImprovement, ProxyProxyBug, ProxyTrackerUser, State, StateProxy,
StatusPerson, TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
    """Tests for Django proxy models (``Meta.proxy = True``): shared
    tables, managers, signals, content types and related-field queries."""
    def test_same_manager_queries(self):
        """
        The MyPerson model should be generating the same database queries as
        the Person model (when the same manager is used in each case).
        """
        my_person_sql = MyPerson.other.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        person_sql = Person.objects.order_by("name").query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertEqual(my_person_sql, person_sql)
    def test_inheritance_new_table(self):
        """
        The StatusPerson models should have its own table (it's using ORM-level
        inheritance).
        """
        sp_sql = StatusPerson.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        p_sql = Person.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertNotEqual(sp_sql, p_sql)
    def test_basic_proxy(self):
        """
        Creating a Person makes them accessible through the MyPerson proxy.
        """
        person = Person.objects.create(name="Foo McBar")
        self.assertEqual(len(Person.objects.all()), 1)
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
        self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
    def test_no_proxy(self):
        """
        Person is not proxied by StatusPerson subclass.
        """
        Person.objects.create(name="Foo McBar")
        self.assertEqual(list(StatusPerson.objects.all()), [])
    def test_basic_proxy_reverse(self):
        """
        A new MyPerson also shows up as a standard Person.
        """
        MyPerson.objects.create(name="Bazza del Frob")
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(len(Person.objects.all()), 1)
        LowerStatusPerson.objects.create(status="low", name="homer")
        lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
        self.assertEqual(lsps, ["homer"])
    def test_correct_type_proxy_of_proxy(self):
        """
        Correct type when querying a proxy of proxy
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
        self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
    def test_proxy_included_in_ancestors(self):
        """
        Proxy models are included in the ancestors for a model's DoesNotExist
        and MultipleObjectsReturned
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        with self.assertRaises(Person.DoesNotExist):
            MyPersonProxy.objects.get(name='Zathras')
        with self.assertRaises(Person.MultipleObjectsReturned):
            MyPersonProxy.objects.get(id__lt=max_id + 1)
        with self.assertRaises(Person.DoesNotExist):
            StatusPerson.objects.get(name='Zathras')
        StatusPerson.objects.create(name='Bazza Jr.')
        StatusPerson.objects.create(name='Foo Jr.')
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        with self.assertRaises(Person.MultipleObjectsReturned):
            StatusPerson.objects.get(id__lt=max_id + 1)
    def test_abstract_base_with_model_fields(self):
        """A proxy may not derive from an abstract base that adds fields."""
        msg = "Abstract base class containing model fields not permitted for proxy model 'NoAbstract'."
        with self.assertRaisesMessage(TypeError, msg):
            class NoAbstract(Abstract):
                class Meta:
                    proxy = True
    def test_too_many_concrete_classes(self):
        """A proxy may have at most one non-abstract model base."""
        msg = "Proxy model 'TooManyBases' has more than one non-abstract model base class."
        with self.assertRaisesMessage(TypeError, msg):
            class TooManyBases(User, Person):
                class Meta:
                    proxy = True
    def test_no_base_classes(self):
        """A proxy must have a non-abstract model base."""
        msg = "Proxy model 'NoBaseClasses' has no non-abstract model base class."
        with self.assertRaisesMessage(TypeError, msg):
            class NoBaseClasses(models.Model):
                class Meta:
                    proxy = True
    @isolate_apps('proxy_models')
    def test_new_fields(self):
        """Declaring new model fields on a proxy raises check E017."""
        class NoNewFields(Person):
            newfield = models.BooleanField()
            class Meta:
                proxy = True
        errors = NoNewFields.check()
        expected = [
            checks.Error(
                "Proxy model 'NoNewFields' contains model fields.",
                id='models.E017',
            )
        ]
        self.assertEqual(errors, expected)
    @override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
    @isolate_apps('proxy_models')
    def test_swappable(self):
        """A swapped-out model cannot be proxied."""
        class SwappableModel(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPABLE_MODEL'
        class AlternateModel(models.Model):
            pass
        # You can't proxy a swapped model
        with self.assertRaises(TypeError):
            class ProxyModel(SwappableModel):
                class Meta:
                    proxy = True
    def test_myperson_manager(self):
        """MyPerson's default manager filters the queryset (no 'wilma')."""
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in MyPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in MyPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'fred'])
    def test_otherperson_manager(self):
        """OtherPerson's managers apply different filters per manager."""
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in OtherPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
        resp = [p.name for p in OtherPerson.excluder.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in OtherPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
    def test_permissions_created(self):
        """Custom permissions declared on proxies are created."""
        from django.contrib.auth.models import Permission
        try:
            Permission.objects.get(name="May display users information")
        except Permission.DoesNotExist:
            self.fail("The permission 'May display users information' has not been created")
    def test_proxy_model_signals(self):
        """
        Test save signals for proxy models
        """
        output = []
        def make_handler(model, event):
            def _handler(*args, **kwargs):
                output.append('%s %s save' % (model, event))
            return _handler
        h1 = make_handler('MyPerson', 'pre')
        h2 = make_handler('MyPerson', 'post')
        h3 = make_handler('Person', 'pre')
        h4 = make_handler('Person', 'post')
        signals.pre_save.connect(h1, sender=MyPerson)
        signals.post_save.connect(h2, sender=MyPerson)
        signals.pre_save.connect(h3, sender=Person)
        signals.post_save.connect(h4, sender=Person)
        MyPerson.objects.create(name="dino")
        self.assertEqual(output, [
            'MyPerson pre save',
            'MyPerson post save'
        ])
        output = []
        h5 = make_handler('MyPersonProxy', 'pre')
        h6 = make_handler('MyPersonProxy', 'post')
        signals.pre_save.connect(h5, sender=MyPersonProxy)
        signals.post_save.connect(h6, sender=MyPersonProxy)
        MyPersonProxy.objects.create(name="pebbles")
        self.assertEqual(output, [
            'MyPersonProxy pre save',
            'MyPersonProxy post save'
        ])
        signals.pre_save.disconnect(h1, sender=MyPerson)
        signals.post_save.disconnect(h2, sender=MyPerson)
        signals.pre_save.disconnect(h3, sender=Person)
        signals.post_save.disconnect(h4, sender=Person)
        signals.pre_save.disconnect(h5, sender=MyPersonProxy)
        signals.post_save.disconnect(h6, sender=MyPersonProxy)
    def test_content_type(self):
        """A proxy shares its concrete model's ContentType."""
        ctype = ContentType.objects.get_for_model
        self.assertIs(ctype(Person), ctype(OtherPerson))
    def test_user_proxy_models(self):
        """Proxies (and proxies of proxies) see the same rows as the base."""
        User.objects.create(name='Bruce')
        resp = [u.name for u in User.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxyProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        self.assertEqual([u.name for u in MultiUserProxy.objects.all()], ['Bruce'])
    def test_proxy_for_model(self):
        """_meta.proxy_for_model points at the immediate proxied model."""
        self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
    def test_concrete_model(self):
        """_meta.concrete_model resolves through proxy chains."""
        self.assertEqual(User, UserProxyProxy._meta.concrete_model)
    def test_proxy_delete(self):
        """
        Proxy objects can be deleted
        """
        User.objects.create(name='Bruce')
        u2 = UserProxy.objects.create(name='George')
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce', 'George'])
        u2.delete()
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
    def test_select_related(self):
        """
        We can still use `select_related()` to include related models in our
        querysets.
        """
        country = Country.objects.create(name='Australia')
        State.objects.create(name='New South Wales', country=country)
        resp = [s.name for s in State.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        resp = [s.name for s in StateProxy.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        self.assertEqual(StateProxy.objects.get(name='New South Wales').name, 'New South Wales')
        resp = StateProxy.objects.select_related().get(name='New South Wales')
        self.assertEqual(resp.name, 'New South Wales')
    def test_filter_proxy_relation_reverse(self):
        """Reverse relations can be filtered through either model or proxy."""
        tu = TrackerUser.objects.create(name='Contributor', status='contrib')
        ptu = ProxyTrackerUser.objects.get()
        issue = Issue.objects.create(assignee=tu)
        self.assertEqual(tu.issues.get(), issue)
        self.assertEqual(ptu.issues.get(), issue)
        self.assertQuerysetEqual(
            TrackerUser.objects.filter(issues=issue),
            [tu], lambda x: x
        )
        self.assertQuerysetEqual(
            ProxyTrackerUser.objects.filter(issues=issue),
            [ptu], lambda x: x
        )
    def test_proxy_bug(self):
        """select_related() and filters work across proxy relations."""
        contributor = ProxyTrackerUser.objects.create(name='Contributor', status='contrib')
        someone = BaseUser.objects.create(name='Someone')
        Bug.objects.create(summary='fix this', version='1.1beta', assignee=contributor, reporter=someone)
        pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor', status='proxy')
        Improvement.objects.create(
            summary='improve that', version='1.1beta',
            assignee=contributor, reporter=pcontributor,
            associated_bug=ProxyProxyBug.objects.all()[0],
        )
        # Related field filter on proxy
        resp = ProxyBug.objects.get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Select related + filter on proxy
        resp = ProxyBug.objects.select_related().get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Proxy of proxy, select_related + filter
        resp = ProxyProxyBug.objects.select_related().get(
            version__icontains='beta'
        )
        self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
        # Select related + filter on a related proxy field
        resp = ProxyImprovement.objects.select_related().get(
            reporter__name__icontains='butor'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
        # Select related + filter on a related proxy of proxy field
        resp = ProxyImprovement.objects.select_related().get(
            associated_bug__summary__icontains='fix'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
    def test_proxy_load_from_fixture(self):
        """Fixture rows deserialize into the proxy's concrete table."""
        management.call_command('loaddata', 'mypeople.json', verbosity=0)
        p = MyPerson.objects.get(pk=100)
        self.assertEqual(p.name, 'Elvis Presley')
    def test_eq(self):
        """A proxy instance compares equal to the concrete instance."""
        self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(ROOT_URLCONF='proxy_models.urls')
class ProxyModelAdminTests(TestCase):
    """Admin integration tests for proxy models (delete collection and
    delete-confirmation rendering)."""
    @classmethod
    def setUpTestData(cls):
        cls.superuser = AuthUser.objects.create(is_superuser=True, is_staff=True)
        cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
        cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
    def test_cascade_delete_proxy_model_admin_warning(self):
        """
        Test if admin gives warning about cascade deleting models referenced
        to concrete model by deleting proxy object.
        """
        tracker_user = TrackerUser.objects.all()[0]
        base_user = BaseUser.objects.all()[0]
        issue = Issue.objects.all()[0]
        with self.assertNumQueries(7):
            collector = admin.utils.NestedObjects('default')
            collector.collect(ProxyTrackerUser.objects.all())
        self.assertIn(tracker_user, collector.edges.get(None, ()))
        self.assertIn(base_user, collector.edges.get(None, ()))
        self.assertIn(issue, collector.edges.get(tracker_user, ()))
    def test_delete_str_in_model_admin(self):
        """
        Test if the admin delete page shows the correct string representation
        for a proxy model.
        """
        user = TrackerUser.objects.get(name='Django Pony')
        proxy = ProxyTrackerUser.objects.get(name='Django Pony')
        user_str = 'Tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
        )
        proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
        )
        self.client.force_login(self.superuser)
        response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, user_str)
        response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, proxy_str)
| bsd-3-clause |
refeed/coala-bears | bears/verilog/VerilogLintBear.py | 16 | 1660 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
@linter(executable='verilator',
        output_format='regex',
        use_stderr=True,
        # severity/line/message are captured from verilator's
        # "%Error"/"%Warning-..." diagnostic lines on stderr
        output_regex=r'\%(?:(?P<severity>Error|Warning.*?).*?):'
                     r'.+?:(?P<line>.+?): (?P<message>.+)')
class VerilogLintBear:
    """
    Analyze Verilog code using ``verilator`` and checks for all lint
    related and code style related warning messages. It supports the
    synthesis subset of Verilog, plus initial statements, proper
    blocking/non-blocking assignments, functions, tasks.
    It also warns about unused code when a specified signal is never sinked,
    and unoptimized code due to some construct, with which the
    optimization of the specified signal or block is disabled.
    This is done using the ``--lint-only`` command. For more information visit
    <http://www.veripool.org/projects/verilator/wiki/Manual-verilator>.
    """
    LANGUAGES = {'Verilog'}
    REQUIREMENTS = {
        DistributionRequirement(
            apt_get='verilator',
            brew=None,
            dnf='verilator',
            portage=None,
            yum='verilator',
            zypper='verilator',
        ),
    }
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    ASCIINEMA_URL = 'https://asciinema.org/a/45275'
    CAN_DETECT = {'Formatting', 'Code Simplification', 'Syntax', 'Unused Code'}
    @staticmethod
    def create_arguments(filename, file, config_file):
        """Run verilator in lint-only mode on the given file."""
        return '--lint-only', filename
| agpl-3.0 |
Tosh007/DnD5_CharCreator | acces.py | 1 | 2053 |
def initializeTables(ui):
    """Create the global rule tables and bind the UI object.

    Must be called once at startup, before any of the get*() accessors
    below are used.
    """
    from ValueTable import ValueTable
    from ChoiceTable import ActiveChoiceTable, StateTable,ActiveMultiChoiceTable
    from ProficiencyTable import ProficiencyTable
    from ConfigTable import ConfigTable
    from ModifierTable import ModifierTable
    global UI,Value,Modifier,Config,Proficiency,State,ActiveState,ActiveMultiState
    UI = ui
    # Config and State hold the classes themselves; the rest are instances.
    Config = ConfigTable
    Modifier = ModifierTable()
    Value = ValueTable()
    Proficiency = ProficiencyTable()
    State = StateTable
    ActiveState = ActiveChoiceTable()
    ActiveMultiState = ActiveMultiChoiceTable()
    print("global table initialization complete!")
def getValueTable():
    """Return the global ValueTable instance."""
    return Value
def getProficiencyTable():
    """Return the global ProficiencyTable instance."""
    return Proficiency
def getActiveStateTable():
    """Return the global ActiveChoiceTable instance."""
    return ActiveState
def getActiveMultiStateTable():
    """Return the global ActiveMultiChoiceTable instance."""
    return ActiveMultiState
# Resolves attribute name(s) on obj: a single string yields a one-element
# tuple; an iterable of strings yields a tuple of the resolved attributes.
def _get(obj, name, err):
    """Look up *name* (a string or iterable of strings) on *obj*.

    Returns a tuple of the resolved attributes.  Raises KeyError with the
    formatted *err* message when any attribute is missing.
    """
    try:
        if type(name) is str:
            return (getattr(obj, name),)
        # Evaluate eagerly: a tuple is subscriptable (the old generator was
        # not) and raises here, where the error can be wrapped, instead of
        # later at iteration time.
        return tuple(getattr(obj, n) for n in name)
    except AttributeError as e:
        # getattr() raises AttributeError, not KeyError; the original
        # `except KeyError` never fired, so the formatted message was
        # unreachable dead code.
        raise KeyError(err.format(name)) from e
def getUI(name):
    """Return the UI object registered under *name*."""
    results = _get(UI, name, "getUI({0}) failed")
    return results[0]
def getValues(name):
    """Fetch value object(s); *name* may be a str or a list of strs."""
    lookup = _get(Value, name, "getValues({0}) failed")
    return lookup
def getValue(name):
    """Fetch a single value object by name."""
    values = getValues(name)
    return values[0]
def getModifier(name):
    """Fetch a single modifier by name from the global ModifierTable."""
    results = _get(Modifier, name, "getModifier({0}) failed")
    return results[0]
def getConfig(name):
    """Fetch a single config entry by name from the global ConfigTable."""
    results = _get(Config, name, "getConfig({0}) failed")
    return results[0]
def getProficiency(name):
    """Fetch a single proficiency by name from the global ProficiencyTable."""
    results = _get(Proficiency, name, "getProficiency({0}) failed")
    return results[0]
def getState(name):
    """Fetch a single state by name from the global StateTable class."""
    results = _get(State, name, "getState({0}) failed")
    return results[0]
def getActiveState(name):
    """Fetch an active-state entry; accepts a name or a class (its __name__)."""
    key = name if type(name) is str else name.__name__
    return _get(ActiveState, key, "getActiveState({0}) failed")[0]
def getActiveMultiState(name):
    """Fetch an active-multi-state entry; accepts a name or a class (its __name__)."""
    key = name if type(name) is str else name.__name__
    return _get(ActiveMultiState, key, "getActiveMultiState({0}) failed")[0]
| gpl-3.0 |
zbyte64/django-hyperadmin | hyperadmin/resources/storages/endpoints.py | 1 | 2971 | from hyperadmin.links import LinkPrototype, Link
from hyperadmin.resources.endpoints import ResourceEndpoint
from hyperadmin.resources.crud.endpoints import ListEndpoint as BaseListEndpoint, CreateEndpoint, DetailEndpoint, DeleteEndpoint
class BoundFile(object):
    """A file name bound to the storage backend that holds it.

    Wraps a (storage, name) pair and proxies url/delete/exists calls to
    the underlying storage object.
    """
    def __init__(self, storage, name):
        self.storage = storage
        self.name = name

    @property
    def pk(self):
        """The file name doubles as the primary key."""
        return self.name

    @property
    def url(self):
        """URL of the file as reported by the storage backend."""
        return self.storage.url(self.name)

    def delete(self):
        """Remove the file from the storage backend."""
        return self.storage.delete(self.name)

    def exists(self):
        """Whether the storage backend currently holds the file."""
        return self.storage.exists(self.name)

    def __unicode__(self):
        return self.name
class CreateUploadLinkPrototype(LinkPrototype):
    """Link prototype that produces a one-off upload link for a storage resource."""
    def show_link(self, **kwargs):
        """Only expose the link to users who may create files."""
        return self.resource.has_create_permission()

    def get_link_kwargs(self, **kwargs):
        """Assemble the kwargs describing the 'create upload link' form link."""
        supplied = kwargs.pop('form_kwargs', None) or {}
        prepared = self.resource.get_upload_link_form_kwargs(**supplied)
        defaults = {
            'url': self.get_url(),
            'on_submit': self.handle_submission,
            'method': 'POST',
            'form_kwargs': prepared,
            'form_class': self.resource.get_upload_link_form_class(),
            'prompt': 'create upload link',
            'rel': 'upload-link',
        }
        # Caller-supplied kwargs win over the defaults built above.
        defaults.update(kwargs)
        return super(CreateUploadLinkPrototype, self).get_link_kwargs(**defaults)

    def handle_submission(self, link, submit_kwargs):
        """Validate the submitted form; hand the saved upload link to on_success."""
        form = link.get_form(**submit_kwargs)
        if not form.is_valid():
            return link.clone(form=form)
        return self.on_success(form.save())

    def on_success(self, link):
        assert isinstance(link, Link)
        return link
class ListEndpoint(BaseListEndpoint):
    """Storage listing endpoint that also advertises the upload link."""
    def get_outbound_links(self):
        """Extend the inherited outbound links with an 'upload' link."""
        collection = super(ListEndpoint, self).get_outbound_links()
        collection.add_link('upload', link_factor='LO')
        return collection
class CreateUploadEndpoint(ResourceEndpoint):
    """Endpoint serving the 'upload' link prototype at .../upload/."""
    name_suffix = 'upload'
    url_suffix = r'^upload/$'
    # Both GET and POST are routed to the same prototype name.
    prototype_method_map = {
        'GET': 'upload',
        'POST': 'upload',
    }
    create_upload_prototype = CreateUploadLinkPrototype

    def get_link_prototypes(self):
        """Register the upload prototype under the name 'upload'."""
        prototypes = [(self.create_upload_prototype, {'name': 'upload'})]
        return prototypes
class Base64UploadEndpoint(CreateEndpoint):
    """Create endpoint accepting base64-encoded file payloads."""
    name_suffix = 'base64-upload'
    url_suffix = r'^base64-upload/$'
    # Both GET and POST are routed to the same prototype name.
    prototype_method_map = {
        'GET': 'base64-upload',
        'POST': 'base64-upload',
    }

    def get_form_class(self):
        """Delegate to the resource's base64 upload form."""
        return self.resource.get_base64_upload_form_class()

    def get_link_prototypes(self):
        """Expose the inherited create prototype under 'base64-upload'."""
        return [(self.create_prototype, {'name': 'base64-upload'})]
| bsd-3-clause |
jerodestapa/jerodestapa.github.io | vendor/cache/ruby/2.0.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/lexers/compiled.py | 28 | 222015 | # -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for compiled languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from string import Template
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined, inherit, do_insertions, default
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Literal, Generic
from pygments.scanner import Scanner
# backwards compatibility
from pygments.lexers.functional import OcamlLexer
from pygments.lexers.jvm import JavaLexer, ScalaLexer
__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer',
'NesCLexer', 'DylanLexer', 'ObjectiveCLexer', 'ObjectiveCppLexer',
'FortranLexer', 'GLShaderLexer', 'PrologLexer', 'CythonLexer',
'ValaLexer', 'OocLexer', 'GoLexer', 'FelixLexer', 'AdaLexer',
'Modula2Lexer', 'BlitzMaxLexer', 'BlitzBasicLexer', 'NimrodLexer',
'FantomLexer', 'RustLexer', 'CudaLexer', 'MonkeyLexer', 'SwigLexer',
'DylanLidLexer', 'DylanConsoleLexer', 'CobolLexer',
'CobolFreeformatLexer', 'LogosLexer', 'ClayLexer', 'PikeLexer',
'ChapelLexer', 'EiffelLexer', 'Inform6Lexer', 'Inform7Lexer',
'Inform6TemplateLexer', 'MqlLexer', 'SwiftLexer']
class CFamilyLexer(RegexLexer):
    """
    For C family source code. This is used as a base class to avoid repetitious
    definitions.
    """
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
    # NOTE: within each state, rules are tried in order — more specific
    # patterns must stay above the generic identifier/operator rules.
    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # A close-comment outside a comment is always an error.
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'(auto|break|case|const|continue|default|do|else|enum|extern|'
             r'for|goto|if|register|restricted|return|sizeof|static|struct|'
             r'switch|typedef|union|volatile|while)\b', Keyword),
            (r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
            # Vector intrinsics
            (r'(__(m128i|m128d|m128|m64))\b', Keyword.Reserved),
            # Microsoft-isms
            (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
             r'declspec|finally|int64|try|leave|wchar_t|w64|unaligned|'
             r'raise|noop|identifier|forceinline|assume)\b', Keyword.Reserved),
            (r'(true|false|NULL)\b', Name.Builtin),
            # Label (identifier followed by a single colon, not '::').
            (r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
            ('[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|[*]))'    # return arguments
             r'([a-zA-Z_]\w*)'               # method name
             r'(\s*\([^;]*?\))'              # signature
             r'(' + _ws + r')?({)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|[*]))'    # return arguments
             r'([a-zA-Z_]\w*)'               # method name
             r'(\s*\([^;]*?\))'              # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            # Anything else is lexed as an ordinary statement.
            default('statement'),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            # Braces are counted via the state stack so nested blocks
            # do not end the function prematurely.
            ('{', Punctuation, '#push'),
            ('}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            # A backslash-escaped newline continues the macro.
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # '#if 0' regions nest via the state stack.
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
    # Identifier names re-tagged as types by get_tokens_unprocessed below.
    stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
                    'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
                    'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
                    'wctrans_t', 'wint_t', 'wctype_t']
    c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
                 'int_least16_t', 'int_least32_t', 'int_least64_t',
                 'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
                 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
                 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
                 'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
                 'uintmax_t']
    def __init__(self, **options):
        """Accept `stdlibhighlighting` and `c99highlighting` boolean
        options (both default True) controlling type re-tagging."""
        self.stdlibhighlighting = get_bool_opt(options,
            'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options,
            'c99highlighting', True)
        RegexLexer.__init__(self, **options)
    def get_tokens_unprocessed(self, text):
        """Post-process the regex stream: re-tag Name tokens that match a
        known stdlib/C99 type name as Keyword.Type."""
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
            yield index, token, value
class CLexer(CFamilyLexer):
    """
    For C source code with preprocessor directives.
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc']
    mimetypes = ['text/x-chdr', 'text/x-csrc']
    priority = 0.1

    def analyse_text(text):
        # A C file very likely contains at least one #include directive;
        # return a modest score, or None (implicitly) when absent.
        match = re.search('#include [<"]', text)
        if match:
            return 0.1
class CppLexer(CFamilyLexer):
    """
    For C++ source code with preprocessor directives.

    Extends CFamilyLexer; the ``inherit`` placeholders below splice the
    base class's rules into each state.
    """
    name = 'C++'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx',
                 '*.C', '*.H', '*.cp', '*.CPP']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']
    priority = 0.1
    tokens = {
        'statements': [
            # C++-only keywords take precedence over the inherited C rules.
            (r'(asm|catch|const_cast|delete|dynamic_cast|explicit|'
             r'export|friend|mutable|namespace|new|operator|'
             r'private|protected|public|reinterpret_cast|'
             r'restrict|static_cast|template|this|throw|throws|'
             r'typeid|typename|using|virtual|'
             r'constexpr|nullptr|decltype|thread_local|'
             r'alignas|alignof|static_assert|noexcept|override|final)\b', Keyword),
            (r'(char16_t|char32_t)\b', Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            inherit,
        ],
        'root': [
            inherit,
            # C++ Microsoft-isms
            (r'__(virtual_inheritance|uuidof|super|single_inheritance|'
             r'multiple_inheritance|interface|event)\b', Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'(__offload|__blockingoffload|__outer)\b', Keyword.Pseudo),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }
    def analyse_text(text):
        # Angle-bracket includes and 'using namespace' are strong C++ hints.
        if re.search('#include <[a-z]+>', text):
            return 0.2
        if re.search('using namespace ', text):
            return 0.4
class PikeLexer(CppLexer):
    """
    For `Pike <http://pike.lysator.liu.se/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Pike'
    aliases = ['pike']
    filenames = ['*.pike', '*.pmod']
    mimetypes = ['text/x-pike']
    tokens = {
        'statements': [
            # Pike keywords shadow the inherited C++ rules below.
            (r'(catch|new|private|protected|public|gauge|'
             r'throw|throws|class|interface|implement|abstract|extends|from|'
             r'this|super|new|constant|final|static|import|use|extern|'
             r'inline|proto|break|continue|if|else|for|'
             r'while|do|switch|case|as|in|version|return|true|false|null|'
             r'__VERSION__|__MAJOR__|__MINOR__|__BUILD__|__REAL_VERSION__|'
             r'__REAL_MAJOR__|__REAL_MINOR__|__REAL_BUILD__|__DATE__|__TIME__|'
             r'__FILE__|__DIR__|__LINE__|__AUTO_BIGNUM__|__NT__|__PIKE__|'
             r'__amigaos__|_Pragma|static_assert|defined|sscanf)\b',
             Keyword),
            (r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
             r'array|multiset|program|function|lambda|mixed|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # Pike additionally allows '@' as an operator character.
            (r'[~!%^&*+=|?:<>/-@]', Operator),
            inherit,
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }
class SwigLexer(CppLexer):
    """
    For `SWIG <http://www.swig.org/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'SWIG'
    aliases = ['swig']
    filenames = ['*.swg', '*.i']
    mimetypes = ['text/swig']
    priority = 0.04 # Lower than C/C++ and Objective C/C++
    tokens = {
        'statements': [
            # SWIG directives
            (r'(%[a-z_][a-z0-9_]*)', Name.Function),
            # Special variables
            ('\$\**\&?\w+', Name),
            # Stringification / additional preprocessor directives
            (r'##*[a-zA-Z_]\w*', Comment.Preproc),
            inherit,
        ],
    }
    # This is a far from complete set of SWIG directives
    swig_directives = (
        # Most common directives
        '%apply', '%define', '%director', '%enddef', '%exception', '%extend',
        '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
        '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
        '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
        # Less common directives
        '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
        '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
        '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
        '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
        '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
        '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
        '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
        '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
        '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
        '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
        '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
        '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', '%warnfilter')
    def analyse_text(text):
        """Score the text as SWIG: 0.98 if a known %directive starts a
        line, 0.91 if only unknown %directives appear, else 0."""
        rv = 0
        # Search for SWIG directives, which are conventionally at the beginning of
        # a line. The probability of them being within a line is low, so let another
        # lexer win in this case.
        matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
        for m in matches:
            if m in SwigLexer.swig_directives:
                rv = 0.98
                break
            else:
                rv = 0.91  # Fraction higher than MatlabLexer
        return rv
class ECLexer(CLexer):
    """
    For eC source code with preprocessor directives.
    .. versionadded:: 1.5
    """
    name = 'eC'
    aliases = ['ec']
    filenames = ['*.ec', '*.eh']
    mimetypes = ['text/x-echdr', 'text/x-ecsrc']
    tokens = {
        'statements': [
            # eC-specific keywords checked before the inherited C rules.
            (r'(virtual|class|private|public|property|import|delete|new|new0|'
             r'renew|renew0|define|get|set|remote|dllexport|dllimport|stdcall|'
             r'subclass|__on_register_module|namespace|using|typed_object|'
             r'any_object|incref|register|watch|stopwatching|firewatchers|'
             r'watchable|class_designer|class_fixed|class_no_expansion|isset|'
             r'class_default_property|property_category|class_data|'
             r'class_property|virtual|thisclass|'
             r'dbtable|dbindex|database_open|dbfield)\b', Keyword),
            (r'(uint|uint16|uint32|uint64|bool|byte|unichar|int64)\b',
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(null|value|this)\b', Name.Builtin),
            inherit,
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }
class NesCLexer(CLexer):
    """
    For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
    directives.
    .. versionadded:: 2.0
    """
    name = 'nesC'
    aliases = ['nesc']
    filenames = ['*.nc']
    mimetypes = ['text/x-nescsrc']
    tokens = {
        'statements': [
            # nesC-specific keywords checked before the inherited C rules.
            (r'(abstract|as|async|atomic|call|command|component|components|'
             r'configuration|event|extends|generic|implementation|includes|'
             r'interface|module|new|norace|post|provides|signal|task|uses)\b',
             Keyword),
            # nesC network (nx_) fixed-endianness types.
            (r'(nx_struct|nx_union|nx_int8_t|nx_int16_t|nx_int32_t|nx_int64_t|'
             r'nx_uint8_t|nx_uint16_t|nx_uint32_t|nx_uint64_t)\b',
             Keyword.Type),
            inherit,
        ],
    }
class ClayLexer(RegexLexer):
    """
    For `Clay <http://claylabs.com/clay/>`_ source.
    .. versionadded:: 2.0
    """
    name = 'Clay'
    filenames = ['*.clay']
    aliases = ['clay']
    mimetypes = ['text/x-clay']
    tokens = {
        'root': [
            (r'\s', Text),
            # Comment.Single is the standard Pygments token; the former
            # Comment.Singleline was an ad-hoc subtype no style defines,
            # so line comments were never colored as comments.
            (r'//.*?$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\b(public|private|import|as|record|variant|instance'
             r'|define|overload|default|external|alias'
             r'|rvalue|ref|forward|inline|noinline|forceinline'
             r'|enum|var|and|or|not|if|else|goto|return|while'
             r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
             r'|finally|onerror|staticassert|eval|when|newtype'
             r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
             r')\b', Keyword),
            (r'[~!%^&*+=|:<>/-]', Operator),
            (r'[#(){}\[\],;.]', Punctuation),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'\d+[LlUu]*', Number.Integer),
            # NOTE(review): this rule is shadowed — 'true'/'false' are
            # already consumed by the keyword alternation above, so it
            # never fires; kept for backward compatibility.
            (r'\b(true|false)\b', Name.Builtin),
            (r'(?i)[a-z_?][a-z_?0-9]*', Name),
            (r'"""', String, 'tdqs'),
            (r'"', String, 'dqs'),
        ],
        'strings': [
            (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
            (r'.', String),
        ],
        # Newlines are only legal inside triple-quoted strings.
        'nl': [
            (r'\n', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings'),
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl'),
        ],
    }
class DLexer(RegexLexer):
    """
    For D source.
    .. versionadded:: 1.2
    """
    name = 'D'
    filenames = ['*.d', '*.di']
    aliases = ['d']
    mimetypes = ['text/x-dsrc']
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            #(r'\\\n', Text), # line continuations
            # Comments
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            # D's /+ +/ comments nest; handled by a dedicated state.
            (r'/\+', Comment.Multiline, 'nested_comment'),
            # Keywords
            (r'(abstract|alias|align|asm|assert|auto|body|break|case|cast'
             r'|catch|class|const|continue|debug|default|delegate|delete'
             r'|deprecated|do|else|enum|export|extern|finally|final'
             r'|foreach_reverse|foreach|for|function|goto|if|immutable|import'
             r'|interface|invariant|inout|in|is|lazy|mixin|module|new|nothrow|out'
             r'|override|package|pragma|private|protected|public|pure|ref|return'
             r'|scope|shared|static|struct|super|switch|synchronized|template|this'
             r'|throw|try|typedef|typeid|typeof|union|unittest|version|volatile'
             r'|while|with|__gshared|__traits|__vector|__parameters)\b', Keyword
            ),
            (r'(bool|byte|cdouble|cent|cfloat|char|creal|dchar|double|float'
             r'|idouble|ifloat|int|ireal|long|real|short|ubyte|ucent|uint|ulong'
             r'|ushort|void|wchar)\b', Keyword.Type
            ),
            (r'(false|true|null)\b', Keyword.Constant),
            (r'(__FILE__|__MODULE__|__LINE__|__FUNCTION__|__PRETTY_FUNCTION__'
             r'|__DATE__|__EOF__|__TIME__|__TIMESTAMP__|__VENDOR__|__VERSION__)\b',
             Keyword.Pseudo),
            (r'macro\b', Keyword.Reserved),
            (r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
            # FloatLiteral
            # -- HexFloat
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+', Number.Bin),
            # -- Octal
            (r'0[0-7_]+', Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
            # CharacterLiteral
            (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
             String.Char
            ),
            # StringLiteral
            # -- WysiwygString
            (r'r"[^"]*"[cwd]?', String),
            # -- AlternateWysiwygString
            (r'`[^`]*`[cwd]?', String),
            # -- DoubleQuotedString
            (r'"(\\\\|\\"|[^"])*"[cwd]?', String),
            # -- EscapeSequence
            (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
             r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
             String
            ),
            # -- HexString
            (r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
            # -- DelimitedString: each delimiter pair gets its own state
            #    so nested delimiters balance correctly.
            (r'q"\[', String, 'delimited_bracket'),
            (r'q"\(', String, 'delimited_parenthesis'),
            (r'q"<', String, 'delimited_angle'),
            (r'q"{', String, 'delimited_curly'),
            (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
            (r'q"(.).*?\1"', String),
            # -- TokenString
            (r'q{', String, 'token_string'),
            # Attributes
            (r'@([a-zA-Z_]\w*)?', Name.Decorator),
            # Tokens
            (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
             r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
             r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation
            ),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
            # Line
            (r'#line\s.*\n', Comment.Special),
        ],
        # Nesting depth of /+ +/ comments is tracked via the state stack.
        'nested_comment': [
            (r'[^+/]+', Comment.Multiline),
            (r'/\+', Comment.Multiline, '#push'),
            (r'\+/', Comment.Multiline, '#pop'),
            (r'[+/]', Comment.Multiline),
        ],
        # q{ ... } token strings contain real D tokens, so 'root' is
        # re-included; inner braces recurse through token_string_nest.
        'token_string': [
            (r'{', Punctuation, 'token_string_nest'),
            (r'}', String, '#pop'),
            include('root'),
        ],
        'token_string_nest': [
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
            include('root'),
        ],
        'delimited_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, 'delimited_inside_bracket'),
            (r'\]"', String, '#pop'),
        ],
        'delimited_inside_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, '#push'),
            (r'\]', String, '#pop'),
        ],
        'delimited_parenthesis': [
            (r'[^\(\)]+', String),
            (r'\(', String, 'delimited_inside_parenthesis'),
            (r'\)"', String, '#pop'),
        ],
        'delimited_inside_parenthesis': [
            (r'[^\(\)]+', String),
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],
        'delimited_angle': [
            (r'[^<>]+', String),
            (r'<', String, 'delimited_inside_angle'),
            (r'>"', String, '#pop'),
        ],
        'delimited_inside_angle': [
            (r'[^<>]+', String),
            (r'<', String, '#push'),
            (r'>', String, '#pop'),
        ],
        'delimited_curly': [
            (r'[^{}]+', String),
            (r'{', String, 'delimited_inside_curly'),
            (r'}"', String, '#pop'),
        ],
        'delimited_inside_curly': [
            (r'[^{}]+', String),
            (r'{', String, '#push'),
            (r'}', String, '#pop'),
        ],
    }
class DelphiLexer(Lexer):
"""
For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas']
mimetypes = ['text/x-pascal']
TURBO_PASCAL_KEYWORDS = [
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
]
DELPHI_KEYWORDS = [
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
]
FREE_PASCAL_KEYWORDS = [
'dispose', 'exit', 'false', 'new', 'true'
]
BLOCK_KEYWORDS = set([
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
])
FUNCTION_MODIFIERS = set([
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
])
# XXX: those aren't global. but currently we know no way for defining
# them just for the type context.
DIRECTIVES = set([
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
])
BUILTIN_TYPES = set([
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
])
BUILTIN_UNITS = {
'System': [
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
],
'SysUtils': [
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
],
'Classes': [
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
],
'Math': [
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
]
}
ASM_REGISTERS = set([
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
])
ASM_INSTRUCTIONS = set([
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
])
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
    def get_tokens_unprocessed(self, text):
        """Tokenize *text* with a hand-written scanner.

        Keeps a small state stack ('initial', 'string', 'asm') plus a set
        of boolean flags that track whether we are inside a
        function/property declaration, whether labels are being collected
        after ``label``/``goto``, and bracket balance — context a plain
        regex table cannot express.
        """
        scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
        stack = ['initial']
        # declaration-context flags, reset when the declaration ends
        in_function_block = False
        in_property_block = False
        was_dot = False
        next_token_is_function = False
        next_token_is_property = False
        collect_labels = False
        block_labels = set()
        # [round-paren depth, square-bracket depth] inside a declaration
        brace_balance = [0, 0]
        while not scanner.eos:
            token = Error
            if stack[-1] == 'initial':
                if scanner.scan(r'\s+'):
                    token = Text
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    # ``{$...}`` / ``(*$...*)`` are compiler directives
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
                    token = Operator
                    # stop label highlighting on next ";"
                    if collect_labels and scanner.match == ';':
                        collect_labels = False
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                    # abort function naming ``foo = Function(...)``
                    next_token_is_function = False
                    # if we are in a function block we count the open
                    # braces because ootherwise it's impossible to
                    # determine the end of the modifier context
                    if in_function_block or in_property_block:
                        if scanner.match == '(':
                            brace_balance[0] += 1
                        elif scanner.match == ')':
                            brace_balance[0] -= 1
                        elif scanner.match == '[':
                            brace_balance[1] += 1
                        elif scanner.match == ']':
                            brace_balance[1] -= 1
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    # identifiers are classified case-insensitively
                    lowercase_name = scanner.match.lower()
                    if lowercase_name == 'result':
                        token = Name.Builtin.Pseudo
                    elif lowercase_name in self.keywords:
                        token = Keyword
                        # if we are in a special block and a
                        # block ending keyword occours (and the parenthesis
                        # is balanced) we end the current block context
                        if (in_function_block or in_property_block) and \
                           lowercase_name in self.BLOCK_KEYWORDS and \
                           brace_balance[0] <= 0 and \
                           brace_balance[1] <= 0:
                            in_function_block = False
                            in_property_block = False
                            brace_balance = [0, 0]
                            block_labels = set()
                        if lowercase_name in ('label', 'goto'):
                            collect_labels = True
                        elif lowercase_name == 'asm':
                            stack.append('asm')
                        elif lowercase_name == 'property':
                            in_property_block = True
                            next_token_is_property = True
                        elif lowercase_name in ('procedure', 'operator',
                                                'function', 'constructor',
                                                'destructor'):
                            in_function_block = True
                            next_token_is_function = True
                    # we are in a function block and the current name
                    # is in the set of registered modifiers. highlight
                    # it as pseudo keyword
                    elif in_function_block and \
                         lowercase_name in self.FUNCTION_MODIFIERS:
                        token = Keyword.Pseudo
                    # if we are in a property highlight some more
                    # modifiers
                    elif in_property_block and \
                         lowercase_name in ('read', 'write'):
                        token = Keyword.Pseudo
                        next_token_is_function = True
                    # if the last iteration set next_token_is_function
                    # to true we now want this name highlighted as
                    # function. so do that and reset the state
                    elif next_token_is_function:
                        # Look if the next token is a dot. If yes it's
                        # not a function, but a class name and the
                        # part after the dot a function name
                        if scanner.test(r'\s*\.\s*'):
                            token = Name.Class
                        # it's not a dot, our job is done
                        else:
                            token = Name.Function
                            next_token_is_function = False
                    # same for properties
                    elif next_token_is_property:
                        token = Name.Property
                        next_token_is_property = False
                    # Highlight this token as label and add it
                    # to the list of known labels
                    elif collect_labels:
                        token = Name.Label
                        block_labels.add(scanner.match.lower())
                    # name is in list of known labels
                    elif lowercase_name in block_labels:
                        token = Name.Label
                    elif lowercase_name in self.BUILTIN_TYPES:
                        token = Keyword.Type
                    elif lowercase_name in self.DIRECTIVES:
                        token = Keyword.Pseudo
                    # builtins are just builtins if the token
                    # before isn't a dot
                    elif not was_dot and lowercase_name in self.builtins:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
                    token = String.Char
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    # if the stack depth is deeper than once, pop
                    if len(stack) > 1:
                        stack.pop()
                    scanner.get_char()
            elif stack[-1] == 'string':
                if scanner.scan(r"''"):
                    # doubled quote is the Pascal escape for a quote
                    token = String.Escape
                elif scanner.scan(r"'"):
                    token = String
                    stack.pop()
                elif scanner.scan(r"[^']*"):
                    token = String
                else:
                    scanner.get_char()
                    stack.pop()
            elif stack[-1] == 'asm':
                if scanner.scan(r'\s+'):
                    token = Text
                elif scanner.scan(r'end'):
                    token = Keyword
                    stack.pop()
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
                    token = Name.Label
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    lowercase_name = scanner.match.lower()
                    if lowercase_name in self.ASM_INSTRUCTIONS:
                        token = Keyword
                    elif lowercase_name in self.ASM_REGISTERS:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
                    token = Operator
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    scanner.get_char()
                    stack.pop()
            # save the dot!!!11
            if scanner.match.strip():
                was_dot = scanner.match == '.'
            yield scanner.start_pos, token, scanner.match or ''
class DylanLexer(RegexLexer):
    """
    For the `Dylan <http://www.opendylan.org/>`_ language.

    .. versionadded:: 0.7
    """

    name = 'Dylan'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl', '*.intr']
    mimetypes = ['text/x-dylan']

    flags = re.IGNORECASE

    # Word sets consulted by get_tokens_unprocessed() below to re-tag
    # plain Name tokens emitted by the regex rules.
    builtins = set([
        'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
        'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
        'each-subclass', 'exception', 'exclude', 'function', 'generic',
        'handler', 'inherited', 'inline', 'inline-only', 'instance',
        'interface', 'import', 'keyword', 'library', 'macro', 'method',
        'module', 'open', 'primary', 'required', 'sealed', 'sideways',
        'singleton', 'slot', 'thread', 'variable', 'virtual'])

    keywords = set([
        'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
        'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
        'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
        'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
        'while'])

    operators = set([
        '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
        '>', '>=', '&', '|'])

    functions = set([
        'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
        'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
        'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
        'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
        'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
        'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
        'condition-format-arguments', 'condition-format-string', 'conjoin',
        'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
        'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
        'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
        'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
        'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
        'function-arguments', 'function-return-values',
        'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
        'generic-function-methods', 'head', 'head-setter', 'identity',
        'initialize', 'instance?', 'integral?', 'intersection',
        'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
        'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
        'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
        'min', 'modulo', 'negative', 'negative?', 'next-method',
        'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
        'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
        'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
        'remove-duplicates', 'remove-duplicates!', 'remove-key!',
        'remove-method', 'replace-elements!', 'replace-subsequence!',
        'restart-query', 'return-allowed?', 'return-description',
        'return-query', 'reverse', 'reverse!', 'round', 'round/',
        'row-major-index', 'second', 'second-setter', 'shallow-copy',
        'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
        'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
        'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
        'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
        'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
        'vector', 'zero?'])

    # Dylan identifiers may contain many punctuation characters; an
    # optional leading backslash escapes an operator used as a name.
    valid_name = '\\\\?[a-z0-9' + re.escape('!&*<>|^$%@_-+~?/=') + ']+'

    def get_tokens_unprocessed(self, text):
        # Post-process the regex-rule stream: re-tag generic Name tokens
        # whose lowercased text is in one of the word sets above.
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                lowercase_value = value.lower()
                if lowercase_value in self.builtins:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.keywords:
                    yield index, Keyword, value
                    continue
                if lowercase_value in self.functions:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.operators:
                    yield index, Operator, value
                    continue
            yield index, token, value

    # NOTE: rule order within each state is significant — earlier rules win.
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # lid header
            (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
                bygroups(Name.Attribute, Operator, Text, String)),

            ('', Text, 'code') # no header match, switch to code
        ],
        'code': [
            # Whitespace
            (r'\s+', Text),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # multi-line comment
            (r'/\*', Comment.Multiline, 'comment'),

            # strings and characters
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),

            # binary integer
            (r'#[bB][01]+', Number.Bin),

            # octal integer
            (r'#[oO][0-7]+', Number.Oct),

            # floating point
            (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),

            # decimal integer
            (r'[-+]?\d+', Number.Integer),

            # hex integer
            (r'#[xX][0-9a-f]+', Number.Hex),

            # Macro parameters
            (r'(\?' + valid_name + ')(:)'
             r'(token|name|variable|expression|body|case-body|\*)',
                bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
                bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'\?' + valid_name, Name.Tag),

            # Punctuation
            (r'(=>|::|#\(|#\[|##|\?|\?\?|\?=|[(){}\[\],\.;])', Punctuation),

            # Most operators are picked up as names and then re-flagged.
            # This one isn't valid in a name though, so we pick it up now.
            (r':=', Operator),

            # Pick up #t / #f before we match other stuff with #.
            (r'#[tf]', Literal),

            # #"foo" style keywords
            (r'#"', String.Symbol, 'keyword'),

            # #rest, #key, #all-keys, etc.
            (r'#[a-z0-9-]+', Keyword),

            # required-init-keyword: style keywords.
            (valid_name + ':', Keyword),

            # class names
            (r'<' + valid_name + '>', Name.Class),

            # define variable forms.
            (r'\*' + valid_name + '\*', Name.Variable.Global),

            # define constant forms.
            (r'\$' + valid_name, Name.Constant),

            # everything else. We re-flag some of these in the method above.
            (valid_name, Name),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'keyword': [
            (r'"', String.Symbol, '#pop'),
            (r'[^\\"]+', String.Symbol), # all other characters
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ]
    }
class DylanLidLexer(RegexLexer):
    """
    For Dylan LID (Library Interchange Definition) files.

    .. versionadded:: 1.6
    """

    name = 'DylanLID'
    aliases = ['dylan-lid', 'lid']
    filenames = ['*.lid', '*.hdp']
    mimetypes = ['text/x-dylan-lid']

    flags = re.IGNORECASE

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # lid header: ``key: value`` where the value may continue on
            # following lines that start with whitespace
            (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Text, String)),
        ]
    }
class DylanConsoleLexer(Lexer):
    """
    For Dylan interactive console output like:

    .. sourcecode:: dylan-console

        ? let a = 1;
        => 1
        ? a
        => 1

    This is based on a copy of the RubyConsoleLexer.

    .. versionadded:: 1.6
    """
    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']

    # Raw strings: the previous non-raw '\?| ' relied on the invalid
    # escape sequence ``\?`` (DeprecationWarning since Python 3.6,
    # SyntaxWarning in 3.12, a future SyntaxError). Pattern values are
    # unchanged.
    _line_re = re.compile(r'.*?\n')
    _prompt_re = re.compile(r'\?| ')

    def get_tokens_unprocessed(self, text):
        """Split *text* into prompt/code lines and output lines.

        Lines starting with ``?`` or a space are console input: the
        prompt is yielded as ``Generic.Prompt`` and the remainder is
        accumulated and highlighted with :class:`DylanLexer`. All other
        lines are yielded verbatim as ``Generic.Output``.
        """
        dylexer = DylanLexer(**self.options)

        curcode = ''
        insertions = []
        for match in self._line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                # Input line: remember the prompt so do_insertions() can
                # weave it back into the highlighted code stream.
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                # Output line: first flush any pending code block.
                if curcode:
                    for item in do_insertions(insertions,
                                              dylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        # Flush trailing code that is not followed by an output line.
        if curcode:
            for item in do_insertions(insertions,
                                      dylexer.get_tokens_unprocessed(curcode)):
                yield item
def objective(baselexer):
    """
    Generate a subclass of baselexer that accepts the Objective-C syntax
    extensions.

    *baselexer* is expected to be a C-family RegexLexer subclass; the
    generated class adds/overrides token states via ``inherit``.
    """

    # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
    # since that's quite common in ordinary C/C++ files.  It's OK to match
    # JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
    #
    # The upshot of this is that we CANNOT match @class or @interface
    _oc_keywords = re.compile(r'@(?:end|implementation|protocol)')

    # Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )
    # (note the identifier is *optional* when there is a ':'!)
    _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+'
                             r'(?:[a-zA-Z_]\w*\s*\]|'
                             r'(?:[a-zA-Z_]\w*)?:)')

    class GeneratedObjectiveCVariant(baselexer):
        """
        Implements Objective-C syntax on top of an existing C family lexer.
        """

        tokens = {
            'statements': [
                (r'@"', String, 'string'),
                (r'@(YES|NO)', Number),
                (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
                (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
                (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
                (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
                (r'@0[0-7]+[Ll]?', Number.Oct),
                (r'@\d+[Ll]?', Number.Integer),
                # boxed expression / array / dictionary literals
                (r'@\(', Literal, 'literal_number'),
                (r'@\[', Literal, 'literal_array'),
                (r'@\{', Literal, 'literal_dictionary'),
                (r'(@selector|@private|@protected|@public|@encode|'
                 r'@synchronized|@try|@throw|@catch|@finally|@end|@property|@synthesize|'
                 r'__bridge|__bridge_transfer|__autoreleasing|__block|__weak|__strong|'
                 r'weak|strong|copy|retain|assign|unsafe_unretained|atomic|nonatomic|'
                 r'readonly|readwrite|setter|getter|typeof|in|out|inout|release|class|'
                 r'@dynamic|@optional|@required|@autoreleasepool)\b', Keyword),
                (r'(id|instancetype|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b',
                 Keyword.Type),
                (r'@(true|false|YES|NO)\n', Name.Builtin),
                (r'(YES|NO|nil|self|super)\b', Name.Builtin),
                # Carbon types
                (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type),
                # Carbon built-ins
                (r'(TRUE|FALSE)\b', Name.Builtin),
                (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_classname')),
                (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_forward_classname')),
                # @ can also prefix other expressions like @{...} or @(...)
                (r'@', Punctuation),
                inherit,
            ],
            'oc_classname' : [
                # interface definition that inherits
                ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)({)',
                 bygroups(Name.Class, Text, Name.Class, Text, Punctuation),
                 ('#pop', 'oc_ivars')),
                ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
                 bygroups(Name.Class, Text, Name.Class), '#pop'),
                # interface definition for a category
                ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)({)',
                 bygroups(Name.Class, Text, Name.Label, Text, Punctuation),
                 ('#pop', 'oc_ivars')),
                ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))',
                 bygroups(Name.Class, Text, Name.Label), '#pop'),
                # simple interface / implementation
                ('([a-zA-Z$_][\w$]*)(\s*)({)',
                 bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')),
                ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
            ],
            'oc_forward_classname' : [
                ('([a-zA-Z$_][\w$]*)(\s*,\s*)',
                 bygroups(Name.Class, Text), 'oc_forward_classname'),
                ('([a-zA-Z$_][\w$]*)(\s*;?)',
                 bygroups(Name.Class, Text), '#pop')
            ],
            'oc_ivars' : [
                include('whitespace'),
                include('statements'),
                (';', Punctuation),
                ('{', Punctuation, '#push'),
                ('}', Punctuation, '#pop'),
            ],
            'root': [
                # methods
                (r'^([-+])(\s*)'                         # method marker
                 r'(\(.*?\))?(\s*)'                      # return type
                 r'([a-zA-Z$_][\w$]*:?)',                # begin of method name
                 bygroups(Punctuation, Text, using(this),
                          Text, Name.Function),
                 'method'),
                inherit,
            ],
            'method': [
                include('whitespace'),
                # TODO unsure if ellipses are allowed elsewhere, see
                # discussion in Issue 789
                (r',', Punctuation),
                (r'\.\.\.', Punctuation),
                (r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)',
                 bygroups(using(this), Text, Name.Variable)),
                (r'[a-zA-Z$_][\w$]*:', Name.Function),
                (';', Punctuation, '#pop'),
                ('{', Punctuation, 'function'),
                ('', Text, '#pop'),
            ],
            'literal_number': [
                (r'\(', Punctuation, 'literal_number_inner'),
                (r'\)', Literal, '#pop'),
                include('statement'),
            ],
            'literal_number_inner': [
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop'),
                include('statement'),
            ],
            'literal_array': [
                (r'\[', Punctuation, 'literal_array_inner'),
                (r'\]', Literal, '#pop'),
                include('statement'),
            ],
            'literal_array_inner': [
                (r'\[', Punctuation, '#push'),
                (r'\]', Punctuation, '#pop'),
                include('statement'),
            ],
            'literal_dictionary': [
                (r'\}', Literal, '#pop'),
                include('statement'),
            ],
        }

        def analyse_text(text):
            # Heuristic confidence score for lexer guessing.
            if _oc_keywords.search(text):
                return 1.0
            elif '@"' in text: # strings
                return 0.8
            elif re.search('@[0-9]+', text):
                return 0.7
            elif _oc_message.search(text):
                return 0.8
            return 0

        def get_tokens_unprocessed(self, text):
            # Re-tag names that are known Cocoa interfaces/protocols/
            # primitives as builtin pseudo names.
            from pygments.lexers._cocoabuiltins import COCOA_INTERFACES, \
                COCOA_PROTOCOLS, COCOA_PRIMITIVES

            for index, token, value in \
                    baselexer.get_tokens_unprocessed(self, text):
                if token is Name or token is Name.Class:
                    if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
                       or value in COCOA_PRIMITIVES:
                        token = Name.Builtin.Pseudo

                yield index, token, value

    return GeneratedObjectiveCVariant
class ObjectiveCLexer(objective(CLexer)):
    """
    For Objective-C source code with preprocessor directives.

    Generated by applying the :func:`objective` factory to ``CLexer``.
    """

    name = 'Objective-C'
    aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']
    filenames = ['*.m', '*.h']
    mimetypes = ['text/x-objective-c']
    priority = 0.05    # Lower than C
class ObjectiveCppLexer(objective(CppLexer)):
    """
    For Objective-C++ source code with preprocessor directives.

    Generated by applying the :func:`objective` factory to ``CppLexer``.
    """

    name = 'Objective-C++'
    aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++']
    filenames = ['*.mm', '*.hh']
    mimetypes = ['text/x-objective-c++']
    priority = 0.05    # Lower than C++
class FortranLexer(RegexLexer):
    """
    Lexer for FORTRAN 90 code.

    .. versionadded:: 0.10
    """
    name = 'Fortran'
    aliases = ['fortran']
    filenames = ['*.f', '*.f90', '*.F', '*.F90']
    mimetypes = ['text/x-fortran']
    flags = re.IGNORECASE

    # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
    # Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
    # Logical (?): NOT, AND, OR, EQV, NEQV

    # Builtins:
    # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html

    tokens = {
        'root': [
            (r'!.*\n', Comment),
            include('strings'),
            include('core'),
            (r'[a-z]\w*', Name.Variable),
            include('nums'),
            (r'[\s]+', Text),
        ],
        'core': [
            # Statements
            (r'\b(ABSTRACT|ACCEPT|ALLOCATABLE|ALLOCATE|ARRAY|ASSIGN|ASYNCHRONOUS|'
             r'BACKSPACE|BIND|BLOCK( DATA)?|BYTE|CALL|CASE|CLASS|CLOSE|COMMON|CONTAINS|'
             r'CONTINUE|CYCLE|DATA|DEALLOCATE|DECODE|DEFERRED|DIMENSION|DO|'
             r'ELEMENTAL|ELSE|ENCODE|END( FILE)?|ENDIF|ENTRY|ENUMERATOR|EQUIVALENCE|'
             r'EXIT|EXTERNAL|EXTRINSIC|FINAL|FORALL|FORMAT|FUNCTION|GENERIC|'
             r'GOTO|IF|IMPLICIT|IMPORT|INCLUDE|INQUIRE|INTENT|INTERFACE|'
             r'INTRINSIC|MODULE|NAMELIST|NULLIFY|NONE|NON_INTRINSIC|'
             r'NON_OVERRIDABLE|NOPASS|OPEN|OPTIONAL|OPTIONS|PARAMETER|PASS|'
             r'PAUSE|POINTER|PRINT|PRIVATE|PROGRAM|PROTECTED|PUBLIC|PURE|READ|'
             r'RECURSIVE|RESULT|RETURN|REWIND|SAVE|SELECT|SEQUENCE|STOP|SUBROUTINE|'
             r'TARGET|THEN|TYPE|USE|VALUE|VOLATILE|WHERE|WRITE|WHILE)\s*\b',
             Keyword),

            # Data Types
            (r'\b(CHARACTER|COMPLEX|DOUBLE PRECISION|DOUBLE COMPLEX|INTEGER|'
             r'LOGICAL|REAL|C_INT|C_SHORT|C_LONG|C_LONG_LONG|C_SIGNED_CHAR|'
             r'C_SIZE_T|C_INT8_T|C_INT16_T|C_INT32_T|C_INT64_T|C_INT_LEAST8_T|'
             r'C_INT_LEAST16_T|C_INT_LEAST32_T|C_INT_LEAST64_T|C_INT_FAST8_T|'
             r'C_INT_FAST16_T|C_INT_FAST32_T|C_INT_FAST64_T|C_INTMAX_T|'
             r'C_INTPTR_T|C_FLOAT|C_DOUBLE|C_LONG_DOUBLE|C_FLOAT_COMPLEX|'
             r'C_DOUBLE_COMPLEX|C_LONG_DOUBLE_COMPLEX|C_BOOL|C_CHAR|C_PTR|'
             r'C_FUNPTR)\s*\b',
             Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),

            (r'(::)', Keyword.Declaration),

            (r'[()\[\],:&%;]', Punctuation),

            # Intrinsics
            (r'\b(Abort|Abs|Access|AChar|ACos|AdjustL|AdjustR|AImag|AInt|Alarm|'
             r'All|Allocated|ALog|AMax|AMin|AMod|And|ANInt|Any|ASin|Associated|'
             r'ATan|BesJ|BesJN|BesY|BesYN|Bit_Size|BTest|CAbs|CCos|Ceiling|'
             r'CExp|Char|ChDir|ChMod|CLog|Cmplx|Command_Argument_Count|Complex|'
             r'Conjg|Cos|CosH|Count|CPU_Time|CShift|CSin|CSqRt|CTime|C_Funloc|'
             r'C_Loc|C_Associated|C_Null_Ptr|C_Null_Funptr|C_F_Pointer|'
             r'C_Null_Char|C_Alert|C_Backspace|C_Form_Feed|C_New_Line|'
             r'C_Carriage_Return|C_Horizontal_Tab|C_Vertical_Tab|'
             r'DAbs|DACos|DASin|DATan|Date_and_Time|DbesJ|'
             r'DbesJ|DbesJN|DbesY|DbesY|DbesYN|Dble|DCos|DCosH|DDiM|DErF|DErFC|'
             r'DExp|Digits|DiM|DInt|DLog|DLog|DMax|DMin|DMod|DNInt|Dot_Product|'
             r'DProd|DSign|DSinH|DSin|DSqRt|DTanH|DTan|DTime|EOShift|Epsilon|'
             r'ErF|ErFC|ETime|Exit|Exp|Exponent|Extends_Type_Of|FDate|FGet|'
             r'FGetC|Float|Floor|Flush|FNum|FPutC|FPut|Fraction|FSeek|FStat|'
             r'FTell|GError|GetArg|Get_Command|Get_Command_Argument|'
             r'Get_Environment_Variable|GetCWD|GetEnv|GetGId|GetLog|GetPId|'
             r'GetUId|GMTime|HostNm|Huge|IAbs|IAChar|IAnd|IArgC|IBClr|IBits|'
             r'IBSet|IChar|IDate|IDiM|IDInt|IDNInt|IEOr|IErrNo|IFix|Imag|'
             r'ImagPart|Index|Int|IOr|IRand|IsaTty|IShft|IShftC|ISign|'
             r'Iso_C_Binding|Is_Iostat_End|Is_Iostat_Eor|ITime|Kill|Kind|'
             r'LBound|Len|Len_Trim|LGe|LGt|Link|LLe|LLt|LnBlnk|Loc|Log|'
             r'Logical|Long|LShift|LStat|LTime|MatMul|Max|MaxExponent|MaxLoc|'
             r'MaxVal|MClock|Merge|Move_Alloc|Min|MinExponent|MinLoc|MinVal|'
             r'Mod|Modulo|MvBits|Nearest|New_Line|NInt|Not|Or|Pack|PError|'
             r'Precision|Present|Product|Radix|Rand|Random_Number|Random_Seed|'
             r'Range|Real|RealPart|Rename|Repeat|Reshape|RRSpacing|RShift|'
             r'Same_Type_As|Scale|Scan|Second|Selected_Int_Kind|'
             r'Selected_Real_Kind|Set_Exponent|Shape|Short|Sign|Signal|SinH|'
             r'Sin|Sleep|Sngl|Spacing|Spread|SqRt|SRand|Stat|Sum|SymLnk|'
             r'System|System_Clock|Tan|TanH|Time|Tiny|Transfer|Transpose|Trim|'
             r'TtyNam|UBound|UMask|Unlink|Unpack|Verify|XOr|ZAbs|ZCos|ZExp|'
             r'ZLog|ZSin|ZSqRt)\s*\b',
             Name.Builtin),

            # Booleans
            (r'\.(true|false)\.', Name.Builtin),
            # Comparing Operators
            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
        ],

        'strings': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
        ],

        'nums': [
            (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
            (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
            (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
        ],
    }
class GLShaderLexer(RegexLexer):
    """
    GLSL (OpenGL Shader) lexer.

    .. versionadded:: 1.1
    """
    name = 'GLSL'
    aliases = ['glsl']
    filenames = ['*.vert', '*.frag', '*.geo']
    mimetypes = ['text/x-glslsrc']
    tokens = {
        'root': [
            (r'^#.*', Comment.Preproc),
            (r'//.*', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator), # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),\[\]]', Punctuation),
            #FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            (r'\b(attribute|const|uniform|varying|centroid|break|continue|'
             r'do|for|while|if|else|in|out|inout|float|int|void|bool|true|'
             r'false|invariant|discard|return|mat[234]|mat[234]x[234]|'
             r'vec[234]|[ib]vec[234]|sampler[123]D|samplerCube|'
             r'sampler[12]DShadow|struct)\b', Keyword),
            (r'\b(asm|class|union|enum|typedef|template|this|packed|goto|'
             r'switch|default|inline|noinline|volatile|public|static|extern|'
             r'external|interface|long|short|double|half|fixed|unsigned|'
             r'lowp|mediump|highp|precision|input|output|hvec[234]|'
             r'[df]vec[234]|sampler[23]DRect|sampler2DRectShadow|sizeof|'
             r'cast|namespace|using)\b', Keyword), #future use
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
            (r'\.', Punctuation),
            (r'\s+', Text),
        ],
    }
class PrologLexer(RegexLexer):
    """
    Lexer for Prolog files.
    """
    name = 'Prolog'
    aliases = ['prolog']
    filenames = ['*.prolog', '*.pro', '*.pl']
    mimetypes = ['text/x-prolog']

    flags = re.UNICODE

    tokens = {
        'root': [
            (r'^#.*', Comment.Single),
            (r'/\*', Comment.Multiline, 'nested-comment'),
            (r'%.*', Comment.Single),
            # character literal
            (r'0\'.', String.Char),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            # literal with prepended base
            (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer),
            (r'[\[\](){}|.,;!]', Punctuation),
            (r':-|-->', Punctuation),
            (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
             r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double),
            (r"'(?:''|[^'])*'", String.Atom), # quoted atom
            # Needs to not be followed by an atom.
            #(r'=(?=\s|[a-zA-Z\[])', Operator),
            (r'is\b', Operator),
            (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
             Operator),
            (r'(mod|div|not)\b', Operator),
            (r'_', Keyword), # The don't-care variable
            (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
            # The u'' patterns below extend the atom character classes
            # with several non-ASCII letter ranges.
            (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             u'(\\s*)(:-|-->)',
             bygroups(Name.Function, Text, Operator)), # function defn
            (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             u'(\\s*)(\\()',
             bygroups(Name.Function, Text, Punctuation)),
            (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
             String.Atom), # atom, characters
            # This one includes !
            (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
             String.Atom), # atom, graphics
            (r'[A-Z_]\w*', Name.Variable),
            (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
        ],
        'nested-comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            (r'/\*', Comment.Multiline, '#push'),
            (r'[^*/]+', Comment.Multiline),
            (r'[*/]', Comment.Multiline),
        ],
    }

    def analyse_text(text):
        # ``:-`` (clause neck) is a strong hint that this is Prolog.
        return ':-' in text
class CythonLexer(RegexLexer):
    """
    For Pyrex and `Cython <http://cython.org>`_ source code.

    .. versionadded:: 1.1
    """

    name = 'Cython'
    aliases = ['cython', 'pyx', 'pyrex']
    filenames = ['*.pyx', '*.pxd', '*.pxi']
    mimetypes = ['text/x-cython', 'application/x-cython']

    # NOTE: rule order within each state is significant — earlier rules win.
    tokens = {
        'root': [
            (r'\n', Text),
            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # C type cast, e.g. ``<int>x``
            (r'(<)([a-zA-Z0-9.?]+)(>)',
             bygroups(Punctuation, Keyword.Type, Punctuation)),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
            (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
             bygroups(Keyword, Number.Integer, Operator, Name, Operator,
                      Name, Punctuation)),
            include('keywords'),
            (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
            (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
            (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
            include('builtins'),
            include('backtick'),
            # raw / unicode-raw string prefixes before plain prefixes
            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
            ('[uU]?"', String, combined('stringescape', 'dqs')),
            ("[uU]?'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (r'(assert|break|by|continue|ctypedef|del|elif|else|except\??|exec|'
             r'finally|for|gil|global|if|include|lambda|nogil|pass|print|raise|'
             r'return|try|while|yield|as|with)\b', Keyword),
            (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
        ],
        'builtins': [
            (r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
             r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
             r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
             r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
             r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
             r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
             r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
             r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
             r'vars|xrange|zip)\b', Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
             r')\b', Name.Builtin.Pseudo),
            (r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
             r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
             r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
             r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
             r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
             r'NotImplemented|NotImplementedError|OSError|OverflowError|'
             r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
             r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
             r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
             r'TypeError|UnboundLocalError|UnicodeDecodeError|'
             r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
             r'UnicodeWarning|UserWarning|ValueError|Warning|ZeroDivisionError'
             r')\b', Name.Exception),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@\w+', Name.Decorator),
            ('[a-zA-Z_]\w*', Name),
        ],
        'funcname': [
            ('[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        'cdef': [
            (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
            (r'(struct|enum|union|class)\b', Keyword),
            (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
             bygroups(Name.Function, Text), '#pop'),
            (r'([a-zA-Z_]\w*)(\s*)(,)',
             bygroups(Name.Function, Text, Punctuation)),
            (r'from\b', Keyword, '#pop'),
            (r'as\b', Keyword),
            (r':', Punctuation, '#pop'),
            (r'(?=["\'])', Text, '#pop'),
            (r'[a-zA-Z_]\w*', Keyword.Type),
            (r'.', Text),
        ],
        'classname': [
            ('[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'[a-zA-Z_][\w.]*', Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            default('#pop') # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
            (r'[a-zA-Z_.][\w.]*', Name.Namespace),
            # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
            default('#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
class ValaLexer(RegexLexer):
    """
    For Vala source code with preprocessor directives.

    .. versionadded:: 1.1
    """
    name = 'Vala'
    aliases = ['vala', 'vapi']
    filenames = ['*.vala', '*.vapi']
    mimetypes = ['text/x-vala']

    tokens = {
        # Whitespace, comments and ``#if 0`` blocks (skipped as
        # preprocessor comments via the 'if0' state).
        'whitespace': [
            (r'^\s*#if\s+0', Comment.Preproc, 'if0'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        # Literals, operators, code attributes, keywords and names.
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation)),
            # TODO: "correctly" parse complex code attributes
            (r'(\[)(CCode|(?:Integer|Floating)Type)',
             bygroups(Punctuation, Name.Decorator)),
            (r'[()\[\],.]', Punctuation),
            (r'(as|base|break|case|catch|construct|continue|default|delete|do|'
             r'else|enum|finally|for|foreach|get|if|in|is|lock|new|out|params|'
             r'return|set|sizeof|switch|this|throw|try|typeof|while|yield)\b',
             Keyword),
            (r'(abstract|const|delegate|dynamic|ensures|extern|inline|internal|'
             r'override|owned|private|protected|public|ref|requires|signal|'
             r'static|throws|unowned|var|virtual|volatile|weak|yields)\b',
             Keyword.Declaration),
            (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
             'namespace'),
            (r'(class|errordomain|interface|struct)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(\.)([a-zA-Z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            # void is an actual keyword, others are in glib-2.0.vapi
            (r'(void|bool|char|double|float|int|int8|int16|int32|int64|long|'
             r'short|size_t|ssize_t|string|time_t|uchar|uint|uint8|uint16|'
             r'uint32|uint64|ulong|unichar|ushort)\b', Keyword.Type),
            (r'(true|false|null)\b', Name.Builtin),
            ('[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # Everything that is not whitespace is parsed as a statement.
            # ``default`` replaces the zero-width ``('', Text, ...)`` rule
            # for consistency with the other lexers in this module.
            default('statement'),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        # Double-quoted string body; entered from 'statements'.
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        # ``#if 0`` region: nested #if pushes, #else/#elif/#endif pops.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ],
    }
class OocLexer(RegexLexer):
    """
    For `Ooc <http://ooc-lang.org/>`_ source code

    .. versionadded:: 1.2
    """
    name = 'Ooc'
    aliases = ['ooc']
    filenames = ['*.ooc']
    mimetypes = ['text/x-ooc']

    tokens = {
        'root': [
            (r'\b(class|interface|implement|abstract|extends|from|'
             r'this|super|new|const|final|static|import|use|extern|'
             r'inline|proto|break|continue|fallthrough|operator|if|else|for|'
             r'while|do|switch|case|as|in|version|return|true|false|null)\b',
             Keyword),
            # ``include`` names C headers; handled in a sub-state below
            (r'include\b', Keyword, 'include'),
            (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)',
             bygroups(Keyword, Text, Keyword, Text, Name.Class)),
            # named function suffix form: ``func ~name``
            (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)',
             bygroups(Keyword, Text, Name.Function)),
            (r'\bfunc\b', Keyword),
            # Note: %= and ^= not listed on http://ooc-lang.org/syntax
            (r'//.*', Comment),
            (r'(?s)/\*.*?\*/', Comment.Multiline),
            (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
             r'&&?|\|\|?|\^=?)', Operator),
            # member access: the name after a dot is highlighted as a function
            (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
             Name.Function)),
            # ALL_CAPS identifiers are constants by convention
            (r'[A-Z][A-Z0-9_]+', Name.Constant),
            # capitalized names are classes; may carry @, * or [] suffixes
            (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class),
            # a lowercase name directly followed by '(' is a function call
            (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()',
             bygroups(Name.Function, Text)),
            (r'[a-z]\w*', Name.Variable),
            # : introduces types
            (r'[:(){}\[\];,]', Punctuation),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'0c[0-9]+', Number.Oct),
            (r'0b[01]+', Number.Bin),
            (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
            (r'[0-9_]+', Number.Decimal),
            (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\"])*"',
             String.Double),
            (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'@', Punctuation), # pointer dereference
            (r'\.', Punctuation), # imports or chain operator
            (r'\\[ \t\n]', Text),
            (r'[ \t]+', Text),
        ],
        # comma-separated header names after ``include``; a ';' or a
        # newline terminates the list
        'include': [
            (r'[\w/]+', Name),
            (r',', Punctuation),
            (r'[ \t]', Text),
            (r'[;\n]', Text, '#pop'),
        ],
    }
class GoLexer(RegexLexer):
    """
    For `Go <http://golang.org>`_ source.
    """
    name = 'Go'
    filenames = ['*.go']
    aliases = ['go']
    mimetypes = ['text/x-gosrc']

    flags = re.MULTILINE | re.UNICODE

    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuations
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b', Keyword.Declaration),
            (r'(break|default|select|case|defer|go'
             r'|else|goto|switch|fallthrough|if|range'
             r'|continue|for|return)\b', Keyword),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            # First: a builtin name immediately followed by '(' is a call.
            (r'(uint|uint8|uint16|uint32|uint64'
             r'|int|int8|int16|int32|int64'
             r'|float|float32|float64'
             r'|complex64|complex128|byte|rune'
             r'|string|bool|error|uintptr'
             r'|print|println|panic|recover|close|complex|real|imag'
             r'|len|cap|append|copy|delete|new|make)\b(\()',
             bygroups(Name.Builtin, Punctuation)),
            # Second: the same type names used as plain types.
            (r'(uint|uint8|uint16|uint32|uint64'
             r'|int|int8|int16|int32|int64'
             r'|float|float32|float64'
             r'|complex64|complex128|byte|rune'
             r'|string|bool|error|uintptr)\b', Keyword.Type),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char
             ),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\"|[^"])*"', String),
            # Tokens
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
class FelixLexer(RegexLexer):
    """
    For `Felix <http://www.felix-lang.org>`_ source code.

    .. versionadded:: 1.2
    """

    name = 'Felix'
    aliases = ['felix', 'flx']
    filenames = ['*.flx', '*.flxh']
    mimetypes = ['text/x-felix']

    # preprocessor directive names recognized after '#'
    preproc = [
        'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
    ]

    keywords = [
        '_', '_deref', 'all', 'as',
        'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
        'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
        'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
        'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
        'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
        'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
        'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
        'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
        'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
        'when', 'whilst', 'with', 'yield',
    ]

    keyword_directives = [
        '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
        'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
        'package', 'private', 'pod', 'property', 'public', 'publish',
        'requires', 'todo', 'virtual', 'use',
    ]

    keyword_declarations = [
        'def', 'let', 'ref', 'val', 'var',
    ]

    keyword_types = [
        'unit', 'void', 'any', 'bool',
        'byte', 'offset',
        'address', 'caddress', 'cvaddress', 'vaddress',
        'tiny', 'short', 'int', 'long', 'vlong',
        'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
        'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'uint64',
        'float', 'double', 'ldouble',
        'complex', 'dcomplex', 'lcomplex',
        'imaginary', 'dimaginary', 'limaginary',
        'char', 'wchar', 'uchar',
        'charp', 'charcp', 'ucharp', 'ucharcp',
        'string', 'wstring', 'ustring',
        'cont',
        'array', 'varray', 'list',
        'lvalue', 'opt', 'slice',
    ]

    keyword_constants = [
        'false', 'true',
    ]

    operator_words = [
        'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
    ]

    name_builtins = [
        '_svc', 'while',
    ]

    name_pseudo = [
        'root', 'self', 'this',
    ]

    # optional integer-literal suffix (width/signedness markers)
    decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'

    tokens = {
        'root': [
            include('whitespace'),

            # Keywords
            (r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword,
             'funcname'),
            (r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'),
            (r'(instance|module|typeclass)\b', Keyword, 'modulename'),

            (r'(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator),
            (r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration),
            (r'(%s)\b' % '|'.join(keyword_types), Keyword.Type),
            (r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant),

            # Operators
            include('operators'),

            # Float Literal
            # -- Hex Float
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
             Number.Float),

            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
            # -- Octal
            (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),

            # Strings
            # raw/c-string prefixes get no escape processing ...
            ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
            ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
            ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
            ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
            # ... other prefixes combine the 'stringescape' state
            ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
            ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
            ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
            ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),

            # Punctuation
            (r'[\[\]{}:(),;?]', Punctuation),

            # Labels
            (r'[a-zA-Z_]\w*:>', Name.Label),

            # Identifiers
            (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
            (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),

            include('comment'),

            # Preprocessor
            (r'#\s*if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),
        ],
        'operators': [
            (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
            (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
        ],
        'comment': [
            (r'//(.*?)\n', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment2'),
        ],
        # body of a (possibly nested) /* ... */ comment
        'comment2': [
            (r'[^\/*]', Comment.Multiline),
            (r'/[*]', Comment.Multiline, '#push'),
            (r'[*]/', Comment.Multiline, '#pop'),
            (r'[\/*]', Comment.Multiline),
        ],
        # ``#if 0`` region, with nesting via #push
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
            (r'.*?\n', Comment),
        ],
        # rest of a '#' preprocessor line; imports/includes get their
        # target highlighted as a string
        'macro': [
            include('comment'),
            (r'(import|include)(\s+)(<[^>]*?>)',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'(import|include)(\s+)("[^"]*?")',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r"(import|include)(\s+)('[^']*?')",
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'[^/\n]+', Comment.Preproc),
            ##(r'/[*](.|\n)*?[*]/', Comment),
            ##(r'//.*?\n', Comment, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'funcname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            # anonymous functions
            (r'(?=\()', Text, '#pop'),
        ],
        'classname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # anonymous classes
            (r'(?=\{)', Text, '#pop'),
        ],
        # module name, optionally preceded by a [tvar, ...] list
        'modulename': [
            include('whitespace'),
            (r'\[', Punctuation, ('modulename2', 'tvarlist')),
            default('modulename2'),
        ],
        'modulename2': [
            include('whitespace'),
            (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
        ],
        # bracketed type-variable list; brackets may nest
        'tvarlist': [
            include('whitespace'),
            include('operators'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(with|where)\b', Keyword),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        # shared string-body rules for all quote styles
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            # included here again for raw strings
            (r'\\\\|\\"|\\\n', String.Escape),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            # included here again for raw strings
            (r"\\\\|\\'|\\\n", String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
class AdaLexer(RegexLexer):
    """
    For Ada source code.

    .. versionadded:: 1.3
    """

    name = 'Ada'
    # fixed: a missing comma previously fused these into 'ada95ada2005'
    aliases = ['ada', 'ada95', 'ada2005']
    filenames = ['*.adb', '*.ads', '*.ada']
    mimetypes = ['text/x-ada']

    flags = re.MULTILINE | re.I  # Ignore case

    tokens = {
        'root': [
            (r'[^\S\n]+', Text),
            (r'--.*?\n', Comment.Single),
            (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
            (r'(subtype|type)(\s+)([a-z0-9_]+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            (r'task|protected', Keyword.Declaration),
            (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
            (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
            (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text,
                                             Comment.Preproc)),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(Address|Byte|Boolean|Character|Controlled|Count|Cursor|'
             r'Duration|File_Mode|File_Type|Float|Generator|Integer|Long_Float|'
             r'Long_Integer|Long_Long_Float|Long_Long_Integer|Natural|Positive|'
             r'Reference_Type|Short_Float|Short_Integer|Short_Short_Float|'
             r'Short_Short_Integer|String|Wide_Character|Wide_String)\b',
             Keyword.Type),
            # fixed: ``(\s+else)?`` so a bare ``or`` (not only ``or else``)
            # is recognized as an operator word, matching ``and(\s+then)?``
            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
            (r'generic|private', Keyword.Declaration),
            (r'package', Keyword.Declaration, 'package'),
            (r'array\b', Keyword.Reserved, 'array_def'),
            (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
            (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)',
             bygroups(Name.Constant, Text, Punctuation, Text,
                      Keyword.Reserved)),
            (r'<<[a-z0-9_]+>>', Name.Label),
            (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
             bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
            (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|'
             r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|'
             r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|'
             r'null|of|or|others|out|overriding|pragma|protected|raise|range|'
             r'record|renames|requeue|return|reverse|select|separate|subtype|'
             r'synchronized|task|tagged|terminate|then|type|until|when|while|'
             r'xor)\b',
             Keyword.Reserved),
            (r'"[^"]*"', String),
            include('attribute'),
            include('numbers'),
            (r"'[^']'", String.Character),
            (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"(<>|=>|:=|[()|:;,.'])", Punctuation),
            (r'[*<>+=/&-]', Operator),
            (r'\n+', Text),
        ],
        'numbers' : [
            (r'[0-9_]+#[0-9a-f]+#', Number.Hex),  # based literal, e.g. 16#FF#
            (r'[0-9_]+\.[0-9_]*', Number.Float),
            (r'[0-9_]+', Number.Integer),
        ],
        # attribute references: Obj'Attr
        'attribute' : [
            (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)),
        ],
        # after function/procedure/entry: the subprogram name, then either
        # a formal part, ';' (declaration) or 'is' (body)
        'subprogram' : [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'is\b', Keyword.Reserved, '#pop'),
            (r'"[^"]+"|[a-z0-9_]+', Name.Function),
            include('root'),
        ],
        # after 'end': optional construct keyword or name, up to ';'
        'end' : [
            ('(if|case|record|loop|select)', Keyword.Reserved),
            ('"[^"]+"|[\w.]+', Name.Function),
            ('\s+', Text),
            (';', Punctuation, '#pop'),
        ],
        'type_def': [
            (r';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'formal_part'),
            (r'with|and|use', Keyword.Reserved),
            (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
            (r'record\b', Keyword.Reserved, 'record_def'),
            (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'),
            include('root'),
        ],
        'array_def' : [
            (r';', Punctuation, '#pop'),
            (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text,
                                                   Keyword.Reserved)),
            include('root'),
        ],
        'record_def' : [
            (r'end record', Keyword.Reserved, '#pop'),
            include('root'),
        ],
        # unit name(s) after with/use
        'import': [
            (r'[a-z0-9_.]+', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'formal_part' : [
            (r'\)', Punctuation, '#pop'),
            (r'[a-z0-9_]+', Name.Variable),
            (r',|:[^=]', Punctuation),
            (r'(in|not|null|out|access)\b', Keyword.Reserved),
            include('root'),
        ],
        'package': [
            ('body', Keyword.Declaration),
            ('is\s+new|renames', Keyword.Reserved),
            ('is', Keyword.Reserved, '#pop'),
            (';', Punctuation, '#pop'),
            ('\(', Punctuation, 'package_instantiation'),
            ('([\w.]+)', Name.Class),
            include('root'),
        ],
        # generic instantiation actuals: ``Formal => Actual`` pairs
        'package_instantiation': [
            (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable,
                                                        Text, Punctuation)),
            (r'[a-z0-9._\'"]', Text),
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
    }
class Modula2Lexer(RegexLexer):
    """
    For `Modula-2 <http://www.modula2.org/>`_ source code.

    Additional options that determine which keywords are highlighted:

    `pim`
        Select PIM Modula-2 dialect (default: True).
    `iso`
        Select ISO Modula-2 dialect (default: False).
    `objm2`
        Select Objective Modula-2 dialect (default: False).
    `gm2ext`
        Also highlight GNU extensions (default: False).

    .. versionadded:: 1.3
    """
    name = 'Modula-2'
    aliases = ['modula2', 'm2']
    filenames = ['*.def', '*.mod']
    mimetypes = ['text/x-modula2']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'whitespace': [
            (r'\n+', Text),  # blank lines
            (r'\s+', Text),  # whitespace
        ],
        # every identifier is emitted as plain Name here;
        # get_tokens_unprocessed() below reclassifies reserved words
        # and pervasives according to the selected dialect
        'identifiers': [
            (r'([a-zA-Z_\$][\w\$]*)', Name),
        ],
        'numliterals': [
            (r'[01]+B', Number.Bin),        # binary number (ObjM2)
            (r'[0-7]+B', Number.Oct),       # octal number (PIM + ISO)
            (r'[0-7]+C', Number.Oct),       # char code (PIM + ISO)
            (r'[0-9A-F]+C', Number.Hex),    # char code (ObjM2)
            (r'[0-9A-F]+H', Number.Hex),    # hexadecimal number
            (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float),  # real number
            (r'[0-9]+\.[0-9]+', Number.Float),             # real number
            (r'[0-9]+', Number.Integer),    # decimal whole number
        ],
        'strings': [
            (r"'(\\\\|\\'|[^'])*'", String),  # single quoted string
            (r'"(\\\\|\\"|[^"])*"', String),  # double quoted string
        ],
        'operators': [
            (r'[*/+=#~&<>\^-]', Operator),
            (r':=', Operator),   # assignment
            (r'@', Operator),    # pointer deref (ISO)
            (r'\.\.', Operator), # ellipsis or range
            (r'`', Operator),    # Smalltalk message (ObjM2)
            (r'::', Operator),   # type conversion (ObjM2)
        ],
        'punctuation': [
            (r'[\(\)\[\]{},.:;|]', Punctuation),
        ],
        'comments': [
            (r'//.*?\n', Comment.Single),        # ObjM2
            (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2
            (r'\(\*([^\$].*?)\*\)', Comment.Multiline),
            # TO DO: nesting of (* ... *) comments
        ],
        'pragmas': [
            (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM
            (r'<\*(.*?)\*>', Comment.Preproc),     # ISO + ObjM2
        ],
        'root': [
            include('whitespace'),
            include('comments'),
            include('pragmas'),
            include('identifiers'),
            include('numliterals'),
            include('strings'),
            include('operators'),
            include('punctuation'),
        ]
    }

    pim_reserved_words = [
        # 40 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION',
        'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR',
        'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD',
        'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED',
        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
        'UNTIL', 'VAR', 'WHILE', 'WITH',
    ]

    pim_pervasives = [
        # 31 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC',
        'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL',
        'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD',
        'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL',
    ]

    iso_reserved_words = [
        # 46 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
        'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY',
        'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN',
        'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER',
        'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY',
        'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
        'WITH',
    ]

    iso_pervasives = [
        # 42 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX',
        'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH',
        'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH',
        'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW',
        'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE',
        # fixed typo: was 'UNINTERRUBTIBLE' (ISO M2 defines UNINTERRUPTIBLE)
        'TRUE', 'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
    ]

    objm2_reserved_words = [
        # base language, 42 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
        'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF',
        'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD',
        'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE',
        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
        'UNTIL', 'VAR', 'VARIADIC', 'WHILE',
        # OO extensions, 16 reserved words
        'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
        'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
        'SUPER', 'TRY',
    ]

    objm2_pervasives = [
        # base language, 38 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE',
        'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD',
        'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL',
        'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX',
        'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF',
        # OO extensions, 3 pervasives
        'OBJECT', 'NO', 'YES',
    ]

    gnu_reserved_words = [
        # 10 additional reserved words
        'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
        '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
    ]

    gnu_pervasives = [
        # 21 identifiers, actually from pseudo-module SYSTEM
        # but we will highlight them as if they were pervasives
        'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
        'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
        'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
        'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
    ]

    def __init__(self, **options):
        """Build the reserved-word and pervasive sets for the dialect
        selected via the `iso`/`objm2`/`gm2ext` options (PIM by default)."""
        self.reserved_words = set()
        self.pervasives = set()
        # ISO Modula-2
        if get_bool_opt(options, 'iso', False):
            self.reserved_words.update(self.iso_reserved_words)
            self.pervasives.update(self.iso_pervasives)
        # Objective Modula-2
        elif get_bool_opt(options, 'objm2', False):
            self.reserved_words.update(self.objm2_reserved_words)
            self.pervasives.update(self.objm2_pervasives)
        # PIM Modula-2 (DEFAULT)
        else:
            self.reserved_words.update(self.pim_reserved_words)
            self.pervasives.update(self.pim_pervasives)
        # GNU extensions
        if get_bool_opt(options, 'gm2ext', False):
            self.reserved_words.update(self.gnu_reserved_words)
            self.pervasives.update(self.gnu_pervasives)
        # initialise
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the regex stream: promote plain Name tokens that
        are reserved words or pervasives of the active dialect."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            # check for reserved words and pervasives
            if token is Name:
                if value in self.reserved_words:
                    token = Keyword.Reserved
                elif value in self.pervasives:
                    token = Keyword.Pervasive
            # return result
            yield index, token, value
class BlitzMaxLexer(RegexLexer):
    """
    For `BlitzMax <http://blitzbasic.com>`_ source code.

    .. versionadded:: 1.4
    """

    name = 'BlitzMax'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']

    # helper regex fragments used to compose the rules below
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'       # word-form operators
    bmax_sktypes = r'@{1,2}|[!#$%]'                # sigil type shortcuts
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'  # long-form types
    bmax_name = r'[a-z_]\w*'
    # variable with optional type annotation (sigil or ``: Type``) and Ptr
    bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
                r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
               (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            (r'\.\.\n', Text), # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
             (bmax_vopwords), Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
            (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
             (bmax_name, bmax_name),
             bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
            (bmax_func, bygroups(Name.Function, Text, Keyword.Type,
                                 Operator, Text, Punctuation, Text,
                                 Keyword.Type, Name.Class, Text,
                                 Keyword.Type, Text, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
                                Text, Punctuation, Text, Keyword.Type,
                                Name.Class, Text, Keyword.Type)),
            (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (r'\b(TNullMethodException|TNullFunctionException|'
             r'TNullObjectException|TArrayBoundsException|'
             r'TRuntimeException)\b', Name.Exception),
            (r'\b(Strict|SuperStrict|Module|ModuleInfo|'
             r'End|Return|Continue|Exit|Public|Private|'
             r'Var|VarPtr|Chr|Len|Asc|SizeOf|Sgn|Abs|Min|Max|'
             r'New|Release|Delete|'
             r'Incbin|IncbinPtr|IncbinLen|'
             r'Framework|Include|Import|Extern|EndExtern|'
             r'Function|EndFunction|'
             r'Type|EndType|Extends|'
             r'Method|EndMethod|'
             r'Abstract|Final|'
             r'If|Then|Else|ElseIf|EndIf|'
             r'For|To|Next|Step|EachIn|'
             r'While|Wend|EndWhile|'
             r'Repeat|Until|Forever|'
             r'Select|Case|Default|EndSelect|'
             r'Try|Catch|EndTry|Throw|Assert|'
             r'Goto|DefData|ReadData|RestoreData)\b', Keyword.Reserved),
            # Final resolve (for variable names and such)
            (r'(%s)' % (bmax_name), Name.Variable),
        ],
        # double-quoted string; '""' is an escaped quote, and a closing
        # quote may carry a 'C' suffix (C string literal)
        'string': [
            (r'""', String.Double),
            (r'"C?', String.Double, '#pop'),
            (r'[^"]+', String.Double),
        ],
    }
class BlitzBasicLexer(RegexLexer):
    """
    For `BlitzBasic <http://blitzbasic.com>`_ source code.

    .. versionadded:: 2.0
    """

    name = 'BlitzBasic'
    aliases = ['blitzbasic', 'b3d', 'bplus']
    filenames = ['*.bb', '*.decls']
    mimetypes = ['text/x-bb']

    # helper regex fragments used to compose the rules below
    bb_vopwords = (r'\b(Shl|Shr|Sar|Mod|Or|And|Not|'
                   r'Abs|Sgn|Handle|Int|Float|Str|'
                   r'First|Last|Before|After)\b')  # word-form operators
    bb_sktypes = r'@{1,2}|[#$%]'                   # sigil type shortcuts
    bb_name = r'[a-z]\w*'
    # variable with an optional sigil type or ``.Type`` annotation
    bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
             (bb_name, bb_sktypes, bb_name)

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            # Comments
            (r";.*?\n", Comment.Single),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (r'(?:%s|([+\-*/~=<>^]))' % (bb_vopwords), Operator),
            (r'[(),:\[\]\\]', Punctuation),
            (r'\.([ \t]*)(%s)' % bb_name, Name.Label),
            # Identifiers
            (r'\b(New)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Label)),
            (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
             bygroups(Operator, Text, Punctuation, Text, Name.Class)),
            (r'\b%s\b([ \t]*)(\()' % bb_var,
             bygroups(Name.Function, Text, Keyword.Type,Text, Punctuation,
                      Text, Name.Class, Text, Punctuation)),
            (r'\b(Function)\b([ \t]+)%s' % bb_var,
             bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
                      Text, Punctuation, Text, Name.Class)),
            (r'\b(Type)([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
            (r'\b(End|Return|Exit|'
             r'Chr|Len|Asc|'
             r'New|Delete|Insert|'
             r'Include|'
             r'Function|'
             r'Type|'
             r'If|Then|Else|ElseIf|EndIf|'
             r'For|To|Next|Step|Each|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'Select|Case|Default|'
             r'Goto|Gosub|Data|Read|Restore)\b', Keyword.Reserved),
            # Final resolve (for variable names and such)
            # (r'(%s)' % (bb_name), Name.Variable),
            (bb_var, bygroups(Name.Variable, Text, Keyword.Type,
                              Text, Punctuation, Text, Name.Class)),
        ],
        # double-quoted string; '""' is an escaped quote, and a closing
        # quote may carry a 'C' suffix
        'string': [
            (r'""', String.Double),
            (r'"C?', String.Double, '#pop'),
            (r'[^"]+', String.Double),
        ],
    }
class NimrodLexer(RegexLexer):
    """
    For `Nimrod <http://nimrod-code.org/>`_ source code.

    .. versionadded:: 1.5
    """

    name = 'Nimrod'
    aliases = ['nimrod', 'nim']
    filenames = ['*.nim', '*.nimrod']
    mimetypes = ['text/x-nimrod']

    flags = re.MULTILINE | re.IGNORECASE | re.UNICODE

    # Nimrod identifiers are case/underscore-insensitive after the first
    # character, so keyword patterns allow an optional '_' between letters.
    def underscorize(words):
        newWords = []
        new = ""
        for word in words:
            for ch in word:
                new += (ch + "_?")
            newWords.append(new)
            new = ""
        return "|".join(newWords)

    keywords = [
        'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break',
        'case', 'cast', 'const', 'continue', 'converter', 'discard',
        'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally',
        'for', 'generic', 'if', 'implies', 'in', 'yield',
        'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method',
        'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc',
        'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try',
        'tuple', 'type' , 'when', 'while', 'with', 'without', 'xor'
    ]

    keywordsPseudo = [
        'nil', 'true', 'false'
    ]

    opWords = [
        'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
        'notin', 'is', 'isnot'
    ]

    types = [
        'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
        'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
    ]

    tokens = {
        'root': [
            (r'##.*$', String.Doc),
            (r'#.*$', Comment),
            (r'\*|=|>|<|\+|-|/|@|\$|~|&|%|\!|\?|\||\\|\[|\]', Operator),
            (r'\.\.|\.|,|\[\.|\.\]|{\.|\.}|\(\.|\.\)|{|}|\(|\)|:|\^|`|;',
             Punctuation),

            # Strings
            (r'(?:[\w]+)"', String, 'rdqs'),  # raw string: identifier prefix
            (r'"""', String, 'tdqs'),
            ('"', String, 'dqs'),

            # Char
            ("'", String.Char, 'chars'),

            # Keywords
            (r'(%s)\b' % underscorize(opWords), Operator.Word),
            (r'(p_?r_?o_?c_?\s)(?![\(\[\]])', Keyword, 'funcname'),
            (r'(%s)\b' % underscorize(keywords), Keyword),
            (r'(%s)\b' % underscorize(['from', 'import', 'include']),
             Keyword.Namespace),
            (r'(v_?a_?r)\b', Keyword.Declaration),
            (r'(%s)\b' % underscorize(types), Keyword.Type),
            (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
            # Identifiers
            (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
            # Numbers
            (r'[0-9][0-9_]*(?=([eE.]|\'[fF](32|64)))',
             Number.Float, ('float-suffix', 'float-number')),
            (r'0[xX][a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
            (r'0[bB][01][01_]*', Number.Bin, 'int-suffix'),
            (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
            (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
            # Whitespace
            (r'\s+', Text),
            (r'.+$', Error),
        ],
        'chars': [
            (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
            (r"'", String.Char, '#pop'),
            (r".", String.Char)
        ],
        # shared string-body rules; entered via dqs/rdqs/tdqs
        'strings': [
            (r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
            (r'[^\\\'"\$\n]+', String),
            # quotes, dollars and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'\$', String)
            # newlines are an error (use "nl" state)
        ],
        'dqs': [
            (r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})',
             String.Escape),
            (r'"', String, '#pop'),
            include('strings')
        ],
        # raw double-quoted string: '""' escapes a quote, no backslashes
        'rdqs': [
            (r'"(?!")', String, '#pop'),
            (r'""', String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""(?!")', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'funcname': [
            (r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
            (r'`.+`', Name.Function, '#pop')
        ],
        'nl': [
            (r'\n', String)
        ],
        'float-number': [
            (r'\.(?!\.)[0-9_]*', Number.Float),
            (r'[eE][+-]?[0-9][0-9_]*', Number.Float),
            default('#pop')
        ],
        'float-suffix': [
            (r'\'[fF](32|64)', Number.Float),
            default('#pop')
        ],
        'int-suffix': [
            (r'\'[iI](32|64)', Number.Integer.Long),
            (r'\'[iI](8|16)', Number.Integer),
            default('#pop')
        ],
    }
class FantomLexer(RegexLexer):
    """
    For Fantom source code.

    Rules are ordered and state-driven; reordering entries changes which
    alternative wins, so the table below must be read top-to-bottom.

    .. versionadded:: 1.5
    """
    name = 'Fantom'
    aliases = ['fan']
    filenames = ['*.fan']
    mimetypes = ['application/x-fantom']
    # often used regexes
    # Template helper: substitutes $pod/$eos/$id/$type placeholders into a
    # pattern string.  NOTE(review): the parameter shadows the builtin
    # ``str``; harmless here since it is only used at class-body build time.
    def s(str):
        return Template(str).substitute(
            dict (
                pod = r'[\"\w\.]+',
                eos = r'\n|;',
                id = r'[a-zA-Z_]\w*',
                # all chars which can be part of type definition. Starts with
                # either letter, or [ (maps), or | (funcs)
                type = r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]\|\->\?]*?',
            )
        )
    tokens = {
        'comments': [
            (r'(?s)/\*.*?\*/', Comment.Multiline),           #Multiline
            (r'//.*?\n', Comment.Single),                    #Single line
            #todo: highlight references in fandocs
            (r'\*\*.*?\n', Comment.Special),                 #Fandoc
            (r'#.*\n', Comment.Single)                       #Shell-style
        ],
        'literals': [
            (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number),   #Duration
            (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number),
                                                             #Duration with dot
            (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float),    #Float/Decimal
            (r'\b-?0x[0-9a-fA-F_]+', Number.Hex),            #Hex
            (r'\b-?[\d_]+', Number.Integer),                 #Int
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), #Char
            (r'"', Punctuation, 'insideStr'),                #Opening quote
            (r'`', Punctuation, 'insideUri'),                #Opening accent
            (r'\b(true|false|null)\b', Keyword.Constant),    #Bool & null
            (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)',          #DSL
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, String, Punctuation)),
            (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?',               #Type/slot literal
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, Name.Function)),
            (r'\[,\]', Literal),                             # Empty list
            (s(r'($type)(\[,\])'),                           # Typed empty list
             bygroups(using(this, state = 'inType'), Literal)),
            (r'\[:\]', Literal),                             # Empty Map
            (s(r'($type)(\[:\])'),
             bygroups(using(this, state = 'inType'), Literal)),
        ],
        'insideStr': [
            (r'\\\\', String.Escape),                        #Escaped backslash
            (r'\\"', String.Escape),                         #Escaped "
            (r'\\`', String.Escape),                         #Escaped `
            (r'\$\w+', String.Interpol),                     #Subst var
            (r'\${.*?}', String.Interpol),                   #Subst expr
            (r'"', Punctuation, '#pop'),                     #Closing quot
            (r'.', String)                                   #String content
        ],
        'insideUri': [  #TODO: remove copy/paste str/uri
            (r'\\\\', String.Escape),                        #Escaped backslash
            (r'\\"', String.Escape),                         #Escaped "
            (r'\\`', String.Escape),                         #Escaped `
            (r'\$\w+', String.Interpol),                     #Subst var
            (r'\${.*?}', String.Interpol),                   #Subst expr
            (r'`', Punctuation, '#pop'),                     #Closing tick
            (r'.', String.Backtick)                          #URI content
        ],
        'protectionKeywords': [
            (r'\b(public|protected|private|internal)\b', Keyword),
        ],
        'typeKeywords': [
            (r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
        ],
        'methodKeywords': [
            (r'\b(abstract|native|once|override|static|virtual|final)\b',
             Keyword),
        ],
        'fieldKeywords': [
            (r'\b(abstract|const|final|native|override|static|virtual|'
             r'readonly)\b', Keyword)
        ],
        'otherKeywords': [
            (r'\b(try|catch|throw|finally|for|if|else|while|as|is|isnot|'
             r'switch|case|default|continue|break|do|return|get|set)\b',
             Keyword),
            (r'\b(it|this|super)\b', Name.Builtin.Pseudo),
        ],
        'operators': [
            (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
        ],
        'inType': [
            (r'[\[\]\|\->:\?]', Punctuation),
            (s(r'$id'), Name.Class),
            default('#pop'),
        ],
        'root': [
            include('comments'),
            include('protectionKeywords'),
            include('typeKeywords'),
            include('methodKeywords'),
            include('fieldKeywords'),
            include('literals'),
            include('otherKeywords'),
            include('operators'),
            (r'using\b', Keyword.Namespace, 'using'),        # Using stmt
            (r'@\w+', Name.Decorator, 'facet'),              # Symbol
            (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class),
             'inheritance'),                                 # Inheritance list
            ### Type var := val
            (s(r'($type)([ \t]+)($id)(\s*)(:=)'),
             bygroups(using(this, state = 'inType'), Text,
                      Name.Variable, Text, Operator)),
            ### var := val
            (s(r'($id)(\s*)(:=)'),
             bygroups(Name.Variable, Text, Operator)),
            ### .someId( or ->someId( ###
            (s(r'(\.|(?:\->))($id)(\s*)(\()'),
             bygroups(Operator, Name.Function, Text, Punctuation),
             'insideParen'),
            ### .someId or ->someId
            (s(r'(\.|(?:\->))($id)'),
             bygroups(Operator, Name.Function)),
            ### new makeXXX ( ####
            (r'(new)(\s+)(make\w*)(\s*)(\()',
             bygroups(Keyword, Text, Name.Function, Text, Punctuation),
             'insideMethodDeclArgs'),
            ### Type name ( ####
            (s(r'($type)([ \t]+)'                            #Return type and whitespace
               r'($id)(\s*)(\()'),                           #method name + open brace
             bygroups(using(this, state = 'inType'), Text,
                      Name.Function, Text, Punctuation),
             'insideMethodDeclArgs'),
            ### ArgType argName, #####
            (s(r'($type)(\s+)($id)(\s*)(,)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation)),
            #### ArgType argName) ####
            ## Covered in 'insideParen' state
            ### ArgType argName -> ArgType| ###
            (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation, Text, using(this, state = 'inType'),
                      Punctuation)),
            ### ArgType argName| ###
            (s(r'($type)(\s+)($id)(\s*)(\|)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation)),
            ### Type var
            (s(r'($type)([ \t]+)($id)'),
             bygroups(using(this, state='inType'), Text,
                      Name.Variable)),
            (r'\(', Punctuation, 'insideParen'),
            (r'\{', Punctuation, 'insideBrace'),
            (r'.', Text)
        ],
        'insideParen': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        'insideMethodDeclArgs': [
            (r'\)', Punctuation, '#pop'),
            (s(r'($type)(\s+)($id)(\s*)(\))'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation), '#pop'),
            include('root'),
        ],
        'insideBrace': [
            (r'\}', Punctuation, '#pop'),
            include('root'),
        ],
        'inheritance': [
            (r'\s+', Text),                                  #Whitespace
            (r':|,', Punctuation),
            (r'(?:(\w+)(::))?(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Class)),
            (r'{', Punctuation, '#pop')
        ],
        'using': [
            (r'[ \t]+', Text), # consume whitespaces
            (r'(\[)(\w+)(\])',
             bygroups(Punctuation, Comment.Special, Punctuation)), #ffi
            (r'(\")?([\w\.]+)(\")?',
             bygroups(Punctuation, Name.Namespace, Punctuation)), #podname
            (r'::', Punctuation, 'usingClass'),
            default('#pop')
        ],
        'usingClass': [
            (r'[ \t]+', Text), # consume whitespaces
            (r'(as)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'),
            (r'[\w\$]+', Name.Class),
            default('#pop:2') # jump out to root state
        ],
        'facet': [
            (r'\s+', Text),
            (r'{', Punctuation, 'facetFields'),
            default('#pop')
        ],
        'facetFields': [
            include('comments'),
            include('literals'),
            include('operators'),
            (r'\s+', Text),
            (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)),
            (r'}', Punctuation, '#pop'),
            (r'.', Text)
        ],
    }
class RustLexer(RegexLexer):
    """
    Lexer for the Rust programming language (version 0.9).

    The rule order is significant: keyword/prelude rules must precede the
    generic identifier rule, and numeric literal rules push the
    ``number_lit`` state to consume an optional type suffix.

    .. versionadded:: 1.6
    """
    name = 'Rust'
    filenames = ['*.rs']
    aliases = ['rust']
    mimetypes = ['text/x-rustsrc']
    tokens = {
        'root': [
            # Whitespace and Comments
            (r'\n', Text),
            (r'\s+', Text),
            (r'//[/!](.*?)\n', Comment.Doc),
            (r'//(.*?)\n', Comment.Single),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            # Keywords
            (r'(as|box|break|continue'
             r'|do|else|enum|extern'
             r'|fn|for|if|impl|in'
             r'|loop|match|mut|priv|proc|pub'
             r'|ref|return|static|\'static|struct|trait|true|type'
             r'|unsafe|while)\b',
             Keyword),
            (r'(alignof|be|const|offsetof|pure|sizeof|typeof|once|unsized'
             r'|yield)\b', Keyword.Reserved),
            (r'(mod|use)\b', Keyword.Namespace),
            (r'(true|false)\b', Keyword.Constant),
            (r'let\b', Keyword.Declaration),
            (r'(u8|u16|u32|u64|i8|i16|i32|i64|uint|int|f32|f64'
             r'|str|bool)\b', Keyword.Type),
            (r'self\b', Name.Builtin.Pseudo),
            # Prelude
            (r'(Freeze|Pod|Send|Sized|Add|Sub|Mul|Div|Rem|Neg|Not|BitAnd'
             r'|BitOr|BitXor|Drop|Shl|Shr|Index|Option|Some|None|Result'
             r'|Ok|Err|from_str|range|print|println|Any|AnyOwnExt|AnyRefExt'
             r'|AnyMutRefExt|Ascii|AsciiCast|OnwedAsciiCast|AsciiStr'
             r'|IntoBytes|Bool|ToCStr|Char|Clone|DeepClone|Eq|ApproxEq'
             r'|Ord|TotalEq|Ordering|Less|Equal|Greater|Equiv|Container'
             r'|Mutable|Map|MutableMap|Set|MutableSet|Default|FromStr'
             r'|Hash|FromIterator|Extendable|Iterator|DoubleEndedIterator'
             r'|RandomAccessIterator|CloneableIterator|OrdIterator'
             r'|MutableDoubleEndedIterator|ExactSize|Times|Algebraic'
             r'|Trigonometric|Exponential|Hyperbolic|Bitwise|BitCount'
             r'|Bounded|Integer|Fractional|Real|RealExt|Num|NumCast'
             r'|CheckedAdd|CheckedSub|CheckedMul|Orderable|Signed'
             r'|Unsigned|Round|Primitive|Int|Float|ToStrRadix'
             r'|ToPrimitive|FromPrimitive|GenericPath|Path|PosixPath'
             r'|WindowsPath|RawPtr|Buffer|Writer|Reader|Seek'
             r'|SendStr|SendStrOwned|SendStrStatic|IntoSendStr|Str'
             r'|StrVector|StrSlice|OwnedStr|IterBytes|ToStr|IntoStr'
             r'|CopyableTuple|ImmutableTuple|ImmutableTuple\d+'
             r'|Tuple\d+|ImmutableEqVector|ImmutableTotalOrdVector'
             r'|ImmutableCopyableVector|OwnedVector|OwnedCopyableVector'
             r'|OwnedEqVector|MutableVector|MutableTotalOrdVector'
             r'|Vector|VectorVector|CopyableVector|ImmutableVector'
             r'|Port|Chan|SharedChan|spawn|drop)\b', Name.Builtin),
            # Borrowed pointer
            (r'(&)(\'[A-Za-z_]\w*)?', bygroups(Operator, Name)),
            # Labels
            (r'\'[A-Za-z_]\w*:', Name.Label),
            # Character Literal
            (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
             String.Char),
            # Lifetime
            (r"""'[a-zA-Z_]\w*""", Name.Label),
            # Binary Literal
            (r'0b[01_]+', Number.Bin, 'number_lit'),
            # Octal Literal
            (r'0o[0-7_]+', Number.Oct, 'number_lit'),
            # Hexadecimal Literal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
            # Decimal Literal
            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float, 'number_lit'),
            (r'[0-9][0-9_]*', Number.Integer, 'number_lit'),
            # String Literal
            (r'"', String, 'string'),
            (r'r(#*)".*?"\1', String.Raw),
            # Operators and Punctuation
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?]', Operator),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
            # Attributes
            (r'#\[', Comment.Preproc, 'attribute['),
            # Macros
            (r'([A-Za-z_]\w*)!\s*([A-Za-z_]\w*)?\s*\{',
             bygroups(Comment.Preproc, Name), 'macro{'),
            (r'([A-Za-z_]\w*)!\s*([A-Za-z_]\w*)?\(',
             bygroups(Comment.Preproc, Name), 'macro('),
        ],
        # Consumes an optional numeric type suffix (u8, i32, f64, ...) and
        # pops; the pattern may match the empty string, so it always pops.
        'number_lit': [
            (r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape),
            (r'[^\\"]+', String),
            (r'\\', String),
        ],
        # Macro bodies: braces/parens nest via #push/#pop.
        'macro{': [
            (r'\{', Operator, '#push'),
            (r'\}', Operator, '#pop'),
        ],
        'macro(': [
            (r'\(', Operator, '#push'),
            (r'\)', Operator, '#pop'),
        ],
        # Attributes may nest brackets/parens and contain strings.
        'attribute_common': [
            (r'"', String, 'string'),
            (r'\[', Comment.Preproc, 'attribute['),
            (r'\(', Comment.Preproc, 'attribute('),
        ],
        'attribute[': [
            include('attribute_common'),
            (r'\];?', Comment.Preproc, '#pop'),
            (r'[^"\]]+', Comment.Preproc),
        ],
        'attribute(': [
            include('attribute_common'),
            (r'\);?', Comment.Preproc, '#pop'),
            (r'[^"\)]+', Comment.Preproc),
        ],
    }
class CudaLexer(CLexer):
    """
    For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
    source.

    Runs the plain C lexer and then re-tags CUDA-specific identifiers
    (qualifiers, vector types, builtin variables and intrinsics).

    .. versionadded:: 1.6
    """
    name = 'CUDA'
    filenames = ['*.cu', '*.cuh']
    aliases = ['cuda', 'cu']
    mimetypes = ['text/x-cuda']

    function_qualifiers = ['__device__', '__global__', '__host__',
                           '__noinline__', '__forceinline__']
    variable_qualifiers = ['__device__', '__constant__', '__shared__',
                           '__restrict__']
    vector_types = ['char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
                    'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
                    'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
                    'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
                    'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
                    'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
                    'ulonglong2', 'float1', 'float2', 'float3', 'float4',
                    'double1', 'double2', 'dim3']
    variables = ['gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize']
    functions = ['__threadfence_block', '__threadfence', '__threadfence_system',
                 '__syncthreads', '__syncthreads_count', '__syncthreads_and',
                 '__syncthreads_or']
    execution_confs = ['<<<', '>>>']

    def get_tokens_unprocessed(self, text):
        """Yield C tokens, remapping CUDA identifiers to richer token types."""
        # The order of this table matters: '__device__' is listed in both
        # qualifier lists, and the variable-qualifier mapping must win,
        # exactly as in the original elif chain.
        remap = (
            (self.variable_qualifiers, Keyword.Type),
            (self.vector_types, Keyword.Type),
            (self.variables, Name.Builtin),
            (self.execution_confs, Keyword.Pseudo),
            (self.function_qualifiers, Keyword.Reserved),
            (self.functions, Name.Function),
        )
        for index, token, value in CLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                for names, replacement in remap:
                    if value in names:
                        token = replacement
                        break
            yield index, token, value
class MonkeyLexer(RegexLexer):
    """
    For
    `Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
    source code.

    .. versionadded:: 1.6
    """
    name = 'Monkey'
    aliases = ['monkey']
    filenames = ['*.monkey']
    mimetypes = ['text/x-monkey']
    # Naming conventions used to classify bare identifiers (checked last).
    name_variable = r'[a-z_]\w*'
    name_function = r'[A-Z]\w*'
    name_constant = r'[A-Z_][A-Z0-9_]*'
    name_class = r'[A-Z]\w*'
    name_module = r'[a-z0-9_]*'
    keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
    # ? == Bool // % == Int // # == Float // $ == String
    keyword_type_special = r'[?%#$]'
    flags = re.MULTILINE
    tokens = {
        'root': [
            #Text
            (r'\s+', Text),
            # Comments
            (r"'.*", Comment),
            (r'(?i)^#rem\b', Comment.Multiline, 'comment'),
            # preprocessor directives
            (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
            # preprocessor variable (any line starting with '#' that is not a directive)
            (r'^#', Comment.Preproc, 'variables'),
            # String
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            # FIX: hex digits are 0-9/a-f/A-F; the previous class [0-9a-fA-Z]
            # wrongly accepted any uppercase letter up to Z.
            (r'\$[0-9a-fA-F]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Native data types
            (r'\b%s\b' % keyword_type, Keyword.Type),
            # Exception handling
            (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
            (r'Throwable', Name.Exception),
            # Builtins
            (r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
            (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
            (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
            # Keywords
            (r'(?i)^(Import)(\s+)(.*)(\n)',
             bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
            (r'(?i)^Strict\b.*\n', Keyword.Reserved),
            (r'(?i)(Const|Local|Global|Field)(\s+)',
             bygroups(Keyword.Declaration, Text), 'variables'),
            (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
             bygroups(Keyword.Reserved, Text), 'classname'),
            (r'(?i)(Function|Method)(\s+)',
             bygroups(Keyword.Reserved, Text), 'funcname'),
            (r'(?i)(?:End|Return|Public|Private|Extern|Property|'
             r'Final|Abstract)\b', Keyword.Reserved),
            # Flow Control stuff
            (r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
             r'Select|Case|Default|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'For|To|Until|Step|EachIn|Next|'
             r'Exit|Continue)\s+', Keyword.Reserved),
            # not used yet
            (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
            # Array
            (r'[\[\]]', Punctuation),
            # Other
            (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
            (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
            (r'[\(\){}!#,.:]', Punctuation),
            # catch the rest
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_function, Name.Function),
            (r'%s\b' % name_variable, Name.Variable),
        ],
        'funcname': [
            (r'(?i)%s\b' % name_function, Name.Function),
            (r':', Punctuation, 'classname'),
            (r'\s+', Text),
            (r'\(', Punctuation, 'variables'),
            (r'\)', Punctuation, '#pop')
        ],
        'classname': [
            (r'%s\.' % name_module, Name.Namespace),
            (r'%s\b' % keyword_type, Keyword.Type),
            (r'%s\b' % name_class, Name.Class),
            # array (of given size)
            (r'(\[)(\s*)(\d*)(\s*)(\])',
             bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
            # generics
            (r'\s+(?!<)', Text, '#pop'),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\n', Text, '#pop'),
            default('#pop')
        ],
        'variables': [
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_variable, Name.Variable),
            (r'%s' % keyword_type_special, Keyword.Type),
            (r'\s+', Text),
            (r':', Punctuation, 'classname'),
            # a comma continues the declaration list: stay in this state
            (r',', Punctuation, '#push'),
            default('#pop')
        ],
        'string': [
            (r'[^"~]+', String.Double),
            (r'~q|~n|~r|~t|~z|~~', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'comment' : [
            # #rem blocks nest; #end closes the innermost one
            (r'(?i)^#rem.*?', Comment.Multiline, "#push"),
            (r'(?i)^#end.*?', Comment.Multiline, "#pop"),
            (r'\n', Comment.Multiline),
            (r'.+', Comment.Multiline),
        ],
    }
class CobolLexer(RegexLexer):
    """
    Lexer for OpenCOBOL code.

    .. versionadded:: 1.6
    """
    name = 'COBOL'
    aliases = ['cobol']
    filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY']
    mimetypes = ['text/x-cobol']
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: by PICTURE and USAGE
    # Operators: **, *, +, -, /, <, >, <=, >=, =, <>
    # Logical (?): NOT, AND, OR
    # Reserved words:
    #   http://opencobol.add1tocobol.com/#reserved-words
    # Intrinsics:
    #   http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions
    tokens = {
        'root': [
            include('comment'),
            include('strings'),
            include('core'),
            include('nums'),
            (r'[a-z0-9]([_a-z0-9\-]*[a-z0-9]+)?', Name.Variable),
            # (r'[\s]+', Text),
            (r'[ \t]+', Text),
        ],
        'comment': [
            # fixed-format indicator column (col 7 '*' or '/') or free '*>'
            (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment),
        ],
        'core': [
            # Figurative constants
            (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?'
             r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
             r'\s*($|(?=[^0-9a-z_\-]))',
             Name.Constant),
            # Reserved words STATEMENTS and other bolds
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(ACCEPT|ADD|ALLOCATE|CALL|CANCEL|CLOSE|COMPUTE|'
             r'CONFIGURATION|CONTINUE|'
             r'DATA|DELETE|DISPLAY|DIVIDE|DIVISION|ELSE|END|END-ACCEPT|'
             r'END-ADD|END-CALL|END-COMPUTE|END-DELETE|END-DISPLAY|'
             r'END-DIVIDE|END-EVALUATE|END-IF|END-MULTIPLY|END-OF-PAGE|'
             r'END-PERFORM|END-READ|END-RETURN|END-REWRITE|END-SEARCH|'
             r'END-START|END-STRING|END-SUBTRACT|END-UNSTRING|END-WRITE|'
             r'ENVIRONMENT|EVALUATE|EXIT|FD|FILE|FILE-CONTROL|FOREVER|'
             r'FREE|GENERATE|GO|GOBACK|'
             r'IDENTIFICATION|IF|INITIALIZE|'
             r'INITIATE|INPUT-OUTPUT|INSPECT|INVOKE|I-O-CONTROL|LINKAGE|'
             r'LOCAL-STORAGE|MERGE|MOVE|MULTIPLY|OPEN|'
             r'PERFORM|PROCEDURE|PROGRAM-ID|RAISE|READ|RELEASE|RESUME|'
             r'RETURN|REWRITE|SCREEN|'
             r'SD|SEARCH|SECTION|SET|SORT|START|STOP|STRING|SUBTRACT|'
             r'SUPPRESS|TERMINATE|THEN|UNLOCK|UNSTRING|USE|VALIDATE|'
             r'WORKING-STORAGE|WRITE)'
             r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Reserved),
            # Reserved words
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(ACCESS|ADDRESS|ADVANCING|AFTER|ALL|'
             r'ALPHABET|ALPHABETIC|ALPHABETIC-LOWER|ALPHABETIC-UPPER|'
             # FIX: a '|' was missing after ALTERNATE, fusing it with ANY
             # into the bogus alternative 'ALTERNATEANY'.
             r'ALPHANUMERIC|ALPHANUMERIC-EDITED|ALSO|ALTER|ALTERNATE|'
             r'ANY|ARE|AREA|AREAS|ARGUMENT-NUMBER|ARGUMENT-VALUE|AS|'
             r'ASCENDING|ASSIGN|AT|AUTO|AUTO-SKIP|AUTOMATIC|AUTOTERMINATE|'
             r'BACKGROUND-COLOR|BASED|BEEP|BEFORE|BELL|'
             r'BLANK|'
             r'BLINK|BLOCK|BOTTOM|BY|BYTE-LENGTH|CHAINING|'
             r'CHARACTER|CHARACTERS|CLASS|CODE|CODE-SET|COL|COLLATING|'
             r'COLS|COLUMN|COLUMNS|COMMA|COMMAND-LINE|COMMIT|COMMON|'
             r'CONSTANT|CONTAINS|CONTENT|CONTROL|'
             r'CONTROLS|CONVERTING|COPY|CORR|CORRESPONDING|COUNT|CRT|'
             r'CURRENCY|CURSOR|CYCLE|DATE|DAY|DAY-OF-WEEK|DE|DEBUGGING|'
             r'DECIMAL-POINT|DECLARATIVES|DEFAULT|DELIMITED|'
             r'DELIMITER|DEPENDING|DESCENDING|DETAIL|DISK|'
             r'DOWN|DUPLICATES|DYNAMIC|EBCDIC|'
             r'ENTRY|ENVIRONMENT-NAME|ENVIRONMENT-VALUE|EOL|EOP|'
             r'EOS|ERASE|ERROR|ESCAPE|EXCEPTION|'
             r'EXCLUSIVE|EXTEND|EXTERNAL|'
             r'FILE-ID|FILLER|FINAL|FIRST|FIXED|FLOAT-LONG|FLOAT-SHORT|'
             r'FOOTING|FOR|FOREGROUND-COLOR|FORMAT|FROM|FULL|FUNCTION|'
             r'FUNCTION-ID|GIVING|GLOBAL|GROUP|'
             r'HEADING|HIGHLIGHT|I-O|ID|'
             r'IGNORE|IGNORING|IN|INDEX|INDEXED|INDICATE|'
             r'INITIAL|INITIALIZED|INPUT|'
             r'INTO|INTRINSIC|INVALID|IS|JUST|JUSTIFIED|KEY|LABEL|'
             r'LAST|LEADING|LEFT|LENGTH|LIMIT|LIMITS|LINAGE|'
             r'LINAGE-COUNTER|LINE|LINES|LOCALE|LOCK|'
             r'LOWLIGHT|MANUAL|MEMORY|MINUS|MODE|'
             r'MULTIPLE|NATIONAL|NATIONAL-EDITED|NATIVE|'
             r'NEGATIVE|NEXT|NO|NULL|NULLS|NUMBER|NUMBERS|NUMERIC|'
             r'NUMERIC-EDITED|OBJECT-COMPUTER|OCCURS|OF|OFF|OMITTED|ON|ONLY|'
             r'OPTIONAL|ORDER|ORGANIZATION|OTHER|OUTPUT|OVERFLOW|'
             r'OVERLINE|PACKED-DECIMAL|PADDING|PAGE|PARAGRAPH|'
             r'PLUS|POINTER|POSITION|POSITIVE|PRESENT|PREVIOUS|'
             r'PRINTER|PRINTING|PROCEDURE-POINTER|PROCEDURES|'
             r'PROCEED|PROGRAM|PROGRAM-POINTER|PROMPT|QUOTE|'
             r'QUOTES|RANDOM|RD|RECORD|RECORDING|RECORDS|RECURSIVE|'
             r'REDEFINES|REEL|REFERENCE|RELATIVE|REMAINDER|REMOVAL|'
             r'RENAMES|REPLACING|REPORT|REPORTING|REPORTS|REPOSITORY|'
             r'REQUIRED|RESERVE|RETURNING|REVERSE-VIDEO|REWIND|'
             r'RIGHT|ROLLBACK|ROUNDED|RUN|SAME|SCROLL|'
             r'SECURE|SEGMENT-LIMIT|SELECT|SENTENCE|SEPARATE|'
             r'SEQUENCE|SEQUENTIAL|SHARING|SIGN|SIGNED|SIGNED-INT|'
             r'SIGNED-LONG|SIGNED-SHORT|SIZE|SORT-MERGE|SOURCE|'
             r'SOURCE-COMPUTER|SPECIAL-NAMES|STANDARD|'
             r'STANDARD-1|STANDARD-2|STATUS|SUM|'
             r'SYMBOLIC|SYNC|SYNCHRONIZED|TALLYING|TAPE|'
             r'TEST|THROUGH|THRU|TIME|TIMES|TO|TOP|TRAILING|'
             r'TRANSFORM|TYPE|UNDERLINE|UNIT|UNSIGNED|'
             r'UNSIGNED-INT|UNSIGNED-LONG|UNSIGNED-SHORT|UNTIL|UP|'
             r'UPDATE|UPON|USAGE|USING|VALUE|VALUES|VARYING|WAIT|WHEN|'
             r'WITH|WORDS|YYYYDDD|YYYYMMDD)'
             r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Pseudo),
            # inactive reserved words (deliberately flagged as Error)
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(ACTIVE-CLASS|ALIGNED|ANYCASE|ARITHMETIC|ATTRIBUTE|B-AND|'
             r'B-NOT|B-OR|B-XOR|BIT|BOOLEAN|CD|CENTER|CF|CH|CHAIN|CLASS-ID|'
             r'CLASSIFICATION|COMMUNICATION|CONDITION|DATA-POINTER|'
             r'DESTINATION|DISABLE|EC|EGI|EMI|ENABLE|END-RECEIVE|'
             r'ENTRY-CONVENTION|EO|ESI|EXCEPTION-OBJECT|EXPANDS|FACTORY|'
             r'FLOAT-BINARY-16|FLOAT-BINARY-34|FLOAT-BINARY-7|'
             r'FLOAT-DECIMAL-16|FLOAT-DECIMAL-34|FLOAT-EXTENDED|FORMAT|'
             r'FUNCTION-POINTER|GET|GROUP-USAGE|IMPLEMENTS|INFINITY|'
             r'INHERITS|INTERFACE|INTERFACE-ID|INVOKE|LC_ALL|LC_COLLATE|'
             r'LC_CTYPE|LC_MESSAGES|LC_MONETARY|LC_NUMERIC|LC_TIME|'
             r'LINE-COUNTER|MESSAGE|METHOD|METHOD-ID|NESTED|NONE|NORMAL|'
             r'OBJECT|OBJECT-REFERENCE|OPTIONS|OVERRIDE|PAGE-COUNTER|PF|PH|'
             r'PROPERTY|PROTOTYPE|PURGE|QUEUE|RAISE|RAISING|RECEIVE|'
             r'RELATION|REPLACE|REPRESENTS-NOT-A-NUMBER|RESET|RESUME|RETRY|'
             r'RF|RH|SECONDS|SEGMENT|SELF|SEND|SOURCES|STATEMENT|STEP|'
             r'STRONG|SUB-QUEUE-1|SUB-QUEUE-2|SUB-QUEUE-3|SUPER|SYMBOL|'
             r'SYSTEM-DEFAULT|TABLE|TERMINAL|TEXT|TYPEDEF|UCS-4|UNIVERSAL|'
             r'USER-DEFAULT|UTF-16|UTF-8|VAL-STATUS|VALID|VALIDATE|'
             r'VALIDATE-STATUS)\s*($|(?=[^0-9a-z_\-]))', Error),
            # Data Types
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
             r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
             r'BINARY-C-LONG|'
             r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
             r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type),
            # Operators
            (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),
            # (r'(::)', Keyword.Declaration),
            (r'([(),;:&%.])', Punctuation),
            # Intrinsics
            (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
             r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
             r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
             r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
             r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|'
             r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG10|LOG|'
             r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|'
             r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|'
             r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|'
             r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
             r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
             r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
             r'($|(?=[^0-9a-z_\-]))', Name.Function),
            # Booleans
            (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin),
            # Comparing Operators
            (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|'
             r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word),
        ],
        # \"[^\"\n]*\"|\'[^\'\n]*\'
        'strings': [
            # apparently strings can be delimited by EOL if they are continued
            # in the next line
            (r'"[^"\n]*("|\n)', String.Double),
            (r"'[^'\n]*('|\n)", String.Single),
        ],
        'nums': [
            (r'\d+(\s*|\.$|$)', Number.Integer),
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
        ],
    }
class CobolFreeformatLexer(CobolLexer):
    """
    Lexer for Free format OpenCOBOL code.

    Inherits everything from :class:`CobolLexer` and only overrides the
    comment rule, since free-format COBOL has no fixed indicator column.

    .. versionadded:: 1.6
    """
    name = 'COBOLFree'
    aliases = ['cobolfree']
    filenames = ['*.cbl', '*.CBL']
    mimetypes = []
    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'comment': [
            # '*>' inline comments, or a line whose first word ends in '*'
            (r'(\*>.*\n|^\w*\*.*$)', Comment),
        ],
    }
class LogosLexer(ObjectiveCppLexer):
    """
    For Logos + Objective-C source code with preprocessor directives.

    .. versionadded:: 1.6
    """
    name = 'Logos'
    aliases = ['logos']
    filenames = ['*.x', '*.xi', '*.xm', '*.xmi']
    mimetypes = ['text/x-logos']
    priority = 0.25
    # NOTE: all regex literals below are raw strings; several previously
    # used plain strings with '\s'/'\w'/'\)' — invalid escape sequences
    # that raise SyntaxWarning on modern Python.
    tokens = {
        'statements': [
            (r'(%orig|%log)\b', Keyword),
            (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))',
             bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)),
            (r'(%init)\b(\()',
             bygroups(Keyword, Punctuation), 'logos_init_directive'),
            (r'(%init)(?=\s*;)', bygroups(Keyword)),
            (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
             bygroups(Keyword, Text, Name.Class), '#pop'),
            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
             ('#pop', 'logos_classname')),
            inherit,
        ],
        'logos_init_directive' : [
            (r'\s+', Text),
            (',', Punctuation, ('logos_init_directive', '#pop')),
            (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)',
             bygroups(Name.Class, Text, Punctuation, Text, Text)),
            (r'([a-zA-Z$_][\w$]*)', Name.Class),
            (r'\)', Punctuation, '#pop'),
        ],
        'logos_classname' : [
            (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
             bygroups(Name.Class, Text, Name.Class), '#pop'),
            (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
        ],
        'root': [
            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
             'logos_classname'),
            (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
             bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
            (r'(%ctor)(\s*)({)', bygroups(Keyword, Text, Punctuation),
             'function'),
            (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
             bygroups(Keyword, Text, Keyword, String, Keyword)),
            (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
            inherit,
        ],
    }
    _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()')
    def analyse_text(text):
        # Claim the file outright when any Logos directive appears.
        if LogosLexer._logos_keywords.search(text):
            return 1.0
        return 0
class ChapelLexer(RegexLexer):
    """
    For `Chapel <http://chapel.cray.com/>`_ source.

    .. versionadded:: 2.0
    """
    name = 'Chapel'
    filenames = ['*.chpl']
    aliases = ['chapel', 'chpl']
    # mimetypes = ['text/x-chapel']
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(config|const|in|inout|out|param|ref|type|var)\b',
             Keyword.Declaration),
            (r'(false|nil|true)\b', Keyword.Constant),
            (r'(bool|complex|imag|int|opaque|range|real|string|uint)\b',
             Keyword.Type),
            (r'(atomic|begin|break|by|cobegin|coforall|continue|iter|'
             r'delete|dmapped|do|domain|else|enum|export|extern|for|forall|'
             r'if|index|inline|label|lambda|let|local|new|on|otherwise|'
             r'reduce|return|scan|select|serial|single|sparse|'
             r'subdomain|sync|then|use|when|where|while|yield|zip)\b',
             Keyword),
            # 'proc' may be split over a line continuation, hence (?:\s|\\\s)
            (r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
            (r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
             'classname'),
            # imaginary integers
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # reals cannot end with a period due to lexical ambiguity with
            # .. operator. See reference for rationale.
            (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+i?', Number.Float),
            # integer literals
            # -- binary
            (r'0[bB][0-1]+', Number.Bin),
            # -- hex
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # strings
            (r'["\'](\\\\|\\"|[^"\'])*["\']', String),
            # tokens
            (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
             r'<=>|\.\.|by|#|\.\.\.|'
             r'&&|\|\||!|&|\||\^|~|<<|>>|'
             r'==|!=|<=|>=|<|>|'
             r'[+\-*/%]|\*\*)', Operator),
            (r'[:;,.?()\[\]{}]', Punctuation),
            # identifiers
            (r'[a-zA-Z_][\w$]*', Name.Other),
        ],
        # one-shot states: consume the declared name, then return to root
        'classname': [
            (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
        ],
        'procname': [
            (r'[a-zA-Z_][\w$]*', Name.Function, '#pop'),
        ],
    }
class EiffelLexer(RegexLexer):
    """
    For `Eiffel <http://www.eiffel.com>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'Eiffel'
    aliases = ['eiffel']
    filenames = ['*.e']
    mimetypes = ['text/x-eiffel']
    tokens = {
        'root': [
            # FIX: this whitespace rule was duplicated (it appeared again
            # right after the comment rule); the second copy was dead code.
            (r'[^\S\n]+', Text),
            (r'--.*?\n', Comment.Single),
            # Please note that keyword and operator are case insensitive.
            (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
            (r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word),
            (r'(?i)\b(across|agent|alias|all|as|assign|attached|attribute|check|'
             r'class|convert|create|debug|deferred|detachable|do|else|elseif|'
             r'end|ensure|expanded|export|external|feature|from|frozen|if|'
             r'inherit|inspect|invariant|like|local|loop|none|note|obsolete|'
             r'old|once|only|redefine|rename|require|rescue|retry|select|'
             r'separate|then|undefine|until|variant|when)\b',Keyword.Reserved),
            # verbatim ("[ ... ]") strings, then ordinary strings
            (r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String),
            (r'"([^"%\n]|%.)*?"', String),
            include('numbers'),
            (r"'([^'%]|%'|%%)'", String.Char),
            (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\\?!#%&@|+/\-=\>\*$<|^\[\]])", Operator),
            (r"([{}():;,.])", Punctuation),
            (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
            (r'([A-Z][A-Z0-9_]*)', Name.Class),
            (r'\n+', Text),
        ],
        'numbers': [
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'0[bB][0-1]+', Number.Bin),
            (r'0[cC][0-7]+', Number.Oct),
            (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
            (r'[0-9]+', Number.Integer),
        ],
    }
class Inform6Lexer(RegexLexer):
    """
    For `Inform 6 <http://inform-fiction.org/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Inform 6'
    aliases = ['inform6', 'i6']
    filenames = ['*.inf']
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    _name = r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Inform 7 maps these four character classes to their ASCII
    # equivalents. To support Inform 6 inclusions within Inform 7,
    # Inform6Lexer maps them too.
    _dash = u'\\-\u2010-\u2014'
    _dquote = u'"\u201c\u201d'
    _squote = u"'\u2018\u2019"
    _newline = u'\\n\u0085\u2028\u2029'
    # State machine: 'root' skips an optional leading block of lines
    # beginning with '!%', then hands control to 'directive' -- the
    # input is lexed as a sequence of directives from there on.
    # States whose names start with '_' are internal helpers that are
    # included (or pushed) by the public states.
    tokens = {
        'root': [
            (r'(\A(!%%[^%s]*[%s])+)?' % (_newline, _newline), Comment.Preproc,
             'directive')
        ],
        # Whitespace and '!' line comments, shared by most states via
        # include().
        '_whitespace': [
            (r'\s+', Text),
            (r'![^%s]*' % _newline, Comment.Single)
        ],
        'default': [
            include('_whitespace'),
            (r'\[', Punctuation, 'many-values'),  # Array initialization
            (r':|(?=;)', Punctuation, '#pop'),
            (r'<', Punctuation),  # Second angle bracket in an action statement
            default(('expression', '_expression'))
        ],
        # Expressions
        '_expression': [
            include('_whitespace'),
            (r'(?=sp\b)', Text, '#pop'),
            (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
             ('#pop', 'value')),
            (r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
            (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
        ],
        'expression': [
            include('_whitespace'),
            (r'\(', Punctuation, ('expression', '_expression')),
            (r'\)', Punctuation, '#pop'),
            (r'\[', Punctuation, ('#pop', 'statements', 'locals')),
            (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
            (r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
            (r',', Punctuation, '_expression'),
            (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
             Operator, '_expression'),
            (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
             '_expression'),
            (r'sp\b', Name),
            (r'\?~?', Name.Label, 'label?'),
            (r'[@{]', Error),
            default('#pop')
        ],
        '_assembly-expression': [
            (r'\(', Punctuation, ('#push', '_expression')),
            (r'[\[\]]', Punctuation),
            (r'[%s]>' % _dash, Punctuation, '_expression'),
            (r'sp\b', Keyword.Pseudo),
            (r';', Punctuation, '#pop:3'),
            include('expression')
        ],
        '_for-expression': [
            (r'\)', Punctuation, '#pop:2'),
            (r':', Punctuation, '#pop'),
            include('expression')
        ],
        '_keyword-expression': [
            (r'(from|near|to)\b', Keyword, '_expression'),
            include('expression')
        ],
        '_list-expression': [
            (r',', Punctuation, '#pop'),
            include('expression')
        ],
        '_object-expression': [
            (r'has\b', Keyword.Declaration, '#pop'),
            include('_list-expression')
        ],
        # Values
        'value': [
            include('_whitespace'),
            # Strings
            (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
            (r'([%s])(@{[0-9a-fA-F]{1,4}})([%s])' % (_squote, _squote),
             bygroups(String.Char, String.Escape, String.Char), '#pop'),
            (r'([%s])(@..)([%s])' % (_squote, _squote),
             bygroups(String.Char, String.Escape, String.Char), '#pop'),
            (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
            # Numbers
            (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
             Number.Float, '#pop'),
            (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
            (r'\$\$[01]+', Number.Bin, '#pop'),
            (r'[0-9]+', Number.Integer, '#pop'),
            # Values prefixed by hashes
            (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
            (r'(#g\$)(%s)' % _name,
             bygroups(Operator, Name.Variable.Global), '#pop'),
            (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
            (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
            (r'#', Name.Builtin, ('#pop', 'system-constant')),
            # System functions
            (r'(child|children|elder|eldest|glk|indirect|metaclass|parent|'
             r'random|sibling|younger|youngest)\b', Name.Builtin, '#pop'),
            # Metaclasses
            (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
            # Veneer routines
            (r'(?i)(Box__Routine|CA__Pr|CDefArt|CInDefArt|Cl__Ms|'
             r'Copy__Primitive|CP__Tab|DA__Pr|DB__Pr|DefArt|Dynam__String|'
             r'EnglishNumber|Glk__Wrap|IA__Pr|IB__Pr|InDefArt|Main__|'
             r'Meta__class|OB__Move|OB__Remove|OC__Cl|OP__Pr|Print__Addr|'
             r'Print__PName|PrintShortName|RA__Pr|RA__Sc|RL__Pr|R_Process|'
             r'RT__ChG|RT__ChGt|RT__ChLDB|RT__ChLDW|RT__ChPR|RT__ChPrintA|'
             r'RT__ChPrintC|RT__ChPrintO|RT__ChPrintS|RT__ChPS|RT__ChR|'
             r'RT__ChSTB|RT__ChSTW|RT__ChT|RT__Err|RT__TrPS|RV__Pr|'
             r'Symb__Tab|Unsigned__Compare|WV__Pr|Z__Region)\b', Name.Builtin,
             '#pop'),
            # Other built-in symbols
            (r'(?i)(call|copy|create|DEBUG|destroy|DICT_CHAR_SIZE|'
             r'DICT_ENTRY_BYTES|DICT_IS_UNICODE|DICT_WORD_SIZE|false|'
             r'FLOAT_INFINITY|FLOAT_NAN|FLOAT_NINFINITY|GOBJFIELD_CHAIN|'
             r'GOBJFIELD_CHILD|GOBJFIELD_NAME|GOBJFIELD_PARENT|'
             r'GOBJFIELD_PROPTAB|GOBJFIELD_SIBLING|GOBJ_EXT_START|'
             r'GOBJ_TOTAL_LENGTH|Grammar__Version|INDIV_PROP_START|INFIX|'
             r'infix__watching|MODULE_MODE|name|nothing|NUM_ATTR_BYTES|print|'
             r'print_to_array|recreate|remaining|self|sender|STRICT_MODE|'
             r'sw__var|sys__glob0|sys__glob1|sys__glob2|sys_statusline_flag|'
             r'TARGET_GLULX|TARGET_ZCODE|temp__global2|temp__global3|'
             r'temp__global4|temp_global|true|USE_MODULES|WORDSIZE)\b',
             Name.Builtin, '#pop'),
            # Other values
            (_name, Name, '#pop')
        ],
        # Strings
        'dictionary-word': [
            (r'[~^]+', String.Escape),
            (r'[^~^\\@({%s]+' % _squote, String.Single),
            (r'[({]', String.Single),
            (r'@{[0-9a-fA-F]{,4}}', String.Escape),
            (r'@..', String.Escape),
            (r'[%s]' % _squote, String.Single, '#pop')
        ],
        'string': [
            (r'[~^]+', String.Escape),
            (r'[^~^\\@({%s]+' % _dquote, String.Double),
            (r'[({]', String.Double),
            (r'\\', String.Escape),
            (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
             (_newline, _newline), String.Escape),
            (r'@(\\\s*[%s]\s*)*{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}'
             r'(\\\s*[%s]\s*)*}' % (_newline, _newline, _newline),
             String.Escape),
            (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
             String.Escape),
            (r'[%s]' % _dquote, String.Double, '#pop')
        ],
        'plain-string': [
            (r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
            (r'[~^({\[\]]', String.Double),
            (r'\\', String.Escape),
            (r'[%s]' % _dquote, String.Double, '#pop')
        ],
        # Names
        '_constant': [
            include('_whitespace'),
            (_name, Name.Constant, '#pop'),
            include('value')
        ],
        '_global': [
            include('_whitespace'),
            (_name, Name.Variable.Global, '#pop'),
            include('value')
        ],
        'label?': [
            include('_whitespace'),
            (r'(%s)?' % _name, Name.Label, '#pop')
        ],
        'variable?': [
            include('_whitespace'),
            (r'(%s)?' % _name, Name.Variable, '#pop')
        ],
        # Values after hashes
        'obsolete-dictionary-word': [
            (r'\S[a-zA-Z_0-9]*', String.Other, '#pop')
        ],
        'system-constant': [
            include('_whitespace'),
            (_name, Name.Builtin, '#pop')
        ],
        # Directives
        'directive': [
            include('_whitespace'),
            (r'#', Punctuation),
            (r';', Punctuation, '#pop'),
            (r'\[', Punctuation,
             ('default', 'statements', 'locals', 'routine-name?')),
            (r'(?i)(abbreviate|endif|dictionary|ifdef|iffalse|ifndef|ifnot|'
             r'iftrue|ifv3|ifv5|release|serial|switches|system_file|version)'
             r'\b', Keyword, 'default'),
            (r'(?i)(array|global)\b', Keyword,
             ('default', 'directive-keyword?', '_global')),
            (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
            (r'(?i)class\b', Keyword,
             ('object-body', 'duplicates', 'class-name')),
            (r'(?i)(constant|default)\b', Keyword,
             ('default', 'expression', '_constant')),
            (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
            (r'(?i)(extend|verb)\b', Keyword, 'grammar'),
            (r'(?i)fake_action\b', Keyword, ('default', '_constant')),
            (r'(?i)import\b', Keyword, 'manifest'),
            (r'(?i)(include|link)\b', Keyword,
             ('default', 'before-plain-string')),
            (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
            (r'(?i)message\b', Keyword, ('default', 'diagnostic')),
            (r'(?i)(nearby|object)\b', Keyword,
             ('object-body', '_object-head')),
            (r'(?i)property\b', Keyword,
             ('default', 'alias?', '_constant', 'property-keyword*')),
            (r'(?i)replace\b', Keyword,
             ('default', 'routine-name?', 'routine-name?')),
            (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
            (r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
            (r'(?i)trace\b', Keyword,
             ('default', 'trace-keyword?', 'trace-keyword?')),
            (r'(?i)zcharacter\b', Keyword,
             ('default', 'directive-keyword?', 'directive-keyword?')),
            (_name, Name.Class, ('object-body', '_object-head'))
        ],
        # [, Replace, Stub
        'routine-name?': [
            include('_whitespace'),
            (r'(%s)?' % _name, Name.Function, '#pop')
        ],
        'locals': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r'\*', Punctuation),
            (_name, Name.Variable)
        ],
        # Array
        'many-values': [
            include('_whitespace'),
            (r';', Punctuation),
            (r'\]', Punctuation, '#pop'),
            (r':', Error),
            default(('expression', '_expression'))
        ],
        # Attribute, Property
        'alias?': [
            include('_whitespace'),
            (r'alias\b', Keyword, ('#pop', '_constant')),
            default('#pop')
        ],
        # Class, Object, Nearby
        'class-name': [
            include('_whitespace'),
            (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
            (_name, Name.Class, '#pop')
        ],
        'duplicates': [
            include('_whitespace'),
            (r'\(', Punctuation, ('#pop', 'expression', '_expression')),
            default('#pop')
        ],
        '_object-head': [
            (r'[%s]>' % _dash, Punctuation),
            (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
            include('_global')
        ],
        'object-body': [
            include('_whitespace'),
            (r';', Punctuation, '#pop:2'),
            (r',', Punctuation),
            (r'class\b', Keyword.Declaration, 'class-segment'),
            (r'(has|private|with)\b', Keyword.Declaration),
            (r':', Error),
            default(('_object-expression', '_expression'))
        ],
        'class-segment': [
            include('_whitespace'),
            (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
            (_name, Name.Class),
            default('value')
        ],
        # Extend, Verb
        'grammar': [
            include('_whitespace'),
            (r'=', Punctuation, ('#pop', 'default')),
            (r'\*', Punctuation, ('#pop', 'grammar-line')),
            default('_directive-keyword')
        ],
        'grammar-line': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r'[/*]', Punctuation),
            (r'[%s]>' % _dash, Punctuation, 'value'),
            (r'(noun|scope)\b', Keyword, '=routine'),
            default('_directive-keyword')
        ],
        '=routine': [
            include('_whitespace'),
            (r'=', Punctuation, 'routine-name?'),
            default('#pop')
        ],
        # Import
        'manifest': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(?i)(global\b)?', Keyword, '_global')
        ],
        # Include, Link, Message
        'diagnostic': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
            default(('#pop', 'before-plain-string', 'directive-keyword?'))
        ],
        'before-plain-string': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string'))
        ],
        'message-string': [
            (r'[~^]+', String.Escape),
            include('plain-string')
        ],
        # Keywords used in directives
        '_directive-keyword!': [
            include('_whitespace'),
            (r'(additive|alias|buffer|class|creature|data|error|fatalerror|'
             r'first|has|held|initial|initstr|last|long|meta|multi|'
             r'multiexcept|multiheld|multiinside|noun|number|only|private|'
             r'replace|reverse|scope|score|special|string|table|terminating|'
             r'time|topic|warning|with)\b', Keyword, '#pop'),
            (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
        ],
        '_directive-keyword': [
            include('_directive-keyword!'),
            include('value')
        ],
        'directive-keyword?': [
            include('_directive-keyword!'),
            default('#pop')
        ],
        'property-keyword*': [
            include('_whitespace'),
            (r'(additive|long)\b', Keyword),
            default('#pop')
        ],
        'trace-keyword?': [
            include('_whitespace'),
            (r'(assembly|dictionary|expressions|lines|linker|objects|off|on|'
             r'symbols|tokens|verbs)\b', Keyword, '#pop'),
            default('#pop')
        ],
        # Statements
        'statements': [
            include('_whitespace'),
            (r'\]', Punctuation, '#pop'),
            (r'[;{}]', Punctuation),
            (r'(box|break|continue|default|give|inversion|new_line|quit|read|'
             r'remove|return|rfalse|rtrue|spaces|string|until)\b', Keyword,
             'default'),
            (r'(do|else)\b', Keyword),
            (r'(font|style)\b', Keyword,
             ('default', 'miscellaneous-keyword?')),
            (r'for\b', Keyword, ('for', '(?')),
            (r'(if|switch|while)', Keyword,
             ('expression', '_expression', '(?')),
            (r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
            (r'objectloop\b', Keyword,
             ('_keyword-expression', 'variable?', '(?')),
            (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
            (r'\.', Name.Label, 'label?'),
            (r'@', Keyword, 'opcode'),
            (r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
            (r'<', Punctuation, 'default'),
            (r'(move\b)?', Keyword,
             ('default', '_keyword-expression', '_expression'))
        ],
        'miscellaneous-keyword?': [
            include('_whitespace'),
            (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
             Keyword, '#pop'),
            (r'(a|A|an|address|char|name|number|object|property|string|the|'
             r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
             '#pop'),
            (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
             '#pop'),
            default('#pop')
        ],
        '(?': [
            include('_whitespace'),
            (r'\(?', Punctuation, '#pop')
        ],
        'for': [
            include('_whitespace'),
            (r';?', Punctuation, ('_for-expression', '_expression'))
        ],
        'print-list': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r':', Error),
            default(('_list-expression', '_expression', '_list-expression', 'form'))
        ],
        'form': [
            include('_whitespace'),
            (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
            default('#pop')
        ],
        # Assembly
        'opcode': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
            (_name, Keyword, 'operands')
        ],
        'operands': [
            (r':', Error),
            default(('_assembly-expression', '_expression'))
        ]
    }
    def get_tokens_unprocessed(self, text):
        # 'in' is either a keyword or an operator.
        # If the token two tokens after 'in' is ')', 'in' is a keyword:
        # objectloop(a in b)
        # Otherwise, it is an operator:
        # objectloop(a in b && true)
        #
        # To decide, buffer the 'in' token and the two tokens after it
        # in objectloop_queue, then retag and flush the queue once the
        # disambiguating token has been seen.
        objectloop_queue = []
        objectloop_token_count = -1
        previous_token = None
        for index, token, value in RegexLexer.get_tokens_unprocessed(self,
                                                                     text):
            if previous_token is Name.Variable and value == 'in':
                objectloop_queue = [[index, token, value]]
                objectloop_token_count = 2
            elif objectloop_token_count > 0:
                if token not in Comment and token not in Text:
                    objectloop_token_count -= 1
                objectloop_queue.append((index, token, value))
            else:
                if objectloop_token_count == 0:
                    if objectloop_queue[-1][2] == ')':
                        objectloop_queue[0][1] = Keyword
                    while objectloop_queue:
                        yield objectloop_queue.pop(0)
                    objectloop_token_count = -1
                yield index, token, value
            if token not in Comment and token not in Text:
                previous_token = token
        # Emit any tokens still buffered when the input ends.
        while objectloop_queue:
            yield objectloop_queue.pop(0)
class Inform7Lexer(RegexLexer):
    """
    For `Inform 7 <http://inform7.com/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Inform 7'
    aliases = ['inform7', 'i7']
    filenames = ['*.ni', '*.i7x']
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    # Reuse the character-class aliases from Inform6Lexer so that both
    # lexers treat the typographic dash/quote variants identically.
    _dash = Inform6Lexer._dash
    _dquote = Inform6Lexer._dquote
    _newline = Inform6Lexer._newline
    _start = r'\A|(?<=[%s])' % _newline
    # There are three variants of Inform 7, differing in how to
    # interpret at signs and braces in I6T. In top-level inclusions, at
    # signs in the first column are inweb syntax. In phrase definitions
    # and use options, tokens in braces are treated as I7. Use options
    # also interpret "{N}".
    tokens = {}
    token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
    # Build one complete token table per variant; __init__ later picks
    # the variant requested via the 'i6t' option.
    for level in token_variants:
        tokens[level] = {
            '+i6-root': list(Inform6Lexer.tokens['root']),
            '+i6t-root': [  # For Inform6TemplateLexer
                (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
                 ('directive', '+p'))
            ],
            'root': [
                (r'(\|?\s)+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]' % _dquote, Generic.Heading,
                 ('+main', '+titling', '+titling-string')),
                default(('+main', '+heading?'))
            ],
            '+titling-string': [
                (r'[^%s]+' % _dquote, Generic.Heading),
                (r'[%s]' % _dquote, Generic.Heading, '#pop')
            ],
            '+titling': [
                (r'\[', Comment.Multiline, '+comment'),
                (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
                (r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
                (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
                 Text, ('#pop', '+heading?')),
                (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
                (r'[|%s]' % _newline, Generic.Heading)
            ],
            '+main': [
                (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
                (r'[%s]' % _dquote, String.Double, '+text'),
                (r':', Text, '+phrase-definition'),
                (r'(?i)\bas\b', Text, '+use-option'),
                (r'\[', Comment.Multiline, '+comment'),
                # Inform 6 inclusions are delimited by (- ... -).
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive'),
                                i6t='+i6t-not-inline'), Punctuation)),
                (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
                 (_start, _dquote, _newline), Text, '+heading?'),
                (r'(?i)[a(|%s]' % _newline, Text)
            ],
            '+phrase-definition': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive',
                                             'default', 'statements'),
                                i6t='+i6t-inline'), Punctuation), '#pop'),
                default('#pop')
            ],
            '+use-option': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive'),
                                i6t='+i6t-use-option'), Punctuation), '#pop'),
                default('#pop')
            ],
            # Comments in square brackets nest.
            '+comment': [
                (r'[^\[\]]+', Comment.Multiline),
                (r'\[', Comment.Multiline, '#push'),
                (r'\]', Comment.Multiline, '#pop')
            ],
            '+text': [
                (r'[^\[%s]+' % _dquote, String.Double),
                (r'\[.*?\]', String.Interpol),
                (r'[%s]' % _dquote, String.Double, '#pop')
            ],
            '+heading?': [
                (r'(\|?\s)+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
                (r'[%s]{1,3}' % _dash, Text),
                (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
                 Generic.Heading, '#pop'),
                default('#pop')
            ],
            '+documentation-heading': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(?i)documentation\s+', Text, '+documentation-heading2'),
                default('#pop')
            ],
            '+documentation-heading2': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]{4}\s' % _dash, Text, '+documentation'),
                default('#pop:2')
            ],
            '+documentation': [
                (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
                 (_start, _newline), Generic.Heading),
                (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
                 Generic.Subheading),
                (r'((%s)\t.*?[%s])+' % (_start, _newline),
                 using(this, state='+main')),
                (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
                (r'\[', Comment.Multiline, '+comment'),
            ],
            # I6T (Inform 6 template) syntax, differing per variant.
            '+i6t-not-inline': [
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc),
                (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
                 Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading, '+p')
            ],
            '+i6t-use-option': [
                include('+i6t-not-inline'),
                (r'({)(N)(})', bygroups(Punctuation, Text, Punctuation))
            ],
            '+i6t-inline': [
                (r'({)(\S[^}]*)?(})',
                 bygroups(Punctuation, using(this, state='+main'),
                          Punctuation))
            ],
            '+i6t': [
                (r'({[%s])(![^}]*)(}?)' % _dash,
                 bygroups(Punctuation, Comment.Single, Punctuation)),
                (r'({[%s])(lines)(:)([^}]*)(}?)' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation, Text,
                          Punctuation), '+lines'),
                (r'({[%s])([^:}]*)(:?)([^}]*)(}?)' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation, Text,
                          Punctuation)),
                (r'(\(\+)(.*?)(\+\)|\Z)',
                 bygroups(Punctuation, using(this, state='+main'),
                          Punctuation))
            ],
            '+p': [
                (r'[^@]+', Comment.Preproc),
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc, '#pop'),
                (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading),
                (r'@', Comment.Preproc)
            ],
            '+lines': [
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc),
                (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
                 Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading, '+p'),
                (r'(%s)@[a-zA-Z_0-9]*[ %s]' % (_start, _newline), Keyword),
                (r'![^%s]*' % _newline, Comment.Single),
                (r'({)([%s]endlines)(})' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation), '#pop'),
                (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
            ]
        }
        # Inform 7 can include snippets of Inform 6 template language,
        # so all of Inform6Lexer's states are copied here, with
        # modifications to account for template syntax. Inform7Lexer's
        # own states begin with '+' to avoid name conflicts. Some of
        # Inform6Lexer's states begin with '_': these are not modified.
        # They deal with template syntax either by including modified
        # states, or by matching r'' then pushing to modified states.
        for token in Inform6Lexer.tokens:
            if token == 'root':
                continue
            tokens[level][token] = list(Inform6Lexer.tokens[token])
            if not token.startswith('_'):
                tokens[level][token][:0] = [include('+i6t'), include(level)]
    def __init__(self, **options):
        # Pick the token table for the variant requested via the 'i6t'
        # option: reuse the processed table if it is already cached,
        # otherwise process the token definitions for it now.
        level = options.get('i6t', '+i6t-not-inline')
        if level not in self._all_tokens:
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class Inform6TemplateLexer(Inform7Lexer):
    """
    For `Inform 6 template
    <http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.

    .. versionadded:: 2.0
    """
    name = 'Inform 6 template'
    aliases = ['i6t']
    filenames = ['*.i6t']

    def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
        # Same lexing machinery as Inform7Lexer, but starting from the
        # I6T root state rather than the Inform 7 one.
        return super(Inform6TemplateLexer, self).get_tokens_unprocessed(
            text, stack)
class MqlLexer(CppLexer):
"""
For `MQL4 <http://docs.mql4.com/>`_ and
`MQL5 <http://www.mql5.com/en/docs>`_ source code.
.. versionadded:: 2.0
"""
name = 'MQL'
aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
filenames = ['*.mq4', '*.mq5', '*.mqh']
mimetypes = ['text/x-mql']
tokens = {
'statements': [
(r'(input|_Digits|_Point|_LastError|_Period|_RandomSeed|'
r'_StopFlag|_Symbol|_UninitReason|'
r'Ask|Bars|Bid|Close|Digits|High|Low|Open|Point|Time|Volume)\b',
Keyword),
(r'(void|char|uchar|bool|short|ushort|int|uint|color|long|ulong|datetime|'
r'float|double|string)\b',
Keyword.Type),
(r'(Alert|CheckPointer|Comment|DebugBreak|ExpertRemove|'
r'GetPointer|GetTickCount|MessageBox|PeriodSeconds|PlaySound|'
r'Print|PrintFormat|ResetLastError|ResourceCreate|ResourceFree|'
r'ResourceReadImage|ResourceSave|SendFTP|SendMail|SendNotification|'
r'Sleep|TerminalClose|TesterStatistics|ZeroMemory|'
r'ArrayBsearch|ArrayCopy|ArrayCompare|ArrayFree|ArrayGetAsSeries|'
r'ArrayInitialize|ArrayFill|ArrayIsSeries|ArrayIsDynamic|'
r'ArrayMaximum|ArrayMinimum|ArrayRange|ArrayResize|'
r'ArraySetAsSeries|ArraySize|ArraySort|ArrayCopyRates|'
r'ArrayCopySeries|ArrayDimension|'
r'CharToString|DoubleToString|EnumToString|NormalizeDouble|'
r'StringToDouble|StringToInteger|StringToTime|TimeToString|'
r'IntegerToString|ShortToString|ShortArrayToString|'
r'StringToShortArray|CharArrayToString|StringToCharArray|'
r'ColorToARGB|ColorToString|StringToColor|StringFormat|'
r'CharToStr|DoubleToStr|StrToDouble|StrToInteger|StrToTime|TimeToStr|'
r'MathAbs|MathArccos|MathArcsin|MathArctan|MathCeil|MathCos|MathExp|'
r'MathFloor|MathLog|MathMax|MathMin|MathMod|MathPow|MathRand|'
r'MathRound|MathSin|MathSqrt|MathSrand|MathTan|MathIsValidNumber|'
r'StringAdd|StringBufferLen|StringCompare|StringConcatenate|StringFill|'
r'StringFind|StringGetCharacter|StringInit|StringLen|StringReplace|'
r'StringSetCharacter|StringSplit|StringSubstr|StringToLower|StringToUpper|'
r'StringTrimLeft|StringTrimRight|StringGetChar|StringSetChar|'
r'TimeCurrent|TimeTradeServer|TimeLocal|TimeGMT|TimeDaylightSavings|'
r'TimeGMTOffset|TimeToStruct|StructToTime|Day|DayOfWeek|DayOfYear|'
r'Hour|Minute|Month|Seconds|TimeDay|TimeDayOfWeek|TimeDayOfYear|TimeHour|'
r'TimeMinute|TimeMonth|TimeSeconds|TimeYear|Year|'
r'AccountInfoDouble|AccountInfoInteger|AccountInfoString|AccountBalance|'
r'AccountCredit|AccountCompany|AccountCurrency|AccountEquity|'
r'AccountFreeMargin|AccountFreeMarginCheck|AccountFreeMarginMode|'
r'AccountLeverage|AccountMargin|AccountName|AccountNumber|AccountProfit|'
r'AccountServer|AccountStopoutLevel|AccountStopoutMode|'
r'GetLastError|IsStopped|UninitializeReason|MQLInfoInteger|MQLInfoString|'
r'Symbol|Period|Digits|Point|IsConnected|IsDemo|IsDllsAllowed|'
r'IsExpertEnabled|IsLibrariesAllowed|IsOptimization|IsTesting|'
r'IsTradeAllowed|'
r'IsTradeContextBusy|IsVisualMode|TerminalCompany|TerminalName|'
r'TerminalPath|'
r'SymbolsTotal|SymbolName|SymbolSelect|SymbolIsSynchronized|'
r'SymbolInfoDouble|'
r'SymbolInfoInteger|SymbolInfoString|SymbolInfoTick|'
r'SymbolInfoSessionQuote|'
r'SymbolInfoSessionTrade|MarketInfo|'
r'SeriesInfoInteger|CopyRates|CopyTime|CopyOpen|'
r'CopyHigh|CopyLow|CopyClose|'
r'CopyTickVolume|CopyRealVolume|CopySpread|iBars|iBarShift|iClose|'
r'iHigh|iHighest|iLow|iLowest|iOpen|iTime|iVolume|'
r'HideTestIndicators|Period|RefreshRates|Symbol|WindowBarsPerChart|'
r'WindowExpertName|WindowFind|WindowFirstVisibleBar|WindowHandle|'
r'WindowIsVisible|WindowOnDropped|WindowPriceMax|WindowPriceMin|'
r'WindowPriceOnDropped|WindowRedraw|WindowScreenShot|'
r'WindowTimeOnDropped|WindowsTotal|WindowXOnDropped|WindowYOnDropped|'
r'OrderClose|OrderCloseBy|OrderClosePrice|OrderCloseTime|OrderComment|'
r'OrderCommission|OrderDelete|OrderExpiration|OrderLots|OrderMagicNumber|'
r'OrderModify|OrderOpenPrice|OrderOpenTime|OrderPrint|OrderProfit|'
r'OrderSelect|OrderSend|OrdersHistoryTotal|OrderStopLoss|OrdersTotal|'
r'OrderSwap|OrderSymbol|OrderTakeProfit|OrderTicket|OrderType|'
r'GlobalVariableCheck|GlobalVariableTime|'
r'GlobalVariableDel|GlobalVariableGet|GlobalVariableName|'
r'GlobalVariableSet|GlobalVariablesFlush|GlobalVariableTemp|'
r'GlobalVariableSetOnCondition|GlobalVariablesDeleteAll|'
r'GlobalVariablesTotal|GlobalVariableCheck|GlobalVariableTime|'
r'GlobalVariableDel|GlobalVariableGet|'
r'GlobalVariableName|GlobalVariableSet|GlobalVariablesFlush|'
r'GlobalVariableTemp|GlobalVariableSetOnCondition|'
r'GlobalVariablesDeleteAll|GlobalVariablesTotal|'
r'GlobalVariableCheck|GlobalVariableTime|GlobalVariableDel|'
r'GlobalVariableGet|GlobalVariableName|GlobalVariableSet|'
r'GlobalVariablesFlush|GlobalVariableTemp|'
r'GlobalVariableSetOnCondition|GlobalVariablesDeleteAll|'
r'GlobalVariablesTotal|'
r'FileFindFirst|FileFindNext|FileFindClose|FileOpen|FileDelete|'
r'FileFlush|FileGetInteger|FileIsEnding|FileIsLineEnding|'
r'FileClose|FileIsExist|FileCopy|FileMove|FileReadArray|'
r'FileReadBool|FileReadDatetime|FileReadDouble|FileReadFloat|'
r'FileReadInteger|FileReadLong|FileReadNumber|FileReadString|'
r'FileReadStruct|FileSeek|FileSize|FileTell|FileWrite|'
r'FileWriteArray|FileWriteDouble|FileWriteFloat|FileWriteInteger|'
r'FileWriteLong|FileWriteString|FileWriteStruct|FolderCreate|'
r'FolderDelete|FolderClean|FileOpenHistory|'
r'IndicatorSetDouble|IndicatorSetInteger|IndicatorSetString|'
r'SetIndexBuffer|IndicatorBuffers|IndicatorCounted|IndicatorDigits|'
r'IndicatorShortName|SetIndexArrow|SetIndexDrawBegin|'
r'SetIndexEmptyValue|SetIndexLabel|SetIndexShift|'
r'SetIndexStyle|SetLevelStyle|SetLevelValue|'
r'ObjectCreate|ObjectName|ObjectDelete|ObjectsDeleteAll|'
r'ObjectFind|ObjectGetTimeByValue|ObjectGetValueByTime|'
r'ObjectMove|ObjectsTotal|ObjectGetDouble|ObjectGetInteger|'
r'ObjectGetString|ObjectSetDouble|ObjectSetInteger|'
r'ObjectSetString|TextSetFont|TextOut|TextGetSize|'
r'ObjectDescription|ObjectGet|ObjectGetFiboDescription|'
r'ObjectGetShiftByValue|ObjectGetValueByShift|ObjectSet|'
r'ObjectSetFiboDescription|ObjectSetText|ObjectType|'
r'iAC|iAD|iADX|iAlligator|iAO|iATR|iBearsPower|'
r'iBands|iBandsOnArray|iBullsPower|iCCI|iCCIOnArray|'
r'iCustom|iDeMarker|iEnvelopes|iEnvelopesOnArray|'
r'iForce|iFractals|iGator|iIchimoku|iBWMFI|iMomentum|'
r'iMomentumOnArray|iMFI|iMA|iMAOnArray|iOsMA|iMACD|'
r'iOBV|iSAR|iRSI|iRSIOnArray|iRVI|iStdDev|iStdDevOnArray|'
r'iStochastic|iWPR|'
r'EventSetMillisecondTimer|EventSetTimer|'
r'EventKillTimer|EventChartCustom)\b', Name.Function),
(r'(CHARTEVENT_KEYDOWN|CHARTEVENT_MOUSE_MOVE|'
r'CHARTEVENT_OBJECT_CREATE|'
r'CHARTEVENT_OBJECT_CHANGE|CHARTEVENT_OBJECT_DELETE|'
r'CHARTEVENT_CLICK|'
r'CHARTEVENT_OBJECT_CLICK|CHARTEVENT_OBJECT_DRAG|'
r'CHARTEVENT_OBJECT_ENDEDIT|'
r'CHARTEVENT_CHART_CHANGE|CHARTEVENT_CUSTOM|'
r'CHARTEVENT_CUSTOM_LAST|'
r'PERIOD_CURRENT|PERIOD_M1|PERIOD_M2|PERIOD_M3|'
r'PERIOD_M4|PERIOD_M5|'
r'PERIOD_M6|PERIOD_M10|PERIOD_M12|PERIOD_M15|'
r'PERIOD_M20|PERIOD_M30|'
r'PERIOD_H1|PERIOD_H2|PERIOD_H3|PERIOD_H4|'
r'PERIOD_H6|PERIOD_H8|'
r'PERIOD_H12|PERIOD_D1|PERIOD_W1|PERIOD_MN1|'
r'CHART_IS_OBJECT|CHART_BRING_TO_TOP|'
r'CHART_MOUSE_SCROLL|CHART_EVENT_MOUSE_MOVE|'
r'CHART_EVENT_OBJECT_CREATE|'
r'CHART_EVENT_OBJECT_DELETE|CHART_MODE|CHART_FOREGROUND|'
r'CHART_SHIFT|'
r'CHART_AUTOSCROLL|CHART_SCALE|CHART_SCALEFIX|'
r'CHART_SCALEFIX_11|'
r'CHART_SCALE_PT_PER_BAR|CHART_SHOW_OHLC|'
r'CHART_SHOW_BID_LINE|'
r'CHART_SHOW_ASK_LINE|CHART_SHOW_LAST_LINE|'
r'CHART_SHOW_PERIOD_SEP|'
r'CHART_SHOW_GRID|CHART_SHOW_VOLUMES|'
r'CHART_SHOW_OBJECT_DESCR|'
r'CHART_VISIBLE_BARS|CHART_WINDOWS_TOTAL|'
r'CHART_WINDOW_IS_VISIBLE|'
r'CHART_WINDOW_HANDLE|CHART_WINDOW_YDISTANCE|'
r'CHART_FIRST_VISIBLE_BAR|'
r'CHART_WIDTH_IN_BARS|CHART_WIDTH_IN_PIXELS|'
r'CHART_HEIGHT_IN_PIXELS|'
r'CHART_COLOR_BACKGROUND|CHART_COLOR_FOREGROUND|'
r'CHART_COLOR_GRID|'
r'CHART_COLOR_VOLUME|CHART_COLOR_CHART_UP|'
r'CHART_COLOR_CHART_DOWN|'
r'CHART_COLOR_CHART_LINE|CHART_COLOR_CANDLE_BULL|'
r'CHART_COLOR_CANDLE_BEAR|'
r'CHART_COLOR_BID|CHART_COLOR_ASK|CHART_COLOR_LAST|'
r'CHART_COLOR_STOP_LEVEL|'
r'CHART_SHOW_TRADE_LEVELS|CHART_DRAG_TRADE_LEVELS|'
r'CHART_SHOW_DATE_SCALE|'
r'CHART_SHOW_PRICE_SCALE|CHART_SHIFT_SIZE|'
r'CHART_FIXED_POSITION|'
r'CHART_FIXED_MAX|CHART_FIXED_MIN|CHART_POINTS_PER_BAR|'
r'CHART_PRICE_MIN|'
r'CHART_PRICE_MAX|CHART_COMMENT|CHART_BEGIN|'
r'CHART_CURRENT_POS|CHART_END|'
r'CHART_BARS|CHART_CANDLES|CHART_LINE|CHART_VOLUME_HIDE|'
r'CHART_VOLUME_TICK|CHART_VOLUME_REAL|'
r'OBJ_VLINE|OBJ_HLINE|OBJ_TREND|OBJ_TRENDBYANGLE|OBJ_CYCLES|'
r'OBJ_CHANNEL|OBJ_STDDEVCHANNEL|OBJ_REGRESSION|OBJ_PITCHFORK|'
r'OBJ_GANNLINE|OBJ_GANNFAN|OBJ_GANNGRID|OBJ_FIBO|'
r'OBJ_FIBOTIMES|OBJ_FIBOFAN|OBJ_FIBOARC|OBJ_FIBOCHANNEL|'
r'OBJ_EXPANSION|OBJ_RECTANGLE|OBJ_TRIANGLE|OBJ_ELLIPSE|'
r'OBJ_ARROW_THUMB_UP|OBJ_ARROW_THUMB_DOWN|'
r'OBJ_ARROW_UP|OBJ_ARROW_DOWN|'
r'OBJ_ARROW_STOP|OBJ_ARROW_CHECK|OBJ_ARROW_LEFT_PRICE|'
r'OBJ_ARROW_RIGHT_PRICE|OBJ_ARROW_BUY|OBJ_ARROW_SELL|'
r'OBJ_ARROW|'
r'OBJ_TEXT|OBJ_LABEL|OBJ_BUTTON|OBJ_BITMAP|'
r'OBJ_BITMAP_LABEL|'
r'OBJ_EDIT|OBJ_EVENT|OBJ_RECTANGLE_LABEL|'
r'OBJPROP_TIME1|OBJPROP_PRICE1|OBJPROP_TIME2|'
r'OBJPROP_PRICE2|OBJPROP_TIME3|'
r'OBJPROP_PRICE3|OBJPROP_COLOR|OBJPROP_STYLE|'
r'OBJPROP_WIDTH|'
r'OBJPROP_BACK|OBJPROP_RAY|OBJPROP_ELLIPSE|'
r'OBJPROP_SCALE|'
r'OBJPROP_ANGLE|OBJPROP_ARROWCODE|OBJPROP_TIMEFRAMES|'
r'OBJPROP_DEVIATION|OBJPROP_FONTSIZE|OBJPROP_CORNER|'
r'OBJPROP_XDISTANCE|OBJPROP_YDISTANCE|OBJPROP_FIBOLEVELS|'
r'OBJPROP_LEVELCOLOR|OBJPROP_LEVELSTYLE|OBJPROP_LEVELWIDTH|'
r'OBJPROP_FIRSTLEVEL|OBJPROP_COLOR|OBJPROP_STYLE|OBJPROP_WIDTH|'
r'OBJPROP_BACK|OBJPROP_ZORDER|OBJPROP_FILL|OBJPROP_HIDDEN|'
r'OBJPROP_SELECTED|OBJPROP_READONLY|OBJPROP_TYPE|OBJPROP_TIME|'
r'OBJPROP_SELECTABLE|OBJPROP_CREATETIME|OBJPROP_LEVELS|'
r'OBJPROP_LEVELCOLOR|OBJPROP_LEVELSTYLE|OBJPROP_LEVELWIDTH|'
r'OBJPROP_ALIGN|OBJPROP_FONTSIZE|OBJPROP_RAY_RIGHT|OBJPROP_RAY|'
r'OBJPROP_ELLIPSE|OBJPROP_ARROWCODE|OBJPROP_TIMEFRAMES|OBJPROP_ANCHOR|'
r'OBJPROP_XDISTANCE|OBJPROP_YDISTANCE|OBJPROP_DRAWLINES|OBJPROP_STATE|'
r'OBJPROP_CHART_ID|OBJPROP_XSIZE|OBJPROP_YSIZE|OBJPROP_XOFFSET|'
r'OBJPROP_YOFFSET|OBJPROP_PERIOD|OBJPROP_DATE_SCALE|OBJPROP_PRICE_SCALE|'
r'OBJPROP_CHART_SCALE|OBJPROP_BGCOLOR|OBJPROP_CORNER|OBJPROP_BORDER_TYPE|'
r'OBJPROP_BORDER_COLOR|OBJPROP_PRICE|OBJPROP_LEVELVALUE|OBJPROP_SCALE|'
r'OBJPROP_ANGLE|OBJPROP_DEVIATION|'
r'OBJPROP_NAME|OBJPROP_TEXT|OBJPROP_TOOLTIP|OBJPROP_LEVELTEXT|'
r'OBJPROP_FONT|OBJPROP_BMPFILE|OBJPROP_SYMBOL|'
r'BORDER_FLAT|BORDER_RAISED|BORDER_SUNKEN|ALIGN_LEFT|ALIGN_CENTER|'
r'ALIGN_RIGHT|ANCHOR_LEFT_UPPER|ANCHOR_LEFT|ANCHOR_LEFT_LOWER|'
r'ANCHOR_LOWER|ANCHOR_RIGHT_LOWER|ANCHOR_RIGHT|ANCHOR_RIGHT_UPPER|'
r'ANCHOR_UPPER|ANCHOR_CENTER|ANCHOR_TOP|ANCHOR_BOTTOM|'
r'CORNER_LEFT_UPPER|CORNER_LEFT_LOWER|CORNER_RIGHT_LOWER|'
r'CORNER_RIGHT_UPPER|'
r'OBJ_NO_PERIODS|EMPTY|OBJ_PERIOD_M1|OBJ_PERIOD_M5|OBJ_PERIOD_M15|'
r'OBJ_PERIOD_M30|OBJ_PERIOD_H1|OBJ_PERIOD_H4|OBJ_PERIOD_D1|'
r'OBJ_PERIOD_W1|OBJ_PERIOD_MN1|OBJ_ALL_PERIODS|'
r'GANN_UP_TREND|GANN_DOWN_TREND|'
r'((clr)?(Black|DarkGreen|DarkSlateGray|Olive|'
r'Green|Teal|Navy|Purple|'
r'Maroon|Indigo|MidnightBlue|DarkBlue|'
r'DarkOliveGreen|SaddleBrown|'
r'ForestGreen|OliveDrab|SeaGreen|'
r'DarkGoldenrod|DarkSlateBlue|'
r'Sienna|MediumBlue|Brown|DarkTurquoise|'
r'DimGray|LightSeaGreen|'
r'DarkViolet|FireBrick|MediumVioletRed|'
r'MediumSeaGreen|Chocolate|'
r'Crimson|SteelBlue|Goldenrod|MediumSpringGreen|'
r'LawnGreen|CadetBlue|'
r'DarkOrchid|YellowGreen|LimeGreen|OrangeRed|'
r'DarkOrange|Orange|'
r'Gold|Yellow|Chartreuse|Lime|SpringGreen|'
r'Aqua|DeepSkyBlue|Blue|'
r'Magenta|Red|Gray|SlateGray|Peru|BlueViolet|'
r'LightSlateGray|DeepPink|'
r'MediumTurquoise|DodgerBlue|Turquoise|RoyalBlue|'
r'SlateBlue|DarkKhaki|'
r'IndianRed|MediumOrchid|GreenYellow|'
r'MediumAquamarine|DarkSeaGreen|'
r'Tomato|RosyBrown|Orchid|MediumPurple|'
r'PaleVioletRed|Coral|CornflowerBlue|'
r'DarkGray|SandyBrown|MediumSlateBlue|'
r'Tan|DarkSalmon|BurlyWood|'
r'HotPink|Salmon|Violet|LightCoral|SkyBlue|'
r'LightSalmon|Plum|'
r'Khaki|LightGreen|Aquamarine|Silver|'
r'LightSkyBlue|LightSteelBlue|'
r'LightBlue|PaleGreen|Thistle|PowderBlue|'
r'PaleGoldenrod|PaleTurquoise|'
r'LightGray|Wheat|NavajoWhite|Moccasin|'
r'LightPink|Gainsboro|PeachPuff|'
r'Pink|Bisque|LightGoldenrod|BlanchedAlmond|'
r'LemonChiffon|Beige|'
r'AntiqueWhite|PapayaWhip|Cornsilk|'
r'LightYellow|LightCyan|Linen|'
r'Lavender|MistyRose|OldLace|WhiteSmoke|'
r'Seashell|Ivory|Honeydew|'
r'AliceBlue|LavenderBlush|MintCream|Snow|White))|'
r'SYMBOL_THUMBSUP|SYMBOL_THUMBSDOWN|'
r'SYMBOL_ARROWUP|SYMBOL_ARROWDOWN|'
r'SYMBOL_STOPSIGN|SYMBOL_CHECKSIGN|'
r'SYMBOL_LEFTPRICE|SYMBOL_RIGHTPRICE|'
r'PRICE_CLOSE|PRICE_OPEN|PRICE_HIGH|PRICE_LOW|'
r'PRICE_MEDIAN|PRICE_TYPICAL|PRICE_WEIGHTED|'
r'VOLUME_TICK|VOLUME_REAL|'
r'STO_LOWHIGH|STO_CLOSECLOSE|'
r'MODE_OPEN|MODE_LOW|MODE_HIGH|MODE_CLOSE|MODE_VOLUME|MODE_TIME|'
r'MODE_SMA|MODE_EMA|MODE_SMMA|MODE_LWMA|'
r'MODE_MAIN|MODE_SIGNAL|MODE_MAIN|'
r'MODE_PLUSDI|MODE_MINUSDI|MODE_UPPER|'
r'MODE_LOWER|MODE_GATORJAW|MODE_GATORTEETH|'
r'MODE_GATORLIPS|MODE_TENKANSEN|'
r'MODE_KIJUNSEN|MODE_SENKOUSPANA|'
r'MODE_SENKOUSPANB|MODE_CHINKOUSPAN|'
r'DRAW_LINE|DRAW_SECTION|DRAW_HISTOGRAM|'
r'DRAW_ARROW|DRAW_ZIGZAG|DRAW_NONE|'
r'STYLE_SOLID|STYLE_DASH|STYLE_DOT|'
r'STYLE_DASHDOT|STYLE_DASHDOTDOT|'
r'DRAW_NONE|DRAW_LINE|DRAW_SECTION|DRAW_HISTOGRAM|'
r'DRAW_ARROW|DRAW_ZIGZAG|DRAW_FILLING|'
r'INDICATOR_DATA|INDICATOR_COLOR_INDEX|'
r'INDICATOR_CALCULATIONS|INDICATOR_DIGITS|'
r'INDICATOR_HEIGHT|INDICATOR_LEVELS|'
r'INDICATOR_LEVELCOLOR|INDICATOR_LEVELSTYLE|'
r'INDICATOR_LEVELWIDTH|INDICATOR_MINIMUM|'
r'INDICATOR_MAXIMUM|INDICATOR_LEVELVALUE|'
r'INDICATOR_SHORTNAME|INDICATOR_LEVELTEXT|'
r'TERMINAL_BUILD|TERMINAL_CONNECTED|'
r'TERMINAL_DLLS_ALLOWED|TERMINAL_TRADE_ALLOWED|'
r'TERMINAL_EMAIL_ENABLED|'
r'TERMINAL_FTP_ENABLED|TERMINAL_MAXBARS|'
r'TERMINAL_CODEPAGE|TERMINAL_CPU_CORES|'
r'TERMINAL_DISK_SPACE|TERMINAL_MEMORY_PHYSICAL|'
r'TERMINAL_MEMORY_TOTAL|'
r'TERMINAL_MEMORY_AVAILABLE|TERMINAL_MEMORY_USED|'
r'TERMINAL_X64|'
r'TERMINAL_OPENCL_SUPPORT|TERMINAL_LANGUAGE|'
r'TERMINAL_COMPANY|TERMINAL_NAME|'
r'TERMINAL_PATH|TERMINAL_DATA_PATH|'
r'TERMINAL_COMMONDATA_PATH|'
r'MQL_PROGRAM_TYPE|MQL_DLLS_ALLOWED|'
r'MQL_TRADE_ALLOWED|MQL_DEBUG|'
r'MQL_PROFILER|MQL_TESTER|MQL_OPTIMIZATION|'
r'MQL_VISUAL_MODE|'
r'MQL_FRAME_MODE|MQL_LICENSE_TYPE|MQL_PROGRAM_NAME|'
r'MQL_PROGRAM_PATH|'
r'PROGRAM_SCRIPT|PROGRAM_EXPERT|'
r'PROGRAM_INDICATOR|LICENSE_FREE|'
r'LICENSE_DEMO|LICENSE_FULL|LICENSE_TIME|'
r'MODE_LOW|MODE_HIGH|MODE_TIME|MODE_BID|'
r'MODE_ASK|MODE_POINT|'
r'MODE_DIGITS|MODE_SPREAD|MODE_STOPLEVEL|'
r'MODE_LOTSIZE|MODE_TICKVALUE|'
r'MODE_TICKSIZE|MODE_SWAPLONG|'
r'MODE_SWAPSHORT|MODE_STARTING|'
r'MODE_EXPIRATION|MODE_TRADEALLOWED|'
r'MODE_MINLOT|MODE_LOTSTEP|MODE_MAXLOT|'
r'MODE_SWAPTYPE|MODE_PROFITCALCMODE|'
r'MODE_MARGINCALCMODE|MODE_MARGININIT|'
r'MODE_MARGINMAINTENANCE|MODE_MARGINHEDGED|'
r'MODE_MARGINREQUIRED|MODE_FREEZELEVEL|'
r'SUNDAY|MONDAY|TUESDAY|WEDNESDAY|THURSDAY|'
r'FRIDAY|SATURDAY|'
r'ACCOUNT_LOGIN|ACCOUNT_TRADE_MODE|'
r'ACCOUNT_LEVERAGE|'
r'ACCOUNT_LIMIT_ORDERS|ACCOUNT_MARGIN_SO_MODE|'
r'ACCOUNT_TRADE_ALLOWED|ACCOUNT_TRADE_EXPERT|'
r'ACCOUNT_BALANCE|'
r'ACCOUNT_CREDIT|ACCOUNT_PROFIT|ACCOUNT_EQUITY|'
r'ACCOUNT_MARGIN|'
r'ACCOUNT_FREEMARGIN|ACCOUNT_MARGIN_LEVEL|'
r'ACCOUNT_MARGIN_SO_CALL|'
r'ACCOUNT_MARGIN_SO_SO|ACCOUNT_NAME|'
r'ACCOUNT_SERVER|ACCOUNT_CURRENCY|'
r'ACCOUNT_COMPANY|ACCOUNT_TRADE_MODE_DEMO|'
r'ACCOUNT_TRADE_MODE_CONTEST|'
r'ACCOUNT_TRADE_MODE_REAL|ACCOUNT_STOPOUT_MODE_PERCENT|'
r'ACCOUNT_STOPOUT_MODE_MONEY|'
r'STAT_INITIAL_DEPOSIT|STAT_WITHDRAWAL|STAT_PROFIT|'
r'STAT_GROSS_PROFIT|'
r'STAT_GROSS_LOSS|STAT_MAX_PROFITTRADE|'
r'STAT_MAX_LOSSTRADE|STAT_CONPROFITMAX|'
r'STAT_CONPROFITMAX_TRADES|STAT_MAX_CONWINS|'
r'STAT_MAX_CONPROFIT_TRADES|'
r'STAT_CONLOSSMAX|STAT_CONLOSSMAX_TRADES|'
r'STAT_MAX_CONLOSSES|'
r'STAT_MAX_CONLOSS_TRADES|STAT_BALANCEMIN|'
r'STAT_BALANCE_DD|'
r'STAT_BALANCEDD_PERCENT|STAT_BALANCE_DDREL_PERCENT|'
r'STAT_BALANCE_DD_RELATIVE|STAT_EQUITYMIN|'
r'STAT_EQUITY_DD|'
r'STAT_EQUITYDD_PERCENT|STAT_EQUITY_DDREL_PERCENT|'
r'STAT_EQUITY_DD_RELATIVE|STAT_EXPECTED_PAYOFF|'
r'STAT_PROFIT_FACTOR|'
r'STAT_RECOVERY_FACTOR|STAT_SHARPE_RATIO|'
r'STAT_MIN_MARGINLEVEL|'
r'STAT_CUSTOM_ONTESTER|STAT_DEALS|STAT_TRADES|'
r'STAT_PROFIT_TRADES|'
r'STAT_LOSS_TRADES|STAT_SHORT_TRADES|STAT_LONG_TRADES|'
r'STAT_PROFIT_SHORTTRADES|STAT_PROFIT_LONGTRADES|'
r'STAT_PROFITTRADES_AVGCON|STAT_LOSSTRADES_AVGCON|'
r'SERIES_BARS_COUNT|SERIES_FIRSTDATE|SERIES_LASTBAR_DATE|'
r'SERIES_SERVER_FIRSTDATE|SERIES_TERMINAL_FIRSTDATE|'
r'SERIES_SYNCHRONIZED|'
r'OP_BUY|OP_SELL|OP_BUYLIMIT|OP_SELLLIMIT|'
r'OP_BUYSTOP|OP_SELLSTOP|'
r'TRADE_ACTION_DEAL|TRADE_ACTION_PENDING|'
r'TRADE_ACTION_SLTP|'
r'TRADE_ACTION_MODIFY|TRADE_ACTION_REMOVE|'
r'__DATE__|__DATETIME__|__LINE__|__FILE__|'
r'__PATH__|__FUNCTION__|'
r'__FUNCSIG__|__MQLBUILD__|__MQL4BUILD__|'
r'M_E|M_LOG2E|M_LOG10E|M_LN2|M_LN10|'
r'M_PI|M_PI_2|M_PI_4|M_1_PI|'
r'M_2_PI|M_2_SQRTPI|M_SQRT2|M_SQRT1_2|'
r'CHAR_MIN|CHAR_MAX|UCHAR_MAX|'
r'SHORT_MIN|SHORT_MAX|USHORT_MAX|'
r'INT_MIN|INT_MAX|UINT_MAX|'
r'LONG_MIN|LONG_MAX|ULONG_MAX|'
r'DBL_MIN|DBL_MAX|DBL_EPSILON|DBL_DIG|DBL_MANT_DIG|'
r'DBL_MAX_10_EXP|DBL_MAX_EXP|DBL_MIN_10_EXP|DBL_MIN_EXP|'
r'FLT_MIN|FLT_MAX|FLT_EPSILON|'
r'FLT_DIG|FLT_MANT_DIG|FLT_MAX_10_EXP|'
r'FLT_MAX_EXP|FLT_MIN_10_EXP|FLT_MIN_EXP|REASON_PROGRAM'
r'REASON_REMOVE|REASON_RECOMPILE|'
r'REASON_CHARTCHANGE|REASON_CHARTCLOSE|'
r'REASON_PARAMETERS|REASON_ACCOUNT|'
r'REASON_TEMPLATE|REASON_INITFAILED|'
r'REASON_CLOSE|POINTER_INVALID'
r'POINTER_DYNAMIC|POINTER_AUTOMATIC|'
r'NULL|EMPTY|EMPTY_VALUE|CLR_NONE|WHOLE_ARRAY|'
r'CHARTS_MAX|clrNONE|EMPTY_VALUE|INVALID_HANDLE|'
r'IS_DEBUG_MODE|IS_PROFILE_MODE|NULL|WHOLE_ARRAY|WRONG_VALUE|'
r'ERR_NO_ERROR|ERR_NO_RESULT|ERR_COMMON_ERROR|'
r'ERR_INVALID_TRADE_PARAMETERS|'
r'ERR_SERVER_BUSY|ERR_OLD_VERSION|ERR_NO_CONNECTION|'
r'ERR_NOT_ENOUGH_RIGHTS|'
r'ERR_TOO_FREQUENT_REQUESTS|ERR_MALFUNCTIONAL_TRADE|'
r'ERR_ACCOUNT_DISABLED|'
r'ERR_INVALID_ACCOUNT|ERR_TRADE_TIMEOUT|'
r'ERR_INVALID_PRICE|ERR_INVALID_STOPS|'
r'ERR_INVALID_TRADE_VOLUME|ERR_MARKET_CLOSED|'
r'ERR_TRADE_DISABLED|'
r'ERR_NOT_ENOUGH_MONEY|ERR_PRICE_CHANGED|'
r'ERR_OFF_QUOTES|ERR_BROKER_BUSY|'
r'ERR_REQUOTE|ERR_ORDER_LOCKED|'
r'ERR_LONG_POSITIONS_ONLY_ALLOWED|ERR_TOO_MANY_REQUESTS|'
r'ERR_TRADE_MODIFY_DENIED|ERR_TRADE_CONTEXT_BUSY|'
r'ERR_TRADE_EXPIRATION_DENIED|'
r'ERR_TRADE_TOO_MANY_ORDERS|ERR_TRADE_HEDGE_PROHIBITED|'
r'ERR_TRADE_PROHIBITED_BY_FIFO|'
r'FILE_READ|FILE_WRITE|FILE_BIN|FILE_CSV|FILE_TXT|'
r'FILE_ANSI|FILE_UNICODE|'
r'FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_REWRITE|'
r'FILE_COMMON|FILE_EXISTS|'
r'FILE_CREATE_DATE|FILE_MODIFY_DATE|'
r'FILE_ACCESS_DATE|FILE_SIZE|FILE_POSITION|'
r'FILE_END|FILE_LINE_END|FILE_IS_COMMON|'
r'FILE_IS_TEXT|FILE_IS_BINARY|'
r'FILE_IS_CSV|FILE_IS_ANSI|FILE_IS_READABLE|FILE_IS_WRITABLE|'
r'SEEK_SET|SEEK_CUR|SEEK_END|CP_ACP|'
r'CP_OEMCP|CP_MACCP|CP_THREAD_ACP|'
r'CP_SYMBOL|CP_UTF7|CP_UTF8|IDOK|IDCANCEL|IDABORT|'
r'IDRETRY|IDIGNORE|IDYES|IDNO|IDTRYAGAIN|IDCONTINUE|'
r'MB_OK|MB_OKCANCEL|MB_ABORTRETRYIGNORE|MB_YESNOCANCEL|'
r'MB_YESNO|MB_RETRYCANCEL|'
r'MB_CANCELTRYCONTINUE|MB_ICONSTOP|MB_ICONERROR|'
r'MB_ICONHAND|MB_ICONQUESTION|'
r'MB_ICONEXCLAMATION|MB_ICONWARNING|'
r'MB_ICONINFORMATION|MB_ICONASTERISK|'
r'MB_DEFBUTTON1|MB_DEFBUTTON2|MB_DEFBUTTON3|MB_DEFBUTTON4)\b',
Name.Constant),
inherit,
],
}
class SwiftLexer(ObjectiveCLexer):
    """
    For `Swift <https://developer.apple.com/swift/>`_ source.

    Delegates tokenization to ObjectiveCLexer, then re-tags any plain
    ``Name`` token that is actually a Swift keyword.
    """
    name = 'Swift'
    filenames = ['*.swift']
    aliases = ['swift']
    mimetypes = ['text/x-swift']

    keywords_decl = ['class', 'deinit', 'enum', 'extension', 'func', 'import',
                     'init', 'let', 'protocol', 'static', 'struct', 'subscript',
                     'typealias', 'var']
    keywords_stmt = ['break', 'case', 'continue', 'default', 'do', 'else',
                     'fallthrough', 'if', 'in', 'for', 'return', 'switch',
                     'where', 'while']
    keywords_type = ['as', 'dynamicType', 'is', 'new', 'super', 'self', 'Self',
                     'Type', '__COLUMN__', '__FILE__', '__FUNCTION__',
                     '__LINE__']
    keywords_resrv = ['associativity', 'didSet', 'get', 'infix', 'inout', 'left',
                      'mutating', 'none', 'nonmutating', 'operator', 'override',
                      'postfix', 'precedence', 'prefix', 'right', 'set',
                      'unowned', 'unowned(safe)', 'unowned(unsafe)', 'weak',
                      'willSet']
    operators = ['->']

    def get_tokens_unprocessed(self, text):
        base_stream = ObjectiveCLexer.get_tokens_unprocessed(self, text)
        for pos, tok, val in base_stream:
            if tok is Name:
                # Promote bare identifiers that are Swift keywords; declaration
                # and statement keywords both map to the generic Keyword token.
                if val in self.keywords_decl or val in self.keywords_stmt:
                    tok = Keyword
                elif val in self.keywords_type:
                    tok = Keyword.Type
                elif val in self.keywords_resrv:
                    tok = Keyword.Reserved
                elif val in self.operators:
                    tok = Operator
            yield pos, tok, val
| mit |
robhudson/django | django/contrib/gis/utils/srs.py | 450 | 3123 | from django.contrib.gis.gdal import SpatialReference
from django.db import DEFAULT_DB_ALIAS, connections
def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
                  database=None):
    """
    Insert the information of a GDAL SpatialReference into the
    `spatial_ref_sys` table of the spatial backend, enabling
    database-level spatial transformations for that reference system.
    Useful for registering systems not shipped with the backend:

    >>> from django.contrib.gis.utils import add_srs_entry
    >>> add_srs_entry(3857)

    Keyword Arguments:
     auth_name:
       Value stored in the `auth_name` field.  Defaults to 'EPSG'.

     auth_srid:
       Value stored in the `auth_srid` field.  Defaults to the SRID
       determined by GDAL.

     ref_sys_name:
       SpatiaLite only: value stored in the `ref_sys_name` field.
       Defaults to the name determined by GDAL.

     database:
      Name of the database connection to use; defaults to
      `django.db.DEFAULT_DB_ALIAS` (i.e. 'default').
    """
    alias = database or DEFAULT_DB_ALIAS
    conn = connections[alias]

    # Only spatial backends expose `spatial_version` on their operations.
    if not hasattr(conn.ops, 'spatial_version'):
        raise Exception('The `add_srs_entry` utility only works '
                        'with spatial backends.')
    if not conn.features.supports_add_srs_entry:
        raise Exception('This utility does not support your database backend.')
    SpatialRefSys = conn.ops.spatial_ref_sys()

    # Coerce anything that is not already a SpatialReference (e.g. an SRID
    # integer or a WKT string) into one.
    if not isinstance(srs, SpatialReference):
        srs = SpatialReference(srs)
    if srs.srid is None:
        raise Exception('Spatial reference requires an SRID to be '
                        'compatible with the spatial backend.')

    # Field values common to both the PostGIS and SpatiaLite models.
    entry_values = {
        'srid': srs.srid,
        'auth_name': auth_name,
        'auth_srid': auth_srid or srs.srid,
        'proj4text': srs.proj4,
    }

    # Backend-specific fields for the SpatialRefSys model.
    model_field_names = {f.name for f in SpatialRefSys._meta.get_fields()}
    if 'srtext' in model_field_names:
        entry_values['srtext'] = srs.wkt
    if 'ref_sys_name' in model_field_names:
        # SpatiaLite specific.
        entry_values['ref_sys_name'] = ref_sys_name or srs.name

    try:
        # Look up by SRID alone: the stored wkt/proj4 may legitimately differ
        # from what GDAL produced here.
        SpatialRefSys.objects.using(alias).get(srid=srs.srid)
    except SpatialRefSys.DoesNotExist:
        SpatialRefSys.objects.using(alias).create(**entry_values)

# Alias is for backwards-compatibility purposes.
add_postgis_srs = add_srs_entry
| bsd-3-clause |
CSC301H-Fall2013/JuakStore | site-packages/tests/regressiontests/generic_relations_regress/tests.py | 89 | 2975 | from django.db.models import Q
from django.test import TestCase
from .models import (Address, Place, Restaurant, Link, CharLink, TextLink,
Person, Contact, Note, Organization, OddRelation1, OddRelation2)
class GenericRelationTests(TestCase):

    def test_inherited_models_content_type(self):
        """
        A GenericRelation on an inherited model must resolve to that model's
        own content type, not the parent's.
        """
        place = Place.objects.create(name="South Park")
        restaurant = Restaurant.objects.create(name="Chubby's")
        place_link = Link.objects.create(content_object=place)
        restaurant_link = Link.objects.create(content_object=restaurant)
        self.assertEqual(list(place.links.all()), [place_link])
        self.assertEqual(list(restaurant.links.all()), [restaurant_link])

    def test_reverse_relation_pk(self):
        """
        The correct primary-key column of the originating model must be used
        when filtering through the reverse generic relation. See #12664.
        """
        person = Person.objects.create(account=23, name='Chef')
        Address.objects.create(street='123 Anywhere Place',
                               city='Conifer', state='CO',
                               zipcode='80433', content_object=person)
        matches = Person.objects.filter(addresses__zipcode='80433')
        self.assertEqual(1, matches.count())
        self.assertEqual('Chef', matches[0].name)

    def test_charlink_delete(self):
        # Deleting the target of a CharField-keyed generic FK must not error.
        owner = OddRelation1.objects.create(name='clink')
        CharLink.objects.create(content_object=owner)
        owner.delete()

    def test_textlink_delete(self):
        # Deleting the target of a TextField-keyed generic FK must not error.
        owner = OddRelation2.objects.create(name='tlink')
        TextLink.objects.create(content_object=owner)
        owner.delete()

    def test_q_object_or(self):
        """
        SQL parameters for generic-relation conditions must be grouped in
        parentheses when combined with OR.

        Test for bug http://code.djangoproject.com/ticket/11535

        Both orderings of the same two Q conditions must match; the bug made
        the result depend on the order in which the conditions appeared.
        """
        note_contact = Contact.objects.create()
        org_contact = Contact.objects.create()
        Note.objects.create(note='note', content_object=note_contact)
        org = Organization.objects.create(name='org name')
        org.contacts.add(org_contact)
        # A non-matching note OR a matching organization name...
        matched = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
                                         Q(organizations__name__icontains=r'org name'))
        self.assertTrue(org_contact in matched)
        # ...and the same two conditions in the opposite order.
        matched = Contact.objects.filter(
            Q(organizations__name__icontains=r'org name') |
            Q(notes__note__icontains=r'other note'))
        self.assertTrue(org_contact in matched)
| mit |
jamestwebber/scipy | scipy/io/arff/arffread.py | 1 | 26519 | # Last Change: Mon Aug 20 08:00 PM 2007 J
from __future__ import division, print_function, absolute_import
import re
import datetime
from collections import OrderedDict
import numpy as np
from scipy._lib.six import next
import csv
import ctypes
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integer and reals are treated as numeric -> the integer info
# is lost!
# - Replace ValueError by ParseError or something
# We know can handle the following:
# - numeric and nominal attributes
# - missing values for numeric attributes
# Regexes used to tokenize an ARFF header.  ARFF keywords are
# case-insensitive, hence the explicit [Dd][Aa]... character classes.
# Match any header/meta line (starts with @)
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^\s*@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
# Match the {a,b,...} value list of a nominal attribute
r_nominal = re.compile('{(.+)}')
# Match a date declaration, capturing the (optionally quoted) format
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
# ------------------------
# Module defined exception
# ------------------------
class ArffError(IOError):
    """Base exception for all errors raised by this ARFF reader."""
    pass
class ParseArffError(ArffError):
    """Raised when the input file is not valid / supported ARFF."""
    pass
# ----------
# Attributes
# ----------
class Attribute(object):
    """Base class for one @attribute declaration of an ARFF header.

    Subclasses override ``parse_attribute`` (recognition of the declaration
    string) and ``parse_data`` (conversion of one raw data field).
    """

    # Human-readable type label; set by subclasses (e.g. 'numeric', 'date').
    type_name = None

    def __init__(self, name):
        self.name = name
        # Range of admissible values, when meaningful (set by subclasses).
        self.range = None
        # Numpy dtype used for this column; subclasses narrow this.
        self.dtype = np.object_

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.
        """
        return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.
        """
        return None

    def __str__(self):
        """
        Return a 'name,type_name' summary of this attribute.
        """
        return self.name + ',' + self.type_name
class NominalAttribute(Attribute):
    """Attribute whose values come from a fixed set, declared as {a,b,...}."""

    type_name = 'nominal'

    def __init__(self, name, values):
        super().__init__(name)
        self.values = values
        self.range = values
        # Fixed-width byte strings, wide enough for the longest value.
        self.dtype = (np.string_, max(map(len, values)))

    @staticmethod
    def _get_nom_val(atrv):
        """Extract the tuple of possible values from a nominal definition.

        A nominal type is framed between braces ({}).

        Parameters
        ----------
        atrv : str
           Nominal type definition

        Returns
        -------
        poss_vals : tuple
           possible values

        Examples
        --------
        >>> get_nom_val("{floup, bouga, fl, ratata}")
        ('floup', 'bouga', 'fl', 'ratata')
        """
        match = r_nominal.match(atrv)
        if not match:
            raise ValueError("This does not look like a nominal string")
        fields, _ = split_data_line(match.group(1))
        return tuple(fields)

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a NominalAttribute when *attr_string* is a '{...}' set.

        Returns None when the declaration is not nominal.
        """
        if attr_string[0] != '{':
            return None
        return cls(name, cls._get_nom_val(attr_string))

    def parse_data(self, data_str):
        """Return *data_str* if it is a declared value (or '?' for missing);
        raise ValueError otherwise.
        """
        if data_str == '?' or data_str in self.values:
            return data_str
        raise ValueError("%s value not in %s" % (str(data_str),
                                                 str(self.values)))

    def __str__(self):
        return self.name + ",{" + ",".join(self.values) + "}"
class NumericAttribute(Attribute):
    """Attribute declared as 'numeric', 'int' or 'real'; values are floats.

    NOTE: integers are handled as floats too, so the integer/real
    distinction of the declaration is lost.
    """

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'numeric'
        self.dtype = np.float_

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a NumericAttribute when *attr_string* declares a numeric
        type ('numeric', 'int' or 'real'); None otherwise.
        """
        declaration = attr_string.lower().strip()
        if declaration.startswith(('numeric', 'int', 'real')):
            return cls(name)
        return None

    def parse_data(self, data_str):
        """Convert one raw field to float; '?' (missing) becomes NaN.

        Parameters
        ----------
        data_str : str
           string to convert

        Returns
        -------
        f : float
           where float can be nan
        """
        return np.nan if '?' in data_str else float(data_str)

    def _basic_stats(self, data):
        # Correction factor turning the biased std estimate into the
        # sample-based one.
        correction = data.size * 1. / (data.size - 1)
        return (np.nanmin(data), np.nanmax(data),
                np.mean(data), np.std(data) * correction)
class StringAttribute(Attribute):
    """Attribute declared as 'string' (free-form text, not yet supported by
    the loader itself)."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'string'

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a StringAttribute when *attr_string* declares 'string';
        None otherwise.
        """
        if attr_string.lower().strip().startswith('string'):
            return cls(name)
        return None
class DateAttribute(Attribute):
    """Attribute declared as 'date <format>'; values parse to numpy
    datetime64 at the finest resolution present in the format."""

    def __init__(self, name, date_format, datetime_unit):
        super().__init__(name)
        self.date_format = date_format        # strptime-style format string
        self.datetime_unit = datetime_unit    # numpy datetime64 unit code
        self.type_name = 'date'
        self.range = date_format
        self.dtype = np.datetime64(0, self.datetime_unit)

    @staticmethod
    def _get_date_format(atrv):
        """Translate a Java SimpleDateFormat pattern into a C strftime
        pattern, and determine the finest datetime64 unit it uses.

        Raises ValueError for unsupported (time-zone) or unrecognized
        formats.
        """
        m = r_date.match(atrv)
        if m:
            pattern = m.group(1).strip()
            # convert time pattern from Java's SimpleDateFormat to C's format
            datetime_unit = None
            if "yyyy" in pattern:
                pattern = pattern.replace("yyyy", "%Y")
                datetime_unit = "Y"
            elif "yy" in pattern:
                # BUG FIX: this was `elif "yy":`, which is always truthy, so
                # any pattern without a year token still got unit "Y" and
                # invalid formats were silently accepted instead of raising
                # "Invalid or unsupported date format" below.
                pattern = pattern.replace("yy", "%y")
                datetime_unit = "Y"
            if "MM" in pattern:
                pattern = pattern.replace("MM", "%m")
                datetime_unit = "M"
            if "dd" in pattern:
                pattern = pattern.replace("dd", "%d")
                datetime_unit = "D"
            if "HH" in pattern:
                pattern = pattern.replace("HH", "%H")
                datetime_unit = "h"
            if "mm" in pattern:
                pattern = pattern.replace("mm", "%M")
                datetime_unit = "m"
            if "ss" in pattern:
                pattern = pattern.replace("ss", "%S")
                datetime_unit = "s"
            if "z" in pattern or "Z" in pattern:
                raise ValueError("Date type attributes with time zone not "
                                 "supported, yet")

            if datetime_unit is None:
                raise ValueError("Invalid or unsupported date format")

            return pattern, datetime_unit
        else:
            raise ValueError("Invalid or no date format")

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a DateAttribute when *attr_string* declares
        'date <format>'; None otherwise.
        """
        attr_string_lower = attr_string.lower().strip()
        if attr_string_lower[:len('date')] == 'date':
            # Use the original-case string: the format itself is
            # case-sensitive (MM vs mm).
            date_format, datetime_unit = cls._get_date_format(attr_string)
            return cls(name, date_format, datetime_unit)
        else:
            return None

    def parse_data(self, data_str):
        """Parse one date value; '?' (missing) becomes NaT."""
        date_str = data_str.strip().strip("'").strip('"')
        if date_str == '?':
            return np.datetime64('NaT', self.datetime_unit)
        else:
            dt = datetime.datetime.strptime(date_str, self.date_format)
            return np.datetime64(dt).astype(
                "datetime64[%s]" % self.datetime_unit)

    def __str__(self):
        return super(DateAttribute, self).__str__() + ',' + self.date_format
class RelationalAttribute(Attribute):
    """Attribute declared as 'relational': a nested table whose own
    attributes are filled in later by read_relational_attribute."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'relational'
        self.dtype = np.object_
        # Nested Attribute instances, appended while reading the header.
        self.attributes = []
        # csv dialect detected on the first data row, then reused.
        self.dialect = None

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For relational attributes, the attribute string would be like
        'relational'.
        """
        attr_string_lower = attr_string.lower().strip()

        if attr_string_lower[:len('relational')] == 'relational':
            return cls(name)
        else:
            return None

    def parse_data(self, data_str):
        """Parse one relational value: an escaped, embedded multi-line CSV
        blob, returned as a structured numpy array of the nested attributes.
        """
        # One slot index per nested attribute.
        elems = list(range(len(self.attributes)))

        # The nested rows arrive with their newlines escaped ("\\n");
        # unescape before splitting into rows.
        escaped_string = data_str.encode().decode("unicode-escape")

        row_tuples = []

        for raw in escaped_string.split("\n"):
            # The dialect sniffed on the first row is reused for the rest.
            row, self.dialect = split_data_line(raw, self.dialect)

            row_tuples.append(tuple(
                [self.attributes[i].parse_data(row[i]) for i in elems]))

        return np.array(row_tuples,
                        [(a.name, a.dtype) for a in self.attributes])

    def __str__(self):
        return (super(RelationalAttribute, self).__str__() + '\n\t' +
                '\n\t'.join(str(a) for a in self.attributes))
# -----------------
# Various utilities
# -----------------
def to_attribute(name, attr_string):
    """Instantiate the first Attribute subclass that recognizes
    *attr_string*; raise ParseArffError when none does."""
    candidates = (NominalAttribute, NumericAttribute, DateAttribute,
                  StringAttribute, RelationalAttribute)
    for attr_cls in candidates:
        parsed = attr_cls.parse_attribute(name, attr_string)
        if parsed is not None:
            return parsed

    raise ParseArffError("unknown attribute %s" % attr_string)
def csv_sniffer_has_bug_last_field():
    """
    Checks if the bug https://bugs.python.org/issue30157 is unpatched.
    """
    # Sniff only once; the verdict is cached on the function object.
    cached = getattr(csv_sniffer_has_bug_last_field, "has_bug", None)
    if cached is None:
        probe = csv.Sniffer().sniff("3, 'a'")
        # A patched Sniffer detects the single quote on the last field.
        cached = probe.quotechar != "'"
        csv_sniffer_has_bug_last_field.has_bug = cached
    return cached
def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
    """
    Workaround for the bug https://bugs.python.org/issue30157 if is unpatched.

    When the buggy Sniffer mis-detects the quote character of a quoted
    *last* field, re-run the (fixed) detection regexes and patch the
    detected quotechar/delimiter/doublequote/skipinitialspace back into
    *dialect* in place.
    """
    if csv_sniffer_has_bug_last_field():
        # Reuses code from the csv module
        # This is the pattern the stdlib got wrong: quoted field at the
        # very end of the line.
        right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',  # .*?",
                      right_regex, # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(sniff_line)
            if matches:
                break

        # If it does not match the expression that was bugged, then this bug does not apply
        if restr != right_regex:
            return

        groupindex = regexp.groupindex

        # There is only one end of the string
        assert len(matches) == 1
        m = matches[0]

        # findall returned plain tuples; recover each named group by its
        # 1-based index in the pattern.
        n = groupindex['quote'] - 1
        quote = m[n]

        n = groupindex['delim'] - 1
        delim = m[n]

        n = groupindex['space'] - 1
        space = bool(m[n])

        # Same doublequote-detection regex the stdlib Sniffer uses.
        dq_regexp = re.compile(
            r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" %
            {'delim': re.escape(delim), 'quote': quote}, re.MULTILINE
        )

        doublequote = bool(dq_regexp.search(sniff_line))

        # Patch the sniffed dialect in place with the corrected values.
        dialect.quotechar = quote
        if delim in delimiters:
            dialect.delimiter = delim
        dialect.doublequote = doublequote
        dialect.skipinitialspace = space
def split_data_line(line, dialect=None):
    """Split one raw ARFF data line into its fields.

    When *dialect* is None it is sniffed from the line (with the stdlib
    Sniffer bug worked around); the dialect actually used is returned so
    callers can reuse it for subsequent lines.

    Returns (fields, dialect).
    """
    delimiters = ",\t"

    # This can not be done in a per reader basis, and relational fields
    # can be HUGE
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))

    # Drop a single trailing newline, if any.
    if line[-1] == '\n':
        line = line[:-1]

    # The Sniffer complains about a single-field line, so make sure the
    # probe string contains at least one delimiter.
    probe = line
    if not any(sep in line for sep in delimiters):
        probe += ","

    if dialect is None:
        dialect = csv.Sniffer().sniff(probe, delimiters=delimiters)
        workaround_csv_sniffer_bug_last_field(sniff_line=probe,
                                              dialect=dialect,
                                              delimiters=delimiters)

    fields = next(csv.reader([line], dialect))
    return fields, dialect
# --------------
# Parsing header
# --------------
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in header (e.g., starts by @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space like characters before @attribute are allowed)
    * Works also if the attribute is spread on multilines.
    * Works if empty lines or comments are in between

    Note that this advances *iterable*: it consumes the line(s) following
    the attribute and returns the first unconsumed one.

    Parameters
    ----------
    attribute : str
       the attribute string.

    Returns
    -------
    name : str
       name of the attribute
    value : str
       value of the attribute
    next : str
       next line to be parsed

    Examples
    --------
    If attribute is a string defined in python as r"floupi real", will
    return floupi as name, and real as value.

    >>> iterable = iter([0] * 10) # dummy iterator
    >>> tokenize_attribute(iterable, r"@attribute floupi real")
    ('floupi', 'real', 0)

    If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
    and real as value.

    >>> tokenize_attribute(iterable, r"  @attribute 'floupi 2' real   ")
    ('floupi 2', 'real', 0)
    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        # Quoted name first ('my attr' type), then unquoted (name type).
        if r_comattrval.match(atrv):
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
            raise ValueError("multi line not supported yet")
    else:
        raise ValueError("First line unparsable: %s" % sattr)

    attribute = to_attribute(name, type)

    if type.lower() == 'relational':
        # Consume the nested attribute block up to '@end <name>'.
        next_item = read_relational_attribute(iterable, attribute, next_item)

    return attribute, next_item
def tokenize_single_comma(val):
    """Split a quoted-name declaration ("'name' type") into (name, type)."""
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    match = r_comattrval.match(val)
    if not match:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return match.group(1).strip(), match.group(2).strip()
    except IndexError:
        raise ValueError("Error while tokenizing attribute")
def tokenize_single_wcomma(val):
    """Split an unquoted-name declaration ("name type") into (name, type)."""
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    match = r_wcomattrval.match(val)
    if not match:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return match.group(1).strip(), match.group(2).strip()
    except IndexError:
        raise ValueError("Error while tokenizing attribute")
def read_relational_attribute(ofile, relational_attribute, i):
    """Read the nested attributes of a relational attribute

    Consumes lines from *ofile* starting at line *i*, appending each
    nested @attribute to relational_attribute.attributes, until the
    matching '@end <name>' line.  Returns the first line after the block.
    """
    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
                                  relational_attribute.name + r'\s*$')

    while not r_end_relational.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # tokenize_attribute consumes lines and returns the next one.
                attr, i = tokenize_attribute(ofile, i)
                relational_attribute.attributes.append(attr)
            else:
                raise ValueError("Error parsing line %s" % i)
        else:
            # Comments / blank lines inside the block are skipped.
            i = next(ofile)

    # Skip the '@end' line itself.
    i = next(ofile)
    return i
def read_header(ofile):
    """Read the header of the iterable ofile.

    Consumes lines up to (and including) the @data marker and returns
    (relation_name, [Attribute, ...]).  Raises ValueError on a header
    line that is neither @attribute nor @relation.
    """
    i = next(ofile)

    # Pass first comments
    while r_comment.match(i):
        i = next(ofile)

    # Header is everything up to DATA attribute ?
    relation = None
    attributes = []
    while not r_datameta.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # tokenize_attribute consumes lines and returns the next one.
                attr, i = tokenize_attribute(ofile, i)
                attributes.append(attr)
            else:
                isrel = r_relation.match(i)
                if isrel:
                    relation = isrel.group(1)
                else:
                    raise ValueError("Error parsing line %s" % i)
                i = next(ofile)
        else:
            # Comments and blank lines between header entries are skipped.
            i = next(ofile)

    return relation, attributes
class MetaData(object):
    """Small container to keep useful information on a ARFF dataset.

    Knows about attributes names and types.  Iterating over an instance
    yields the attribute names in the order they were declared.

    Examples
    --------
    ::

        data, meta = loadarff('iris.arff')
        # This will print the attributes names of the iris.arff dataset
        for i in meta:
            print(i)
        # This works too
        meta.names()
        # Getting attribute type
        types = meta.types()

    Methods
    -------
    names
    types
    """
    def __init__(self, rel, attr):
        self.name = rel
        # Ordered mapping: preserves the attribute declaration order.
        self._attributes = OrderedDict((a.name, a) for a in attr)

    def __repr__(self):
        parts = ["Dataset: %s\n" % self.name]
        for attr_name, attribute in self._attributes.items():
            descr = "\t%s's type is %s" % (attr_name, attribute.type_name)
            if attribute.range:
                descr += ", range is %s" % str(attribute.range)
            parts.append(descr + '\n')
        return "".join(parts)

    def __iter__(self):
        return iter(self._attributes)

    def __getitem__(self, key):
        attribute = self._attributes[key]
        return attribute.type_name, attribute.range

    def names(self):
        """Return the list of attribute names.

        Returns
        -------
        attrnames : list of str
            The attribute names.
        """
        return [key for key in self._attributes]

    def types(self):
        """Return the list of attribute types.

        Returns
        -------
        attr_types : list of str
            The attribute types.
        """
        return [a.type_name for a in self._attributes.values()]
def loadarff(f):
    """
    Read an arff file.

    The data is returned as a record array, which can be accessed much like
    a dictionary of NumPy arrays. For example, if one of the attributes is
    called 'pressure', then its first 10 data points can be accessed from the
    ``data`` record array like so: ``data['pressure'][0:10]``


    Parameters
    ----------
    f : file-like or str
       File-like object to read from, or filename to open.

    Returns
    -------
    data : record array
       The data of the arff file, accessible by attribute names.
    meta : `MetaData`
       Contains information about the arff file such as name and
       type of attributes, the relation (name of the dataset), etc.

    Raises
    ------
    ParseArffError
        This is raised if the given file is not ARFF-formatted.
    NotImplementedError
        The ARFF file has an attribute which is not supported yet.

    Notes
    -----

    This function should be able to read most arff files. Not
    implemented functionality include:

    * date type attributes
    * string type attributes

    It can read files with numeric and nominal attributes. It cannot read
    files with sparse data ({} in the file). However, this function can
    read files with missing data (? in the file), representing the data
    points as NaNs.

    Examples
    --------
    >>> from scipy.io import arff
    >>> from io import StringIO
    >>> content = \"\"\"
    ... @relation foo
    ... @attribute width  numeric
    ... @attribute height numeric
    ... @attribute color  {red,green,blue,yellow,black}
    ... @data
    ... 5.0,3.25,blue
    ... 4.5,3.75,green
    ... 3.0,4.00,red
    ... \"\"\"
    >>> f = StringIO(content)
    >>> data, meta = arff.loadarff(f)
    >>> data
    array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
          dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')])
    >>> meta
    Dataset: foo
    \twidth's type is numeric
    \theight's type is numeric
    \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')
    """
    # Accept either an already-open file-like object or a path; only close
    # what we opened ourselves.
    if hasattr(f, 'read'):
        ofile = f
        opened_here = False
    else:
        ofile = open(f, 'rt')
        opened_here = True
    try:
        return _loadarff(ofile)
    finally:
        if opened_here:
            ofile.close()
def _loadarff(ofile):
    """Parse an already-open ARFF file and return (data, meta)."""
    # Parse the header file
    try:
        rel, attr = read_header(ofile)
    except ValueError as e:
        msg = "Error while parsing header, error was: " + str(e)
        raise ParseArffError(msg)
    # Check whether we have a string attribute (not supported yet)
    hasstr = False
    for a in attr:
        if isinstance(a, StringAttribute):
            hasstr = True
    meta = MetaData(rel, attr)
    # XXX The following code is not great
    # Build the type descriptor descr and the list of convertors to convert
    # each attribute to the suitable type (which should match the one in
    # descr).
    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).
    if hasstr:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")
    ni = len(attr)

    def generator(row_iter, delim=','):
        # Lazily converts raw text rows into tuples of parsed values.
        # TODO: this is where we are spending time (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #     do not change here.
        #   - The function to convert a line to dtyped values could also be
        #     generated on the fly from a string and be executed instead of
        #     looping.
        #   - The regex are overkill: for comments, checking that a line starts
        #     by % should be enough and faster, and for empty lines, same thing
        #     --> this does not seem to change anything.
        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))
        # csv dialect is sniffed from the first data line and then reused
        dialect = None
        for raw in row_iter:
            # We do not abstract skipping comments and empty lines for
            # performance reasons.
            if r_comment.match(raw) or r_empty.match(raw):
                continue
            row, dialect = split_data_line(raw, dialect)
            yield tuple([attr[i].parse_data(row[i]) for i in elems])

    a = list(generator(ofile))
    # No error should happen here: it is a bug otherwise
    data = np.array(a, [(a.name, a.dtype) for a in attr])
    return data, meta
# ----
# Misc
# ----
def basic_stats(data):
    """Return (min, max, mean, std) of *data*, NaN-aware for min/max.

    The standard deviation is the population std from np.std scaled by
    n / (n - 1), matching the historical behaviour of this module.
    """
    correction = data.size / (data.size - 1.)
    return (np.nanmin(data), np.nanmax(data),
            np.mean(data), np.std(data) * correction)
def print_attribute(name, tp, data):
    """Print a one-line CSV summary ``name,type,min,max,mean,std`` for a
    numeric attribute, or the attribute's description otherwise.

    Renamed locals so the builtins ``type``, ``min`` and ``max`` are no
    longer shadowed.
    """
    attr_type = tp.type_name
    if attr_type in ('numeric', 'real', 'integer'):
        vmin, vmax, mean, std = basic_stats(data)
        print("%s,%s,%f,%f,%f,%f" % (name, attr_type, vmin, vmax, mean, std))
    else:
        print(str(tp))
def test_weka(filename):
    """Smoke test: load *filename* and print per-attribute statistics."""
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for i in meta:
        print_attribute(i, meta[i], data[i])
# make sure nose does not find this as a test
test_weka.__test__ = False

if __name__ == '__main__':
    # usage: python <module> <file.arff>
    import sys
    filename = sys.argv[1]
    test_weka(filename)
| bsd-3-clause |
np/alot | alot/helper.py | 1 | 15145 | # -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from datetime import timedelta
from datetime import datetime
from collections import deque
import subprocess
import shlex
import email
import os
import re
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import urwid
import magic
from twisted.internet import reactor
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
import StringIO
import logging
def split_commandline(s, comments=False, posix=True):
    """
    Split a semicolon-separated string of commandlines into a list of
    individual commandline strings.

    :param s: the raw commandline string
    :param comments: whether '#' comments are honoured by the lexer
    :param posix: whether to lex in posix mode
    """
    # shlex seems to remove unescaped quotes, so escape them up front
    escaped = s.replace('\'', '\\\'')
    # shlex cannot cope with unicode in python2; hand it utf-8 bytes
    if isinstance(escaped, unicode):
        escaped = escaped.encode('utf-8')
    lexer = shlex.shlex(escaped, posix=posix)
    lexer.whitespace = ';'
    lexer.whitespace_split = True
    if not comments:
        lexer.commenters = ''
    return [token for token in lexer]
def split_commandstring(cmdstring):
    """
    Split a command string into a list of strings to pass on to
    subprocess.Popen and the like. This simply calls shlex.split but also
    works with unicode bytestrings.
    """
    encoded = cmdstring
    if isinstance(encoded, unicode):
        encoded = encoded.encode('utf-8', errors='ignore')
    return shlex.split(encoded)
def safely_get(clb, E, on_error=''):
    """
    Evaluate ``clb()`` and return its result, falling back to *on_error*
    if an exception of type *E* is raised.

    :param clb: zero-argument callable to evaluate
    :type clb: callable
    :param E: exception class (or tuple of classes) to catch
    :type E: Exception
    :param on_error: value returned when *E* is caught
    :type on_error: str
    """
    try:
        result = clb()
    except E:
        result = on_error
    return result
def string_sanitize(string, tab_width=8):
    r"""
    Strip surrounding whitespace, drop carriage returns and expand tabs
    to the next tab stop.

    :param tab_width: number of spaces per tab stop
    :type tab_width: int

    >>> string_sanitize(' foo\rbar ', 8)
    'foobar'
    >>> string_sanitize('foo\tbar', 8)
    'foo     bar'
    """
    text = string.strip().replace('\r', '')
    sanitized = []
    for line in text.split('\n'):
        if '\t' not in line:
            sanitized.append(line)
            continue
        chunks = line.split('\t')
        last = len(chunks) - 1
        expanded = []
        column = 0
        for pos, chunk in enumerate(chunks):
            column += len(chunk)
            expanded.append(chunk)
            if pos < last:
                # pad up to the next multiple of tab_width
                pad = tab_width - (column % tab_width)
                expanded.append(' ' * pad)
                column += pad
        sanitized.append(''.join(expanded))
    return '\n'.join(sanitized)
def string_decode(string, enc='ascii'):
    """
    Safely decode *string* to a unicode bytestring, treating *enc* as a
    hint for the source encoding and falling back to ascii.
    """
    if enc is None:
        enc = 'ascii'
    try:
        decoded = unicode(string, enc, errors='replace')
    except TypeError:
        # input was already unicode; nothing to do
        decoded = string
    except LookupError:
        # malformed/unknown encoding name: fall back to plain ascii
        decoded = string.decode('ascii', errors='replace')
    return decoded
def shorten(string, maxlen):
    """Truncate *string* to at most *maxlen* characters, marking a
    truncation with a single trailing ellipsis character."""
    if maxlen > 1 and len(string) > maxlen:
        return string[:maxlen - 1] + u'\u2026'
    return string[:maxlen]
def shorten_author_string(authors_string, maxlength):
    """
    Parse a list of authors concatenated as a text string (comma
    separated) and smartly adjust them to maxlength.

    1) If the complete list of sender names does not fit in maxlength, it
       tries to shorten names by using only the first part of each.

    2) If the list is still too long, hide authors according to the
       following priority:

      - First author is always shown (if too long is shortened with ellipsis)
      - If possible, last author is also shown (if too long, uses ellipsis)
      - If there are more than 2 authors in the thread, show the
        maximum of them. More recent senders have higher priority.
      - If it is finally necessary to hide any author, an ellipsis
        between first and next authors is added.

    >>> authors = u'King Kong, Mucho Muchacho, Jaime Huerta, Flash Gordon'
    >>> print shorten_author_string(authors, 60)
    King Kong, Mucho Muchacho, Jaime Huerta, Flash Gordon
    >>> print shorten_author_string(authors, 40)
    King, Mucho, Jaime, Flash
    >>> print shorten_author_string(authors, 20)
    King, …, Jai…, Flash
    >>> print shorten_author_string(authors, 10)
    King, …
    >>> print shorten_author_string(authors, 2)
    K…
    >>> print shorten_author_string(authors, 1)
    K
    """
    # I will create a list of authors by parsing author_string. I use
    # deque to do popleft without performance penalties
    authors = deque()
    # If author list is too long, it uses only the first part of each
    # name (gmail style)
    short_names = len(authors_string) > maxlength
    for au in authors_string.split(", "):
        if short_names:
            author_as_list = au.split()
            if len(author_as_list) > 0:
                authors.append(author_as_list[0])
        else:
            authors.append(au)
    # Author chain will contain the list of author strings to be
    # concatenated using commas for the final formatted author_string.
    authors_chain = deque()
    if len(authors) == 0:
        return u''
    # reserve space for first author (always shown)
    first_au = shorten(authors.popleft(), maxlength)
    remaining_length = maxlength - len(first_au)
    # Tries to add an ellipsis if no space to show more than 1 author
    if authors and maxlength > 3 and remaining_length < 3:
        first_au = shorten(first_au, maxlength - 3)
        remaining_length += 3
    # Tries to add as many authors as possible, working backwards from the
    # most recent sender. It takes into account that if any author will be
    # hidden, an ellipsis should be added.
    while authors and remaining_length >= 3:
        au = authors.pop()
        if len(au) > 1 and (remaining_length == 3 or (authors and
                                                      remaining_length < 7)):
            authors_chain.appendleft(u'\u2026')
            break
        else:
            if authors:
                # 5= ellipsis + 2 x comma and space used as separators
                au_string = shorten(au, remaining_length - 5)
            else:
                # 2 = comma and space used as separator
                au_string = shorten(au, remaining_length - 2)
            remaining_length -= len(au_string) + 2
            authors_chain.appendleft(au_string)
    # Add the first author to the list and concatenate list
    authors_chain.appendleft(first_au)
    authorsstring = ', '.join(authors_chain)
    return authorsstring
def pretty_datetime(d):
    """
    translates :class:`datetime` `d` to a "sup-style" human readable string.

    >>> now = datetime.now()
    >>> now.strftime('%c')
    'Sat 31 Mar 2012 14:47:26 '
    >>> pretty_datetime(now)
    u'just now'
    >>> pretty_datetime(now - timedelta(minutes=1))
    u'1min ago'
    >>> pretty_datetime(now - timedelta(hours=5))
    u'5h ago'
    >>> pretty_datetime(now - timedelta(hours=12))
    u'02:54am'
    >>> pretty_datetime(now - timedelta(days=1))
    u'yest 02pm'
    >>> pretty_datetime(now - timedelta(days=2))
    u'Thu 02pm'
    >>> pretty_datetime(now - timedelta(days=7))
    u'Mar 24'
    >>> pretty_datetime(now - timedelta(days=356))
    u'Apr 2011'
    """
    # NOTE(review): '%P' appears to rely on a platform strftime extension for
    # lowercase am/pm; where unsupported it expands to '' and the 24h
    # formats below are used instead -- confirm on non-glibc platforms.
    ampm = d.strftime('%P')
    if len(ampm):
        hourfmt = '%I' + ampm
        hourminfmt = '%I:%M' + ampm
    else:
        hourfmt = '%Hh'
        hourminfmt = '%H:%M'
    now = datetime.now()
    today = now.date()
    if d.date() == today or d > now - timedelta(hours=6):
        # today, or within the last 6 hours: relative wording or time of day
        delta = datetime.now() - d
        if delta.seconds < 60:
            string = 'just now'
        elif delta.seconds < 3600:
            string = '%dmin ago' % (delta.seconds / 60)
        elif delta.seconds < 6 * 3600:
            string = '%dh ago' % (delta.seconds / 3600)
        else:
            string = d.strftime(hourminfmt)
    elif d.date() == today - timedelta(1):
        string = d.strftime('yest ' + hourfmt)
    elif d.date() > today - timedelta(7):
        # within the last week: abbreviated weekday name
        string = d.strftime('%a ' + hourfmt)
    elif d.year != today.year:
        string = d.strftime('%b %Y')
    else:
        string = d.strftime('%b %d')
    return string_decode(string, 'UTF-8')
def call_cmd(cmdlist, stdin=None):
    """
    get a shell commands output, error message and return value and immediately
    return.

    .. warning::

        This returns with the first screen content for interactive commands.

    :param cmdlist: shellcommand to call, already splitted into a list accepted
                    by :meth:`subprocess.Popen`
    :type cmdlist: list of str
    :param stdin: string to pipe to the process
    :type stdin: str
    :return: triple of stdout, error msg, return value of the shell command
    :rtype: str, str, int
    """
    out, err, ret = '', '', 0
    try:
        if stdin:
            # feed stdin through a pipe and wait for the process to finish
            proc = subprocess.Popen(cmdlist, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            out, err = proc.communicate(stdin)
            ret = proc.poll()
        else:
            out = subprocess.check_output(cmdlist)
            # todo: get error msg. rval
    except (subprocess.CalledProcessError, OSError), e:
        err = str(e)
        ret = -1
    # decode using the terminal encoding detected by urwid
    out = string_decode(out, urwid.util.detected_encoding)
    err = string_decode(err, urwid.util.detected_encoding)
    return out, err, ret
def call_cmd_async(cmdlist, stdin=None, env=None):
    """
    get a shell commands output, error message and return value as a deferred.

    :param cmdlist: shellcommand to call, already splitted into a list
                    accepted by :meth:`twisted.internet.reactor.spawnProcess`
    :type cmdlist: list of str
    :param stdin: string to pipe to the process
    :type stdin: str
    :param env: extra environment variables for the child process
    :type env: dict
    :return: deferred that calls back with triple of stdout, stderr and
             return value of the shell command
    :rtype: `twisted.internet.defer.Deferred`
    """
    class _EverythingGetter(ProcessProtocol):
        # Buffers all stdout/stderr output and fires the deferred once the
        # child process has ended.
        def __init__(self, deferred):
            self.deferred = deferred
            self.outBuf = StringIO.StringIO()
            self.errBuf = StringIO.StringIO()
            self.outReceived = self.outBuf.write
            self.errReceived = self.errBuf.write

        def processEnded(self, status):
            termenc = urwid.util.detected_encoding
            out = string_decode(self.outBuf.getvalue(), termenc)
            err = string_decode(self.errBuf.getvalue(), termenc)
            if status.value.exitCode == 0:
                self.deferred.callback(out)
            else:
                # attach stderr to the failure object so errbacks can report it
                terminated_obj = status.value
                terminated_obj.stderr = err
                self.deferred.errback(terminated_obj)

    d = Deferred()
    # NOTE(review): this updates os.environ itself (for the whole process),
    # not a copy -- presumably intentional, but worth confirming.
    environment = os.environ
    if env is not None:
        environment.update(env)
    logging.debug('ENV = %s' % environment)
    logging.debug('CMD = %s' % cmdlist)
    proc = reactor.spawnProcess(_EverythingGetter(d), executable=cmdlist[0],
                                env=environment,
                                args=cmdlist)
    if stdin:
        logging.debug('writing to stdin')
        proc.write(stdin)
        proc.closeStdin()
    return d
def guess_mimetype(blob):
    """
    uses file magic to determine the mime-type of the given data blob.

    :param blob: file content as read by file.read()
    :type blob: data
    :returns: mime-type, falls back to 'application/octet-stream'
    :rtype: str
    """
    ms = magic.open(magic.MAGIC_MIME_TYPE)
    ms.load()
    detected = ms.buffer(blob)
    # libmagic does not always return proper mimetype strings, cf. issue #459
    if re.match(r'\w+\/\w+', detected):
        return detected
    return 'application/octet-stream'
def guess_encoding(blob):
    """
    uses file magic to determine the character encoding of the given data
    blob.

    :param blob: file content as read by file.read()
    :type blob: data
    :returns: encoding
    :rtype: str
    """
    ms = magic.open(magic.MAGIC_MIME_ENCODING)
    ms.load()
    return ms.buffer(blob)
# TODO: make this work on blobs, not paths
def mimewrap(path, filename=None, ctype=None):
    """Wrap the file at *path* into an email MIME part.

    :param path: path of the file to wrap
    :param filename: filename advertised in the Content-Disposition header;
                     defaults to the basename of *path*
    :param ctype: mime type to use; guessed from the content if omitted
    :returns: the file content wrapped as a MIME part
    """
    # read the whole file and close the handle right away instead of
    # leaking it until garbage collection
    with open(path, 'rb') as f:
        content = f.read()
    ctype = ctype or guess_mimetype(content)
    maintype, subtype = ctype.split('/', 1)
    if maintype == 'text':
        part = MIMEText(content.decode(guess_encoding(content), 'replace'),
                        _subtype=subtype,
                        _charset='utf-8')
    elif maintype == 'image':
        part = MIMEImage(content, _subtype=subtype)
    elif maintype == 'audio':
        part = MIMEAudio(content, _subtype=subtype)
    else:
        part = MIMEBase(maintype, subtype)
        part.set_payload(content)
        # Encode the payload using Base64
        email.encoders.encode_base64(part)
    # Set the filename parameter
    if not filename:
        filename = os.path.basename(path)
    part.add_header('Content-Disposition', 'attachment',
                    filename=filename)
    return part
def shell_quote(text):
    r'''
    Quote *text* so it is safe to use as a single shell word.

    >>> print(shell_quote("hello"))
    'hello'
    >>> print(shell_quote("hello'there"))
    'hello'"'"'there'
    '''
    escaped = text.replace("'", """'"'"'""")
    return "'" + escaped + "'"
def tag_cmp(a, b):
    r'''
    Comparator for sorting tags: all tags of length 1 (those mapped to
    unicode characters) are grouped at the beginning; remaining order is
    case-insensitive.
    '''
    shortest = min(len(a), len(b))
    longest = max(len(a), len(b))
    if shortest == 1 and longest > 1:
        return cmp(len(a), len(b))
    return cmp(a.lower(), b.lower())
def humanize_size(size):
    r'''
    Render a byte count in a compact human readable form.

    >>> humanize_size(1)
    '1'
    >>> humanize_size(123)
    '123'
    >>> humanize_size(1234)
    '1K'
    >>> humanize_size(1234 * 1024)
    '1.2M'
    >>> humanize_size(1234 * 1024 * 1024)
    '1234.0M'
    '''
    scales = ((1, '%i'), (1024, '%iK'), (1024 * 1024, '%.1fM'))
    for factor, fmt in scales:
        if size / factor < 1024:
            return fmt % (float(size) / factor)
    # larger than ~1G: fall through using the largest unit
    return fmt % (size / factor)
def parse_mailcap_nametemplate(tmplate='%s'):
    """Parse a mailcap nametemplate string and return a (prefix, suffix)
    pair to be used with the tempfile module."""
    parts = tmplate.split('%s')
    if len(parts) == 2:
        prefix, suffix = parts
    else:
        # no (or multiple) '%s' placeholder: treat everything as suffix
        prefix, suffix = '', tmplate
    return (prefix, suffix)
| gpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/dns/v1beta1/dns_v1beta1_client.py | 6 | 11760 | """Generated client library for dns version v1beta1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.dns.v1beta1 import dns_v1beta1_messages as messages
class DnsV1beta1(base_api.BaseApiClient):
  """Generated client library for service dns version v1beta1."""
  # NOTE: generated from the API discovery document by apitools; change the
  # generator inputs rather than editing this class by hand.

  MESSAGES_MODULE = messages
  BASE_URL = u'https://www.googleapis.com/dns/v1beta1/'

  _PACKAGE = u'dns'
  _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/ndev.clouddns.readonly', u'https://www.googleapis.com/auth/ndev.clouddns.readwrite']
  _VERSION = u'v1beta1'
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _CLIENT_CLASS_NAME = u'DnsV1beta1'
  _URL_VERSION = u'v1beta1'
  _API_KEY = None

  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None):
    """Create a new dns handle."""
    url = url or self.BASE_URL
    super(DnsV1beta1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers)
    # One service wrapper per API collection.
    self.changes = self.ChangesService(self)
    self.managedZones = self.ManagedZonesService(self)
    self.projects = self.ProjectsService(self)
    self.resourceRecordSets = self.ResourceRecordSetsService(self)

  class ChangesService(base_api.BaseApiService):
    """Service class for the changes resource."""

    _NAME = u'changes'

    def __init__(self, client):
      super(DnsV1beta1.ChangesService, self).__init__(client)
      self._upload_configs = {
          }

    def Create(self, request, global_params=None):
      """Atomically update the ResourceRecordSet collection.

      Args:
        request: (DnsChangesCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Change) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)

    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'dns.changes.create',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[],
        relative_path=u'projects/{project}/managedZones/{managedZone}/changes',
        request_field=u'change',
        request_type_name=u'DnsChangesCreateRequest',
        response_type_name=u'Change',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      """Fetch the representation of an existing Change.

      Args:
        request: (DnsChangesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Change) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.changes.get',
        ordered_params=[u'project', u'managedZone', u'changeId'],
        path_params=[u'changeId', u'managedZone', u'project'],
        query_params=[],
        relative_path=u'projects/{project}/managedZones/{managedZone}/changes/{changeId}',
        request_field='',
        request_type_name=u'DnsChangesGetRequest',
        response_type_name=u'Change',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      """Enumerate Changes to a ResourceRecordSet collection.

      Args:
        request: (DnsChangesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ChangesListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.changes.list',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'maxResults', u'pageToken', u'sortBy', u'sortOrder'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/changes',
        request_field='',
        request_type_name=u'DnsChangesListRequest',
        response_type_name=u'ChangesListResponse',
        supports_download=False,
    )

  class ManagedZonesService(base_api.BaseApiService):
    """Service class for the managedZones resource."""

    _NAME = u'managedZones'

    def __init__(self, client):
      super(DnsV1beta1.ManagedZonesService, self).__init__(client)
      self._upload_configs = {
          }

    def Create(self, request, global_params=None):
      """Create a new ManagedZone.

      Args:
        request: (DnsManagedZonesCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ManagedZone) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)

    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'dns.managedZones.create',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[],
        relative_path=u'projects/{project}/managedZones',
        request_field=u'managedZone',
        request_type_name=u'DnsManagedZonesCreateRequest',
        response_type_name=u'ManagedZone',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
      """Delete a previously created ManagedZone.

      Args:
        request: (DnsManagedZonesDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (DnsManagedZonesDeleteResponse) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    Delete.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'DELETE',
        method_id=u'dns.managedZones.delete',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[],
        relative_path=u'projects/{project}/managedZones/{managedZone}',
        request_field='',
        request_type_name=u'DnsManagedZonesDeleteRequest',
        response_type_name=u'DnsManagedZonesDeleteResponse',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
      """Fetch the representation of an existing ManagedZone.

      Args:
        request: (DnsManagedZonesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ManagedZone) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.managedZones.get',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[],
        relative_path=u'projects/{project}/managedZones/{managedZone}',
        request_field='',
        request_type_name=u'DnsManagedZonesGetRequest',
        response_type_name=u'ManagedZone',
        supports_download=False,
    )

    def List(self, request, global_params=None):
      """Enumerate ManagedZones that have been created but not yet deleted.

      Args:
        request: (DnsManagedZonesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ManagedZonesListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.managedZones.list',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[u'dnsName', u'maxResults', u'pageToken'],
        relative_path=u'projects/{project}/managedZones',
        request_field='',
        request_type_name=u'DnsManagedZonesListRequest',
        response_type_name=u'ManagedZonesListResponse',
        supports_download=False,
    )

  class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""

    _NAME = u'projects'

    def __init__(self, client):
      super(DnsV1beta1.ProjectsService, self).__init__(client)
      self._upload_configs = {
          }

    def Get(self, request, global_params=None):
      """Fetch the representation of an existing Project.

      Args:
        request: (DnsProjectsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Project) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.projects.get',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[],
        relative_path=u'projects/{project}',
        request_field='',
        request_type_name=u'DnsProjectsGetRequest',
        response_type_name=u'Project',
        supports_download=False,
    )

  class ResourceRecordSetsService(base_api.BaseApiService):
    """Service class for the resourceRecordSets resource."""

    _NAME = u'resourceRecordSets'

    def __init__(self, client):
      super(DnsV1beta1.ResourceRecordSetsService, self).__init__(client)
      self._upload_configs = {
          }

    def List(self, request, global_params=None):
      """Enumerate ResourceRecordSets that have been created but not yet deleted.

      Args:
        request: (DnsResourceRecordSetsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ResourceRecordSetsListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.resourceRecordSets.list',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'maxResults', u'name', u'pageToken', u'type'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/rrsets',
        request_field='',
        request_type_name=u'DnsResourceRecordSetsListRequest',
        response_type_name=u'ResourceRecordSetsListResponse',
        supports_download=False,
    )
| apache-2.0 |
nathanbjenx/cairis | cairis/gui/AssetEnvironmentPanel.py | 1 | 7194 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from PropertiesListCtrl import PropertiesListCtrl
from cairis.core.AssetEnvironmentProperties import AssetEnvironmentProperties
from EnvironmentListCtrl import EnvironmentListCtrl
from AssetEnvironmentNotebook import AssetEnvironmentNotebook
__author__ = 'Shamal Faily'
class AssetEnvironmentPanel(wx.Panel):
def __init__(self,parent,dp):
wx.Panel.__init__(self,parent,ASSET_PANELENVIRONMENT_ID)
self.dbProxy = dp
self.theAssetId = None
self.theEnvironmentDictionary = {}
self.theSelectedIdx = -1
mainSizer = wx.BoxSizer(wx.HORIZONTAL)
environmentBox = wx.StaticBox(self)
environmentListSizer = wx.StaticBoxSizer(environmentBox,wx.HORIZONTAL)
mainSizer.Add(environmentListSizer,0,wx.EXPAND)
self.environmentList = EnvironmentListCtrl(self,ASSETENVIRONMENT_LISTENVIRONMENTS_ID,self.dbProxy)
environmentListSizer.Add(self.environmentList,1,wx.EXPAND)
dimBox = wx.StaticBox(self)
environmentDimSizer = wx.StaticBoxSizer(dimBox,wx.VERTICAL)
mainSizer.Add(environmentDimSizer,1,wx.EXPAND)
nbBox = wx.StaticBox(self,-1)
nbSizer = wx.StaticBoxSizer(nbBox,wx.HORIZONTAL)
environmentDimSizer.Add(nbSizer,1,wx.EXPAND)
self.notebook = AssetEnvironmentNotebook(self,self.dbProxy)
nbSizer.Add(self.notebook,1,wx.EXPAND)
self.propertiesList = self.notebook.FindWindowById(ASSETENVIRONMENT_LISTPROPERTIES_ID)
self.associationCtrl = self.notebook.FindWindowById(ASSET_LISTASSOCIATIONS_ID)
self.SetSizer(mainSizer)
self.environmentList.Bind(wx.EVT_LIST_INSERT_ITEM,self.OnAddEnvironment)
self.environmentList.Bind(wx.EVT_LIST_DELETE_ITEM,self.OnDeleteEnvironment)
self.environmentList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnEnvironmentSelected)
self.environmentList.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnEnvironmentDeselected)
self.propertiesList.Disable()
self.associationCtrl.Disable()
def loadControls(self,asset):
self.theAssetId = asset.id()
self.environmentList.Unbind(wx.EVT_LIST_ITEM_SELECTED)
self.environmentList.Unbind(wx.EVT_LIST_ITEM_DESELECTED)
noOfProperties = len(asset.environmentProperties())
if (noOfProperties > 0):
environmentNames = []
for cp in asset.environmentProperties():
environmentNames.append(cp.name())
self.environmentList.load(environmentNames)
for cp in asset.environmentProperties():
environmentName = cp.name()
self.theEnvironmentDictionary[environmentName] = cp
environmentNames.append(environmentName)
environmentName = environmentNames[0]
p = self.theEnvironmentDictionary[environmentName]
self.propertiesList.setEnvironment(environmentName)
self.propertiesList.load(p.properties(),p.rationale())
self.associationCtrl.setEnvironment(environmentName)
self.associationCtrl.load(p.associations())
self.environmentList.Select(0)
self.environmentList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnEnvironmentSelected)
self.environmentList.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnEnvironmentDeselected)
self.propertiesList.Enable()
self.associationCtrl.Enable()
if (noOfProperties > 0):
self.theSelectedIdx = 0
def OnEnvironmentSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
p = self.theEnvironmentDictionary[environmentName]
self.propertiesList.setEnvironment(environmentName)
self.propertiesList.load(p.properties(),p.rationale())
self.associationCtrl.setEnvironment(environmentName)
self.associationCtrl.load(p.associations())
self.propertiesList.Enable()
self.associationCtrl.Enable()
def OnEnvironmentDeselected(self,evt):
self.theSelectedIdx = evt.GetIndex()
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
syProperties,pRationale = self.propertiesList.properties()
self.theEnvironmentDictionary[environmentName] = AssetEnvironmentProperties(environmentName,syProperties,pRationale,self.associationCtrl.dimensions())
self.propertiesList.setEnvironment('')
self.propertiesList.DeleteAllItems()
self.associationCtrl.setEnvironment('')
self.associationCtrl.DeleteAllItems()
self.propertiesList.Disable()
self.associationCtrl.Disable()
self.theSelectedIdx = -1
def OnAddEnvironment(self,evt):
    """Handler for adding an environment: seed defaults, load inherited data.

    Registers the new environment with default (zeroed) property values,
    clears the editing widgets, and — when the environment inherits from
    another one and the asset already exists in the database — replaces
    the defaults with the inherited properties fetched via dbProxy.

    :param evt: wx.ListEvent for the newly added row.
    """
    self.theSelectedIdx = evt.GetIndex()
    environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
    # Defaults: 8 zeroed property values with 'None' rationales.
    # NOTE(review): this construction passes no association dimensions,
    # unlike the 4-argument calls elsewhere in this class — presumably the
    # AssetEnvironmentProperties constructor defaults that argument;
    # confirm against its definition.
    self.theEnvironmentDictionary[environmentName] = AssetEnvironmentProperties(environmentName,[0,0,0,0,0,0,0,0],['None','None','None','None','None','None','None','None'])
    self.environmentList.Select(self.theSelectedIdx)
    self.propertiesList.setEnvironment(environmentName)
    self.propertiesList.DeleteAllItems()
    self.associationCtrl.setEnvironment(environmentName)
    self.associationCtrl.DeleteAllItems()
    self.propertiesList.Enable()
    self.associationCtrl.Enable()
    # If this environment is a composite/inheriting one, pre-populate the
    # widgets from the parent environment's stored asset properties.
    inheritedEnv = self.environmentList.inheritedEnvironment()
    if (inheritedEnv != '' and self.theAssetId != None):
        p = self.dbProxy.inheritedAssetProperties(self.theAssetId,inheritedEnv)
        self.theEnvironmentDictionary[environmentName] = p
        self.propertiesList.setEnvironment(environmentName)
        self.propertiesList.load(p.properties(),p.rationale())
        self.associationCtrl.setEnvironment(environmentName)
        self.associationCtrl.load(p.associations())
def OnDeleteEnvironment(self,evt):
    """Handler for removing an environment: drop cached data, reset the UI.

    Deletes the environment's entry from theEnvironmentDictionary and
    returns the properties/association widgets to their cleared, disabled
    state.

    :param evt: wx.ListEvent for the removed row.
    """
    selectedIdx = evt.GetIndex()
    environmentName = self.environmentList.GetItemText(selectedIdx)
    del self.theEnvironmentDictionary[environmentName]
    # No selection remains after a delete.
    self.theSelectedIdx = -1
    self.propertiesList.setEnvironment('')
    self.propertiesList.DeleteAllItems()
    self.associationCtrl.setEnvironment('')
    self.associationCtrl.DeleteAllItems()
    self.propertiesList.Disable()
    self.associationCtrl.Disable()
def environmentProperties(self):
    """Return the per-environment properties collected by this panel.

    Flushes any pending edits for the currently selected environment into
    theEnvironmentDictionary first (so callers always see the latest
    widget state), then returns the dictionary's values.

    :return: view of AssetEnvironmentProperties objects, one per environment.
    """
    if (self.theSelectedIdx != -1):
        environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
        syProperties,pRationale = self.propertiesList.properties()
        self.theEnvironmentDictionary[environmentName] = AssetEnvironmentProperties(environmentName,syProperties,pRationale,self.associationCtrl.dimensions())
    return self.theEnvironmentDictionary.values()
| apache-2.0 |
rizumu/django | tests/gis_tests/gdal_tests/test_raster.py | 238 | 13865 | """
gdalinfo tests/gis_tests/data/rasters/raster.tif:
Driver: GTiff/GeoTIFF
Files: tests/gis_tests/data/rasters/raster.tif
Size is 163, 174
Coordinate System is:
PROJCS["NAD83 / Florida GDL Albers",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.2572221010002,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",24],
PARAMETER["standard_parallel_2",31.5],
PARAMETER["latitude_of_center",24],
PARAMETER["longitude_of_center",-84],
PARAMETER["false_easting",400000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","3086"]]
Origin = (511700.468070655711927,435103.377123198588379)
Pixel Size = (100.000000000000000,-100.000000000000000)
Metadata:
AREA_OR_POINT=Area
Image Structure Metadata:
INTERLEAVE=BAND
Corner Coordinates:
Upper Left ( 511700.468, 435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
Lower Left ( 511700.468, 417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
Upper Right ( 528000.468, 435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
Lower Right ( 528000.468, 417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
Center ( 519850.468, 426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
NoData Value=15
"""
import os
import struct
import tempfile
import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils._os import upath
from ..data.rasters.textrasters import JSON_RASTER
if HAS_GDAL:
from django.contrib.gis.gdal import GDALRaster, GDAL_VERSION
from django.contrib.gis.gdal.raster.band import GDALBand
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALRasterTests(unittest.TestCase):
    """
    Test a GDALRaster instance created from a file (GeoTiff).

    The expected metadata values (size, SRID, geotransform, nodata) come
    from the gdalinfo dump of the fixture reproduced in the module
    docstring.
    """
    def setUp(self):
        # Fixture GeoTiff shipped with the test suite.
        self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
                                    '../data/rasters/raster.tif')
        self.rs = GDALRaster(self.rs_path)

    def test_rs_name_repr(self):
        self.assertEqual(self.rs_path, self.rs.name)
        six.assertRegex(self, repr(self.rs), "<Raster object at 0x\w+>")

    def test_rs_driver(self):
        self.assertEqual(self.rs.driver.name, 'GTiff')

    def test_rs_size(self):
        self.assertEqual(self.rs.width, 163)
        self.assertEqual(self.rs.height, 174)

    def test_rs_srs(self):
        # NAD83 / Florida GDL Albers, per the fixture's projection metadata.
        self.assertEqual(self.rs.srs.srid, 3086)
        self.assertEqual(self.rs.srs.units, (1.0, 'metre'))

    def test_geotransform_and_friends(self):
        # Assert correct values for file based raster
        self.assertEqual(self.rs.geotransform,
                         [511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0])
        self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
        self.assertEqual(self.rs.origin.x, 511700.4680706557)
        self.assertEqual(self.rs.origin.y, 435103.3771231986)
        self.assertEqual(self.rs.scale, [100.0, -100.0])
        self.assertEqual(self.rs.scale.x, 100.0)
        self.assertEqual(self.rs.scale.y, -100.0)
        self.assertEqual(self.rs.skew, [0, 0])
        self.assertEqual(self.rs.skew.x, 0)
        self.assertEqual(self.rs.skew.y, 0)
        # Create in-memory rasters and change gtvalues
        rsmem = GDALRaster(JSON_RASTER)
        # GDAL geotransform order: [origin_x, scale_x, skew_x,
        #                           origin_y, skew_y, scale_y]
        rsmem.geotransform = range(6)
        self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
        self.assertEqual(rsmem.origin, [0, 3])
        self.assertEqual(rsmem.origin.x, 0)
        self.assertEqual(rsmem.origin.y, 3)
        self.assertEqual(rsmem.scale, [1, 5])
        self.assertEqual(rsmem.scale.x, 1)
        self.assertEqual(rsmem.scale.y, 5)
        self.assertEqual(rsmem.skew, [2, 4])
        self.assertEqual(rsmem.skew.x, 2)
        self.assertEqual(rsmem.skew.y, 4)
        self.assertEqual(rsmem.width, 5)
        self.assertEqual(rsmem.height, 5)

    def test_rs_extent(self):
        # (xmin, ymin, xmax, ymax) in the raster's projected units.
        self.assertEqual(self.rs.extent,
                         (511700.4680706557, 417703.3771231986,
                          528000.4680706557, 435103.3771231986))

    def test_rs_bands(self):
        self.assertEqual(len(self.rs.bands), 1)
        self.assertIsInstance(self.rs.bands[0], GDALBand)

    def test_file_based_raster_creation(self):
        # Prepare tempfile
        rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
        # Create file-based raster from scratch
        # NOTE(review): the 'srid' key is given a WKT string here, not an
        # integer SRID — presumably GDALRaster accepts both; confirm.
        GDALRaster({
            'datatype': self.rs.bands[0].datatype(),
            'driver': 'tif',
            'name': rstfile.name,
            'width': 163,
            'height': 174,
            'nr_of_bands': 1,
            'srid': self.rs.srs.wkt,
            'origin': (self.rs.origin.x, self.rs.origin.y),
            'scale': (self.rs.scale.x, self.rs.scale.y),
            'skew': (self.rs.skew.x, self.rs.skew.y),
            'bands': [{
                'data': self.rs.bands[0].data(),
                'nodata_value': self.rs.bands[0].nodata_value,
            }],
        })
        # Reload newly created raster from file
        restored_raster = GDALRaster(rstfile.name)
        self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
        self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
        if numpy:
            numpy.testing.assert_equal(
                restored_raster.bands[0].data(),
                self.rs.bands[0].data()
            )
        else:
            self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())

    def test_raster_warp(self):
        # Create in memory raster
        source = GDALRaster({
            'datatype': 1,
            'driver': 'MEM',
            'name': 'sourceraster',
            'width': 4,
            'height': 4,
            'nr_of_bands': 1,
            'srid': 3086,
            'origin': (500000, 400000),
            'scale': (100, -100),
            'skew': (0, 0),
            'bands': [{
                'data': range(16),
                'nodata_value': 255,
            }],
        })
        # Test altering the scale, width, and height of a raster
        data = {
            'scale': [200, -200],
            'width': 2,
            'height': 2,
        }
        target = source.warp(data)
        self.assertEqual(target.width, data['width'])
        self.assertEqual(target.height, data['height'])
        self.assertEqual(target.scale, data['scale'])
        self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
        self.assertEqual(target.name, 'sourceraster_copy.MEM')
        result = target.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        # Downsampling 4x4 -> 2x2 keeps the values at positions 5, 7, 13, 15.
        self.assertEqual(result, [5, 7, 13, 15])
        # Test altering the name and datatype (to float)
        data = {
            'name': '/path/to/targetraster.tif',
            'datatype': 6,
        }
        target = source.warp(data)
        self.assertEqual(target.bands[0].datatype(), 6)
        self.assertEqual(target.name, '/path/to/targetraster.tif')
        self.assertEqual(target.driver.name, 'MEM')
        result = target.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        self.assertEqual(
            result,
            [0.0, 1.0, 2.0, 3.0,
             4.0, 5.0, 6.0, 7.0,
             8.0, 9.0, 10.0, 11.0,
             12.0, 13.0, 14.0, 15.0]
        )

    def test_raster_transform(self):
        if GDAL_VERSION < (1, 8, 1):
            self.skipTest("GDAL >= 1.8.1 is required for this test")
        # Prepare tempfile and nodata value
        rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
        ndv = 99
        # Create in file based raster
        source = GDALRaster({
            'datatype': 1,
            'driver': 'tif',
            'name': rstfile.name,
            'width': 5,
            'height': 5,
            'nr_of_bands': 1,
            'srid': 4326,
            'origin': (-5, 5),
            'scale': (2, -2),
            'skew': (0, 0),
            'bands': [{
                'data': range(25),
                'nodata_value': ndv,
            }],
        })
        # Transform raster into srid 3086 (original comment said 4326,
        # which is the *source* SRID).
        target = source.transform(3086)
        # Reload data from disk
        target = GDALRaster(target.name)
        self.assertEqual(target.srs.srid, 3086)
        self.assertEqual(target.width, 7)
        self.assertEqual(target.height, 7)
        self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
        self.assertEqual(target.origin, [9124842.791079799, 1589911.6476407414])
        self.assertEqual(target.scale, [223824.82664250192, -223824.82664250192])
        self.assertEqual(target.skew, [0, 0])
        result = target.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        # The reprojection of a raster that spans over a large area
        # skews the data matrix and might introduce nodata values.
        self.assertEqual(
            result,
            [
                ndv, ndv, ndv, ndv, 4, ndv, ndv,
                ndv, ndv, 2, 3, 9, ndv, ndv,
                ndv, 1, 2, 8, 13, 19, ndv,
                0, 6, 6, 12, 18, 18, 24,
                ndv, 10, 11, 16, 22, 23, ndv,
                ndv, ndv, 15, 21, 22, ndv, ndv,
                ndv, ndv, 20, ndv, ndv, ndv, ndv,
            ]
        )
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALBandTests(unittest.TestCase):
    """Tests for GDALBand metadata access and the data() getter/setter."""

    def setUp(self):
        # First (and only) band of the fixture GeoTiff.
        self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
                                    '../data/rasters/raster.tif')
        rs = GDALRaster(self.rs_path)
        self.band = rs.bands[0]

    def test_band_data(self):
        # Values match the gdalinfo dump in the module docstring
        # (Band 1: Type=Byte, NoData Value=15).
        self.assertEqual(self.band.width, 163)
        self.assertEqual(self.band.height, 174)
        self.assertEqual(self.band.description, '')
        self.assertEqual(self.band.datatype(), 1)
        self.assertEqual(self.band.datatype(as_string=True), 'GDT_Byte')
        self.assertEqual(self.band.min, 0)
        self.assertEqual(self.band.max, 255)
        self.assertEqual(self.band.nodata_value, 15)

    def test_read_mode_error(self):
        # Open raster in read mode
        rs = GDALRaster(self.rs_path, write=False)
        band = rs.bands[0]
        # Setting attributes on a read-only raster raises an exception in
        # the _flush method (original comment said "write mode").
        self.assertRaises(GDALException, setattr, band, 'nodata_value', 10)

    def test_band_data_setters(self):
        # Create in-memory raster and get band
        rsmem = GDALRaster({
            'datatype': 1,
            'driver': 'MEM',
            'name': 'mem_rst',
            'width': 10,
            'height': 10,
            'nr_of_bands': 1,
            'srid': 4326,
        })
        bandmem = rsmem.bands[0]
        # Set nodata value
        bandmem.nodata_value = 99
        self.assertEqual(bandmem.nodata_value, 99)
        # Set data for entire dataset
        bandmem.data(range(100))
        if numpy:
            numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10))
        else:
            self.assertEqual(bandmem.data(), list(range(100)))
        # Prepare data for setting values in subsequent tests: a 2x2 block
        # written at offset (1, 1), in every supported input format.
        block = list(range(100, 104))
        packed_block = struct.pack('<' + 'B B B B', *block)
        # Set data from list
        bandmem.data(block, (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from packed block
        bandmem.data(packed_block, (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from bytes
        bandmem.data(bytes(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from bytearray
        bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from memoryview
        bandmem.data(six.memoryview(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from numpy array
        if numpy:
            bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2))
            numpy.testing.assert_equal(
                bandmem.data(offset=(1, 1), size=(2, 2)),
                numpy.array(block).reshape(2, 2)
            )
        # Test json input data
        rsmemjson = GDALRaster(JSON_RASTER)
        bandmemjson = rsmemjson.bands[0]
        if numpy:
            numpy.testing.assert_equal(
                bandmemjson.data(),
                numpy.array(range(25)).reshape(5, 5)
            )
        else:
            self.assertEqual(bandmemjson.data(), list(range(25)))
| bsd-3-clause |
apache/incubator-airflow | airflow/serialization/enums.py | 7 | 1441 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Enums for DAG serialization."""
from enum import Enum, unique
# Fields of an encoded object in serialization.
@unique
class Encoding(str, Enum):
    """Keys that tag an encoded object during DAG serialization.

    Mixes in ``str`` so members compare equal to (and serialize as) their
    plain string values.
    """

    TYPE = "__type"
    VAR = "__var"
# Supported types for encoding. primitives and list are not encoded.
@unique
class DagAttributeTypes(str, Enum):
    """String type tags for DAG attribute values supported by serialization.

    Primitives and lists are serialized directly and therefore have no tag
    here.
    """

    DAG = "dag"
    OP = "operator"
    DATETIME = "datetime"
    TIMEDELTA = "timedelta"
    TIMEZONE = "timezone"
    RELATIVEDELTA = "relativedelta"
    DICT = "dict"
    SET = "set"
    TUPLE = "tuple"
    POD = "k8s.V1Pod"
    TASK_GROUP = "taskgroup"
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/aio/operations/_net_app_resource_operations.py | 1 | 12004 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetAppResourceOperations:
    """NetAppResourceOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.netapp.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _post_availability_check(
        self,
        url_template: str,
        location: str,
        body: Any,
        body_model_name: str,
        **kwargs: Any
    ) -> "_models.CheckAvailabilityResponse":
        """Shared implementation for the three check_* operations.

        They differ only in the request model and endpoint, so this helper
        serializes *body* as *body_model_name*, POSTs it to the
        location-scoped *url_template* and deserializes the
        CheckAvailabilityResponse.

        :param url_template: Operation URL with {subscriptionId}/{location} placeholders.
        :param location: The location.
        :param body: Request model instance to serialize as the POST body.
        :param body_model_name: Serializer name of the request model.
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CheckAvailabilityResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'location': self._serialize.url("location", location, 'str'),
        }
        url = self._client.format_url(url_template, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content = self._serialize.body(body, body_model_name)
        request = self._client.post(url, query_parameters, header_parameters, content=body_content)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CheckAvailabilityResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    async def check_name_availability(
        self,
        location: str,
        name: str,
        type: Union[str, "_models.CheckNameResourceTypes"],
        resource_group: str,
        **kwargs: Any
    ) -> "_models.CheckAvailabilityResponse":
        """Check resource name availability.

        Check if a resource name is available.

        :param location: The location.
        :type location: str
        :param name: Resource name to verify.
        :type name: str
        :param type: Resource type used for verification.
        :type type: str or ~azure.mgmt.netapp.models.CheckNameResourceTypes
        :param resource_group: Resource group name.
        :type resource_group: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckAvailabilityResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.netapp.models.CheckAvailabilityResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _body = _models.ResourceNameAvailabilityRequest(name=name, type=type, resource_group=resource_group)
        return await self._post_availability_check(
            self.check_name_availability.metadata['url'],
            location,
            _body,
            'ResourceNameAvailabilityRequest',
            **kwargs
        )
    check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.NetApp/locations/{location}/checkNameAvailability'}  # type: ignore

    async def check_file_path_availability(
        self,
        location: str,
        name: str,
        subnet_id: str,
        **kwargs: Any
    ) -> "_models.CheckAvailabilityResponse":
        """Check file path availability.

        Check if a file path is available.

        :param location: The location.
        :type location: str
        :param name: File path to verify.
        :type name: str
        :param subnet_id: The Azure Resource URI for a delegated subnet. Must have the delegation
         Microsoft.NetApp/volumes.
        :type subnet_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckAvailabilityResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.netapp.models.CheckAvailabilityResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _body = _models.FilePathAvailabilityRequest(name=name, subnet_id=subnet_id)
        return await self._post_availability_check(
            self.check_file_path_availability.metadata['url'],
            location,
            _body,
            'FilePathAvailabilityRequest',
            **kwargs
        )
    check_file_path_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.NetApp/locations/{location}/checkFilePathAvailability'}  # type: ignore

    async def check_quota_availability(
        self,
        location: str,
        name: str,
        type: Union[str, "_models.CheckQuotaNameResourceTypes"],
        resource_group: str,
        **kwargs: Any
    ) -> "_models.CheckAvailabilityResponse":
        """Check quota availability.

        Check if a quota is available.

        :param location: The location.
        :type location: str
        :param name: Name of the resource to verify.
        :type name: str
        :param type: Resource type used for verification.
        :type type: str or ~azure.mgmt.netapp.models.CheckQuotaNameResourceTypes
        :param resource_group: Resource group name.
        :type resource_group: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckAvailabilityResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.netapp.models.CheckAvailabilityResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _body = _models.QuotaAvailabilityRequest(name=name, type=type, resource_group=resource_group)
        return await self._post_availability_check(
            self.check_quota_availability.metadata['url'],
            location,
            _body,
            'QuotaAvailabilityRequest',
            **kwargs
        )
    check_quota_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.NetApp/locations/{location}/checkQuotaAvailability'}  # type: ignore
| mit |
CristianBB/SickRage | lib/github/PullRequestPart.py | 74 | 3771 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.Repository
import github.NamedUser
class PullRequestPart(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents PullRequestParts as returned for example by http://developer.github.com/v3/todo
    """

    @property
    def label(self):
        """
        :type: string
        """
        return self._label.value

    @property
    def ref(self):
        """
        :type: string
        """
        return self._ref.value

    @property
    def repo(self):
        """
        :type: :class:`github.Repository.Repository`
        """
        return self._repo.value

    @property
    def sha(self):
        """
        :type: string
        """
        return self._sha.value

    @property
    def user(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        return self._user.value

    def _initAttributes(self):
        # Every attribute starts out NotSet until _useAttributes fills it in
        # from an API payload.
        for attr_name in ("_label", "_ref", "_repo", "_sha", "_user"):
            setattr(self, attr_name, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        # Plain string payload fields.
        for key in ("label", "ref", "sha"):  # pragma no branch
            if key in attributes:
                setattr(self, "_" + key, self._makeStringAttribute(attributes[key]))
        # Nested objects wrapped in their own GithubObject subclasses.
        if "repo" in attributes:  # pragma no branch
            self._repo = self._makeClassAttribute(github.Repository.Repository, attributes["repo"])
        if "user" in attributes:  # pragma no branch
            self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
| gpl-3.0 |
himleyb85/django | tests/template_tests/syntax_tests/test_filter_syntax.py | 139 | 9295 | # coding: utf-8
from __future__ import unicode_literals
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import SomeClass, SomeOtherException, UTF8Class, setup
class FilterSyntaxTests(SimpleTestCase):
@setup({'filter-syntax01': '{{ var|upper }}'})
def test_filter_syntax01(self):
"""
Basic filter usage
"""
output = self.engine.render_to_string('filter-syntax01', {"var": "Django is the greatest!"})
self.assertEqual(output, "DJANGO IS THE GREATEST!")
@setup({'filter-syntax02': '{{ var|upper|lower }}'})
def test_filter_syntax02(self):
"""
Chained filters
"""
output = self.engine.render_to_string('filter-syntax02', {"var": "Django is the greatest!"})
self.assertEqual(output, "django is the greatest!")
@setup({'filter-syntax03': '{{ var |upper }}'})
def test_filter_syntax03(self):
"""
Allow spaces before the filter pipe
"""
output = self.engine.render_to_string('filter-syntax03', {'var': 'Django is the greatest!'})
self.assertEqual(output, 'DJANGO IS THE GREATEST!')
@setup({'filter-syntax04': '{{ var| upper }}'})
def test_filter_syntax04(self):
"""
Allow spaces after the filter pipe
"""
output = self.engine.render_to_string('filter-syntax04', {'var': 'Django is the greatest!'})
self.assertEqual(output, 'DJANGO IS THE GREATEST!')
@setup({'filter-syntax05': '{{ var|does_not_exist }}'})
def test_filter_syntax05(self):
"""
Raise TemplateSyntaxError for a nonexistent filter
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax05')
@setup({'filter-syntax06': '{{ var|fil(ter) }}'})
def test_filter_syntax06(self):
"""
Raise TemplateSyntaxError when trying to access a filter containing
an illegal character
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax06')
@setup({'filter-syntax07': "{% nothing_to_see_here %}"})
def test_filter_syntax07(self):
"""
Raise TemplateSyntaxError for invalid block tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax07')
@setup({'filter-syntax08': "{% %}"})
def test_filter_syntax08(self):
"""
Raise TemplateSyntaxError for empty block tags
"""
with self.assertRaisesMessage(TemplateSyntaxError, 'Empty block tag on line 1'):
self.engine.get_template('filter-syntax08')
@setup({'filter-syntax08-multi-line': "line 1\nline 2\nline 3{% %}\nline 4\nline 5"})
def test_filter_syntax08_multi_line(self):
"""
Raise TemplateSyntaxError for empty block tags in templates with
multiple lines.
"""
with self.assertRaisesMessage(TemplateSyntaxError, 'Empty block tag on line 3'):
self.engine.get_template('filter-syntax08-multi-line')
@setup({'filter-syntax09': '{{ var|cut:"o"|upper|lower }}'})
def test_filter_syntax09(self):
"""
Chained filters, with an argument to the first one
"""
output = self.engine.render_to_string('filter-syntax09', {'var': 'Foo'})
self.assertEqual(output, 'f')
@setup({'filter-syntax10': r'{{ var|default_if_none:" endquote\" hah" }}'})
def test_filter_syntax10(self):
"""
Literal string as argument is always "safe" from auto-escaping.
"""
output = self.engine.render_to_string('filter-syntax10', {"var": None})
self.assertEqual(output, ' endquote" hah')
@setup({'filter-syntax11': r'{{ var|default_if_none:var2 }}'})
def test_filter_syntax11(self):
"""
Variable as argument
"""
output = self.engine.render_to_string('filter-syntax11', {"var": None, "var2": "happy"})
self.assertEqual(output, 'happy')
@setup({'filter-syntax12': r'{{ var|yesno:"yup,nup,mup" }} {{ var|yesno }}'})
def test_filter_syntax12(self):
"""
Default argument testing
"""
output = self.engine.render_to_string('filter-syntax12', {"var": True})
self.assertEqual(output, 'yup yes')
@setup({'filter-syntax13': r'1{{ var.method3 }}2'})
def test_filter_syntax13(self):
"""
Fail silently for methods that raise an exception with a
`silent_variable_failure` attribute
"""
output = self.engine.render_to_string('filter-syntax13', {"var": SomeClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, "1INVALID2")
else:
self.assertEqual(output, "12")
@setup({'filter-syntax14': r'1{{ var.method4 }}2'})
def test_filter_syntax14(self):
"""
In methods that raise an exception without a
`silent_variable_attribute` set to True, the exception propagates
"""
with self.assertRaises(SomeOtherException):
self.engine.render_to_string('filter-syntax14', {"var": SomeClass()})
    @setup({'filter-syntax15': r'{{ var|default_if_none:"foo\bar" }}'})
    def test_filter_syntax15(self):
        """
        Escaped backslash in argument
        """
        # The backslash in the template literal survives into the output.
        output = self.engine.render_to_string('filter-syntax15', {"var": None})
        self.assertEqual(output, r'foo\bar')
    @setup({'filter-syntax16': r'{{ var|default_if_none:"foo\now" }}'})
    def test_filter_syntax16(self):
        """
        Escaped backslash using known escape char
        """
        # '\n' in the template source is not interpreted as a newline escape.
        output = self.engine.render_to_string('filter-syntax16', {"var": None})
        self.assertEqual(output, r'foo\now')
    @setup({'filter-syntax17': r'{{ var|join:"" }}'})
    def test_filter_syntax17(self):
        """
        Empty strings can be passed as arguments to filters
        """
        # join with the empty string simply concatenates the list items.
        output = self.engine.render_to_string('filter-syntax17', {'var': ['a', 'b', 'c']})
        self.assertEqual(output, 'abc')
    @setup({'filter-syntax18': r'{{ var }}'})
    def test_filter_syntax18(self):
        """
        Make sure that any unicode strings are converted to bytestrings
        in the final output.
        """
        # Non-ASCII characters produced by the object must render intact.
        output = self.engine.render_to_string('filter-syntax18', {'var': UTF8Class()})
        self.assertEqual(output, '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
    @setup({'filter-syntax19': '{{ var|truncatewords:1 }}'})
    def test_filter_syntax19(self):
        """
        Numbers as filter arguments should work
        """
        # truncatewords receives the integer literal 1 as its argument.
        output = self.engine.render_to_string('filter-syntax19', {"var": "hello world"})
        self.assertEqual(output, "hello ...")
    @setup({'filter-syntax20': '{{ ""|default_if_none:"was none" }}'})
    def test_filter_syntax20(self):
        """
        Filters should accept empty string constants
        """
        # The empty string is not None, so default_if_none leaves it unchanged.
        output = self.engine.render_to_string('filter-syntax20')
        self.assertEqual(output, "")
    @setup({'filter-syntax21': r'1{{ var.silent_fail_key }}2'})
    def test_filter_syntax21(self):
        """
        Fail silently for non-callable attribute and dict lookups which
        raise an exception with a "silent_variable_failure" attribute
        """
        output = self.engine.render_to_string('filter-syntax21', {"var": SomeClass()})
        # Silent failure renders string_if_invalid if configured, else nothing.
        if self.engine.string_if_invalid:
            self.assertEqual(output, "1INVALID2")
        else:
            self.assertEqual(output, "12")
    @setup({'filter-syntax22': r'1{{ var.silent_fail_attribute }}2'})
    def test_filter_syntax22(self):
        """
        Fail silently for non-callable attribute and dict lookups which
        raise an exception with a `silent_variable_failure` attribute
        """
        output = self.engine.render_to_string('filter-syntax22', {"var": SomeClass()})
        # Silent failure renders string_if_invalid if configured, else nothing.
        if self.engine.string_if_invalid:
            self.assertEqual(output, "1INVALID2")
        else:
            self.assertEqual(output, "12")
    @setup({'filter-syntax23': r'1{{ var.noisy_fail_key }}2'})
    def test_filter_syntax23(self):
        """
        In attribute and dict lookups that raise an unexpected exception
        without a `silent_variable_attribute` set to True, the exception
        propagates
        """
        # Unexpected exceptions must not be swallowed by variable resolution.
        with self.assertRaises(SomeOtherException):
            self.engine.render_to_string('filter-syntax23', {"var": SomeClass()})
    @setup({'filter-syntax24': r'1{{ var.noisy_fail_attribute }}2'})
    def test_filter_syntax24(self):
        """
        In attribute and dict lookups that raise an unexpected exception
        without a `silent_variable_attribute` set to True, the exception
        propagates
        """
        # Same as the key-lookup case above, but for attribute access.
        with self.assertRaises(SomeOtherException):
            self.engine.render_to_string('filter-syntax24', {"var": SomeClass()})
    @setup({'filter-syntax25': '{{ var.attribute_error_attribute }}'})
    def test_filter_syntax25(self):
        """
        #16383 - Attribute errors from an @property value should be
        reraised.
        """
        # An AttributeError raised inside a @property must not be masked as
        # a plain missing attribute.
        with self.assertRaises(AttributeError):
            self.engine.render_to_string('filter-syntax25', {'var': SomeClass()})
| bsd-3-clause |
sunshineDrizzle/FreeROI | froi/widgets/binaryerosiondialog.py | 2 | 5165 |
__author__ = 'zhouguangfu, chenxiayu'
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from scipy.ndimage import morphology
from froi.algorithm.meshtool import binary_shrink
class BinErosionDialog(QDialog):
    """A dialog for action of binary erosion.

    Abstract base dialog: subclasses populate the structure combo box and
    implement :meth:`_binary_erosion` for their data type (volume/surface).
    """

    def __init__(self, model, parent=None):
        super(BinErosionDialog, self).__init__(parent)
        # Model holding the image/surface data the erosion operates on.
        self._model = model
        self._init_gui()
        self._create_actions()

    def _init_gui(self):
        """Initialize GUI."""
        # set dialog title
        self.setWindowTitle("BinaryErosion")

        # initialize widgets
        structure_label = QLabel("Structure")
        self.structure_combo = QComboBox()
        out_label = QLabel("Output name")
        self.out_edit = QLineEdit()

        # layout config
        grid_layout = QGridLayout()
        grid_layout.addWidget(structure_label, 0, 0)
        grid_layout.addWidget(self.structure_combo, 0, 1)
        grid_layout.addWidget(out_label, 1, 0)
        grid_layout.addWidget(self.out_edit, 1, 1)

        # button config
        self.run_button = QPushButton("Run")
        self.cancel_button = QPushButton("Cancel")
        hbox_layout = QHBoxLayout()
        hbox_layout.addWidget(self.run_button)
        hbox_layout.addWidget(self.cancel_button)

        vbox_layout = QVBoxLayout()
        vbox_layout.addLayout(grid_layout)
        vbox_layout.addLayout(hbox_layout)
        self.setLayout(vbox_layout)

    def _create_actions(self):
        """Wire the button clicks to their handlers."""
        self.run_button.clicked.connect(self._binary_erosion)
        # clicked(bool) passes False -> QDialog.done(0), i.e. reject/close.
        self.cancel_button.clicked.connect(self.done)

    def _binary_erosion(self):
        # Subclasses must implement the actual erosion for their data type.
        raise NotImplementedError
class VolBinErosionDialog(BinErosionDialog):
    """Binary erosion dialog for volume (3D) data."""

    def __init__(self, model, parent=None):
        super(VolBinErosionDialog, self).__init__(model, parent)
        self.index = self._model.currentIndex()

        # fill output editor
        source_name = self._model.data(self.index, Qt.DisplayRole)
        output_name = '_'.join(['binErosion', source_name])
        self.out_edit.setText(output_name)

        # fill structure combo box
        self.structure_combo.addItem("3x3x3")
        self.structure_combo.addItem("4x4x4")
        self.structure_combo.addItem("5x5x5")
        self.structure_combo.addItem("6x6x6")

    def _binary_erosion(self):
        """Erode the thresholded volume and add the result to the model."""
        vol_name = str(self.out_edit.text())
        # Combo index 0..3 maps to a cubic structuring element of edge 3..6.
        num = self.structure_combo.currentIndex() + 3
        structure = np.ones((num, num, num), dtype=np.int8)

        if not vol_name:
            self.out_edit.setFocus()
            return

        source_data = self._model.data(self.index, Qt.UserRole + 6)
        # NOTE(review): Qt.UserRole appears to hold the display threshold and
        # UserRole + 6 the raw volume data — confirm against the model API.
        binary_vol = source_data > self._model.data(self.index, Qt.UserRole)
        new_vol = morphology.binary_erosion(binary_vol,
                                            structure=structure)

        self._model.addItem(new_vol.astype(np.int8),
                            name=vol_name,
                            header=self._model.data(self.index, Qt.UserRole + 11))
        self.done(0)
class SurfBinErosionDialog(BinErosionDialog):
    """Binary erosion ("shrink") dialog for surface overlay data."""

    def __init__(self, model, parent=None):
        super(SurfBinErosionDialog, self).__init__(model, parent)
        self.index = self._model.current_index()
        # Depth 2 in the surface tree model corresponds to an overlay item.
        depth = self._model.index_depth(self.index)
        if depth != 2:
            QMessageBox.warning(self,
                                'Warning!',
                                'Get overlay failed!\nYou may have not selected any overlay!',
                                QMessageBox.Yes)
            # raise error to prevent dialog from being created
            raise RuntimeError("You may have not selected any overlay!")

        # fill output editor
        source_name = self._model.data(self.index, Qt.DisplayRole)
        output_name = '_'.join(['binErosion', source_name])
        self.out_edit.setText(output_name)

        # fill structure combo box
        self.structure_combo.addItem("1-ring")
        self.structure_combo.addItem("2-ring")
        self.structure_combo.addItem("3-ring")
        self.structure_combo.addItem("4-ring")

    def _binary_erosion(self):
        """Shrink the binarized overlay by n rings and add the result."""
        out_name = str(self.out_edit.text())
        # Combo index 0..3 maps to a 1..4-ring neighborhood.
        n_ring = self.structure_combo.currentIndex() + 1
        if not out_name:
            self.out_edit.setFocus()
            return

        source_data = self._model.data(self.index, Qt.UserRole + 10)
        # NOTE(review): UserRole + 7 looks like an "is label" flag and
        # UserRole the display threshold — confirm against the model API.
        if self._model.data(self.index, Qt.UserRole + 7):
            bin_data = source_data != 0
        else:
            bin_data = source_data > self._model.data(self.index, Qt.UserRole)
        new_data = binary_shrink(bin_data,
                                 faces=self._model.data(self.index.parent(), Qt.UserRole + 6).faces,
                                 n=n_ring)

        self._model.add_item(self.index,
                             source=new_data.astype(np.int8),
                             colormap="blue",
                             islabel=True,
                             name=out_name)
        self.done(0)
| bsd-3-clause |
savoirfairelinux/OpenUpgrade | addons/hr_gamification/wizard/grant_badge.py | 180 | 2524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_grant_badge_wizard(osv.TransientModel):
    # Transient wizard used to grant a gamification badge to an employee.
    _name = 'gamification.badge.user.wizard'
    _inherit = ['gamification.badge.user.wizard']

    _columns = {
        # Employee receiving the badge (mandatory).
        'employee_id': fields.many2one("hr.employee", string='Employee', required=True),
        # Related user account of the employee; stored so it can be searched.
        'user_id': fields.related("employee_id", "user_id",
                                  type="many2one", relation="res.users",
                                  store=True, string='User')
    }

    def action_grant_badge(self, cr, uid, ids, context=None):
        """Wizard action for sending a badge to a chosen employee.

        :param cr: database cursor
        :param uid: id of the current user (the badge sender)
        :param ids: ids of the wizard records to process
        :param context: optional context dictionary
        :raises osv.except_osv: if the employee has no linked user, or the
            sender tries to reward himself
        :return: result of sending the last badge, or False when no wizard
            record was processed
        """
        if context is None:
            context = {}

        badge_user_obj = self.pool.get('gamification.badge.user')

        # Initialize so that an empty `ids` list does not raise an
        # UnboundLocalError on the final `return result`.
        result = False
        for wiz in self.browse(cr, uid, ids, context=context):
            if not wiz.user_id:
                raise osv.except_osv(_('Warning!'), _('You can send badges only to employees linked to a user.'))

            if uid == wiz.user_id.id:
                raise osv.except_osv(_('Warning!'), _('You can not send a badge to yourself'))

            values = {
                'user_id': wiz.user_id.id,
                'sender_id': uid,
                'badge_id': wiz.badge_id.id,
                'employee_id': wiz.employee_id.id,
                'comment': wiz.comment,
            }
            badge_user = badge_user_obj.create(cr, uid, values, context=context)
            result = badge_user_obj._send_badge(cr, uid, [badge_user], context=context)

        return result
odoousers2014/odoo | addons/point_of_sale/report/__init__.py | 8 | 1171 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_users_product
import account_statement
import pos_receipt
import pos_invoice
import pos_lines
import pos_details
import pos_payment_report
import pos_report
import pos_order_report
| agpl-3.0 |
FreekingDean/home-assistant | homeassistant/helpers/event_decorators.py | 12 | 2483 | """Event Decorators for custom components."""
import functools
# pylint: disable=unused-import
from typing import Optional # NOQA
from homeassistant.core import HomeAssistant # NOQA
from homeassistant.helpers import event
HASS = None # type: Optional[HomeAssistant]
def track_state_change(entity_ids, from_state=None, to_state=None):
    """Return a decorator registering *action* for entity state changes."""
    def decorator(action):
        """Register the wrapped action and hand it back untouched."""
        bound_action = functools.partial(action, HASS)
        event.track_state_change(HASS, entity_ids, bound_action,
                                 from_state, to_state)
        return action
    return decorator
def track_sunrise(offset=None):
    """Return a decorator registering *action* to fire at sunrise + offset."""
    def decorator(action):
        """Register the wrapped action and hand it back untouched."""
        bound_action = functools.partial(action, HASS)
        event.track_sunrise(HASS, bound_action, offset)
        return action
    return decorator
def track_sunset(offset=None):
    """Return a decorator registering *action* to fire at sunset + offset."""
    def decorator(action):
        """Register the wrapped action and hand it back untouched."""
        bound_action = functools.partial(action, HASS)
        event.track_sunset(HASS, bound_action, offset)
        return action
    return decorator
def track_time_change(year=None, month=None, day=None, hour=None, minute=None,
                      second=None):
    """Return a decorator registering *action* for matching time changes."""
    def decorator(action):
        """Register the wrapped action and hand it back untouched."""
        bound_action = functools.partial(action, HASS)
        event.track_time_change(HASS, bound_action,
                                year, month, day, hour, minute, second)
        return action
    return decorator
def track_utc_time_change(year=None, month=None, day=None, hour=None,
                          minute=None, second=None):
    """Return a decorator registering *action* for matching UTC time changes."""
    def decorator(action):
        """Register the wrapped action and hand it back untouched."""
        bound_action = functools.partial(action, HASS)
        event.track_utc_time_change(HASS, bound_action,
                                    year, month, day, hour, minute, second)
        return action
    return decorator
| mit |
DingSoung/linux-3.0.1 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called by perf once before the first event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called by perf after the last event; report any unhandled event counts.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    vec):
    # Handler for irq:softirq_entry; prints the common fields then the
    # softirq vector rendered as a symbolic name via symbol_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
    (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    call_site, ptr, bytes_req, bytes_alloc,
    gfp_flags):
    # Handler for kmem:kmalloc; gfp_flags is rendered as a flag string.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
    "bytes_alloc=%u, gfp_flags=%s\n" % \
    (call_site, ptr, bytes_req, bytes_alloc,
    flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  On the first occurrence
    # the += raises TypeError (presumably because the autovivifying autodict
    # from Core yields a non-int for a missing key — confirm in Core.py),
    # and the counter is initialized to 1 instead.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # The trailing comma suppresses the newline so the caller can append
    # event-specific fields on the same line.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These values come from the perf_trace_context accessor functions.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Dump a small table of event name -> occurrence count for every event
    # that fell through to trace_unhandled(); nothing is printed if all
    # events had handlers.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
thnee/ansible | lib/ansible/modules/clustering/k8s/k8s_info.py | 28 | 5024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Will Thames <@willthames>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_info
short_description: Describe Kubernetes (K8s) objects
version_added: "2.7"
author:
- "Will Thames (@willthames)"
description:
- Use the OpenShift Python client to perform read operations on K8s objects.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
- This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
options:
api_version:
description:
- Use to specify the API version. in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object.
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object.
required: yes
name:
description:
- Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object.
namespace:
description:
- Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
to identify a specific object.
label_selectors:
description: List of label selectors to use to filter results
field_selectors:
description: List of field selectors to use to filter results
extends_documentation_fragment:
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Get an existing Service object
k8s_info:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s_info:
api_version: v1
kind: Service
namespace: testing
register: service_list
- name: Get a list of all pods from any namespace
k8s_info:
kind: Pod
register: pod_list
- name: Search for all Pods labelled app=web
k8s_info:
kind: Pod
label_selectors:
- app = web
- tier in (dev, test)
- name: Search for all running pods
k8s_info:
kind: Pod
field_selectors:
- status.phase=Running
'''
RETURN = '''
resources:
description:
- The object(s) that exists
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: dict
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: dict
status:
description: Current status details for the object.
returned: success
type: dict
'''
from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
import copy
class KubernetesInfoModule(KubernetesAnsibleModule):
    """Ansible module implementation for read-only K8s object queries."""

    def __init__(self, *args, **kwargs):
        KubernetesAnsibleModule.__init__(self, *args,
                                         supports_check_mode=True,
                                         **kwargs)
        # Emit a deprecation warning when invoked under the pre-2.9 name.
        if self._name == 'k8s_facts':
            self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'", version='2.13')

    def execute_module(self):
        """Query the cluster and exit with the matching resources."""
        self.client = self.get_api_client()

        # kubernetes_facts() returns a dict with a 'resources' list; the
        # module never changes cluster state, hence changed=False.
        self.exit_json(changed=False,
                       **self.kubernetes_facts(self.params['kind'],
                                               self.params['api_version'],
                                               self.params['name'],
                                               self.params['namespace'],
                                               self.params['label_selectors'],
                                               self.params['field_selectors']))

    @property
    def argspec(self):
        """Argument spec: shared auth options plus the query parameters."""
        args = copy.deepcopy(AUTH_ARG_SPEC)
        args.update(
            dict(
                kind=dict(required=True),
                api_version=dict(default='v1', aliases=['api', 'version']),
                name=dict(),
                namespace=dict(),
                label_selectors=dict(type='list', default=[]),
                field_selectors=dict(type='list', default=[]),
            )
        )
        return args
def main():
    """Entry point: build the module wrapper and run it."""
    module = KubernetesInfoModule()
    module.execute_module()


if __name__ == '__main__':
    main()
| gpl-3.0 |
andersk/zulip | scripts/lib/node_cache.py | 3 | 3632 | import hashlib
import json
import os
import shutil
import subprocess
from typing import Dict, List, Optional
from scripts.lib.zulip_tools import run
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
ZULIP_SRV_PATH = "/srv"
NODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, "zulip-npm-cache")
YARN_BIN = os.path.join(ZULIP_SRV_PATH, "zulip-yarn/bin/yarn")
YARN_PACKAGE_JSON = os.path.join(ZULIP_SRV_PATH, "zulip-yarn/package.json")
DEFAULT_PRODUCTION = False
def get_yarn_args(production: bool) -> List[str]:
    """Return the extra yarn CLI arguments for the given install mode.

    Args:
        production: if True, yarn should skip devDependencies.

    Returns:
        ``["--prod"]`` for production installs, an empty list otherwise
        (a fresh list each call, so callers may append to it safely).
    """
    return ["--prod"] if production else []
def generate_sha1sum_node_modules(
    setup_dir: Optional[str] = None,
    production: bool = DEFAULT_PRODUCTION,
) -> str:
    """Return a SHA1 cache key covering everything that affects `yarn install`.

    The digest includes package.json, yarn.lock (when present), the vendored
    yarn version, the node version, and the yarn arguments, so a change to
    any of them selects a different cache directory.
    """
    if setup_dir is None:
        setup_dir = os.path.realpath(os.getcwd())
    PACKAGE_JSON_FILE_PATH = os.path.join(setup_dir, "package.json")
    YARN_LOCK_FILE_PATH = os.path.join(setup_dir, "yarn.lock")
    data: Dict[str, object] = {}
    with open(PACKAGE_JSON_FILE_PATH, "r") as f:
        data[PACKAGE_JSON_FILE_PATH] = f.read().strip()
    if os.path.exists(YARN_LOCK_FILE_PATH):
        # For backwards compatibility, we can't assume yarn.lock exists
        with open(YARN_LOCK_FILE_PATH, "r") as f:
            data[YARN_LOCK_FILE_PATH] = f.read().strip()
    with open(YARN_PACKAGE_JSON) as f:
        data["yarn-package-version"] = json.load(f)["version"]
    data["node-version"] = subprocess.check_output(
        ["node", "--version"], universal_newlines=True
    ).strip()
    data["yarn-args"] = get_yarn_args(production=production)

    sha1sum = hashlib.sha1()
    # sort_keys makes the serialization (and hence the digest) deterministic.
    sha1sum.update(json.dumps(data, sort_keys=True).encode("utf-8"))
    return sha1sum.hexdigest()
def setup_node_modules(
    production: bool = DEFAULT_PRODUCTION,
    prefer_offline: bool = False,
) -> None:
    """Install (or reuse) a cached node_modules and symlink it into cwd.

    The cache directory under NODE_MODULES_CACHE_PATH is keyed by the
    SHA1 of everything that influences the install; `node_modules` in the
    current directory ends up as a symlink into that cache entry.
    """
    yarn_args = get_yarn_args(production=production)
    if prefer_offline:
        yarn_args.append("--prefer-offline")
    sha1sum = generate_sha1sum_node_modules(production=production)
    target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum)
    cached_node_modules = os.path.join(target_path, "node_modules")
    # The stamp file marks a fully completed install of this cache entry.
    success_stamp = os.path.join(target_path, ".success-stamp")

    # Check if a cached version already exists
    if not os.path.exists(success_stamp):
        do_yarn_install(target_path, yarn_args, success_stamp)

    print("Using cached node modules from {}".format(cached_node_modules))
    # Replace whatever node_modules currently is (symlink or real dir)
    # with a symlink to the cached copy.
    if os.path.islink("node_modules"):
        os.remove("node_modules")
    elif os.path.isdir("node_modules"):
        shutil.rmtree("node_modules")
    os.symlink(cached_node_modules, "node_modules")
def do_yarn_install(
    target_path: str,
    yarn_args: List[str],
    success_stamp: str,
) -> None:
    """Run `yarn install` inside *target_path* and write the success stamp.

    The manifest files are copied from the current directory into the cache
    entry, and an existing local node_modules (if any) is used to seed the
    cache so yarn has less to download.
    """
    os.makedirs(target_path, exist_ok=True)
    shutil.copy("package.json", target_path)
    shutil.copy("yarn.lock", target_path)
    shutil.copy(".yarnrc", target_path)
    cached_node_modules = os.path.join(target_path, "node_modules")
    print("Cached version not found! Installing node modules.")

    # Copy the existing node_modules to speed up install
    if os.path.exists("node_modules") and not os.path.exists(cached_node_modules):
        shutil.copytree("node_modules/", cached_node_modules, symlinks=True)

    if os.environ.get("CUSTOM_CA_CERTIFICATES"):
        run([YARN_BIN, "config", "set", "cafile", os.environ["CUSTOM_CA_CERTIFICATES"]])

    run(
        [YARN_BIN, "install", "--non-interactive", "--frozen-lockfile", *yarn_args], cwd=target_path
    )
    # An empty write just creates/touches the stamp file to mark success.
    with open(success_stamp, "w"):
        pass
| apache-2.0 |
rhololkeolke/apo-website | src/werkzeug/testsuite/http.py | 66 | 16213 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.http
~~~~~~~~~~~~~~~~~~~~~~~
HTTP parsing utilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from datetime import datetime
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import http, datastructures
from werkzeug.test import create_environ
class HTTPUtilityTestCase(WerkzeugTestCase):
def test_accept(self):
a = http.parse_accept_header('en-us,ru;q=0.5')
self.assert_equal(a.values(), ['en-us', 'ru'])
self.assert_equal(a.best, 'en-us')
self.assert_equal(a.find('ru'), 1)
self.assert_raises(ValueError, a.index, 'de')
self.assert_equal(a.to_header(), 'en-us,ru;q=0.5')
def test_mime_accept(self):
a = http.parse_accept_header('text/xml,application/xml,'
'application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png,*/*;q=0.5',
datastructures.MIMEAccept)
self.assert_raises(ValueError, lambda: a['missing'])
self.assert_equal(a['image/png'], 1)
self.assert_equal(a['text/plain'], 0.8)
self.assert_equal(a['foo/bar'], 0.5)
self.assert_equal(a[a.find('foo/bar')], ('*/*', 0.5))
def test_accept_matches(self):
a = http.parse_accept_header('text/xml,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png', datastructures.MIMEAccept)
self.assert_equal(a.best_match(['text/html', 'application/xhtml+xml']),
'application/xhtml+xml')
self.assert_equal(a.best_match(['text/html']), 'text/html')
self.assert_(a.best_match(['foo/bar']) is None)
self.assert_equal(a.best_match(['foo/bar', 'bar/foo'],
default='foo/bar'), 'foo/bar')
self.assert_equal(a.best_match(['application/xml', 'text/xml']), 'application/xml')
def test_charset_accept(self):
a = http.parse_accept_header('ISO-8859-1,utf-8;q=0.7,*;q=0.7',
datastructures.CharsetAccept)
self.assert_equal(a['iso-8859-1'], a['iso8859-1'])
self.assert_equal(a['iso-8859-1'], 1)
self.assert_equal(a['UTF8'], 0.7)
self.assert_equal(a['ebcdic'], 0.7)
def test_language_accept(self):
a = http.parse_accept_header('de-AT,de;q=0.8,en;q=0.5',
datastructures.LanguageAccept)
self.assert_equal(a.best, 'de-AT')
self.assert_('de_AT' in a)
self.assert_('en' in a)
self.assert_equal(a['de-at'], 1)
self.assert_equal(a['en'], 0.5)
def test_set_header(self):
hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe')
self.assert_('blah baz' in hs)
self.assert_('foobar' not in hs)
self.assert_('foo' in hs)
self.assert_equal(list(hs), ['foo', 'Bar', 'Blah baz', 'Hehe'])
hs.add('Foo')
self.assert_equal(hs.to_header(), 'foo, Bar, "Blah baz", Hehe')
def test_list_header(self):
hl = http.parse_list_header('foo baz, blah')
self.assert_equal(hl, ['foo baz', 'blah'])
def test_dict_header(self):
d = http.parse_dict_header('foo="bar baz", blah=42')
self.assert_equal(d, {'foo': 'bar baz', 'blah': '42'})
def test_cache_control_header(self):
cc = http.parse_cache_control_header('max-age=0, no-cache')
assert cc.max_age == 0
assert cc.no_cache
cc = http.parse_cache_control_header('private, community="UCI"', None,
datastructures.ResponseCacheControl)
assert cc.private
assert cc['community'] == 'UCI'
c = datastructures.ResponseCacheControl()
assert c.no_cache is None
assert c.private is None
c.no_cache = True
assert c.no_cache == '*'
c.private = True
assert c.private == '*'
del c.private
assert c.private is None
assert c.to_header() == 'no-cache'
def test_authorization_header(self):
a = http.parse_authorization_header('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
assert a.type == 'basic'
assert a.username == 'Aladdin'
assert a.password == 'open sesame'
a = http.parse_authorization_header('''Digest username="Mufasa",
realm="testrealm@host.invalid",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
qop=auth,
nc=00000001,
cnonce="0a4f113b",
response="6629fae49393a05397450978507c4ef1",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert a.type == 'digest'
assert a.username == 'Mufasa'
assert a.realm == 'testrealm@host.invalid'
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert a.uri == '/dir/index.html'
assert 'auth' in a.qop
assert a.nc == '00000001'
assert a.cnonce == '0a4f113b'
assert a.response == '6629fae49393a05397450978507c4ef1'
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
a = http.parse_authorization_header('''Digest username="Mufasa",
realm="testrealm@host.invalid",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
response="e257afa1414a3340d93d30955171dd0e",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert a.type == 'digest'
assert a.username == 'Mufasa'
assert a.realm == 'testrealm@host.invalid'
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert a.uri == '/dir/index.html'
assert a.response == 'e257afa1414a3340d93d30955171dd0e'
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
assert http.parse_authorization_header('') is None
assert http.parse_authorization_header(None) is None
assert http.parse_authorization_header('foo') is None
def test_www_authenticate_header(self):
wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"')
assert wa.type == 'basic'
assert wa.realm == 'WallyWorld'
wa.realm = 'Foo Bar'
assert wa.to_header() == 'Basic realm="Foo Bar"'
wa = http.parse_www_authenticate_header('''Digest
realm="testrealm@host.com",
qop="auth,auth-int",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert wa.type == 'digest'
assert wa.realm == 'testrealm@host.com'
assert 'auth' in wa.qop
assert 'auth-int' in wa.qop
assert wa.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert wa.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
wa = http.parse_www_authenticate_header('broken')
assert wa.type == 'broken'
assert not http.parse_www_authenticate_header('').type
assert not http.parse_www_authenticate_header('')
def test_etags(self):
assert http.quote_etag('foo') == '"foo"'
assert http.quote_etag('foo', True) == 'w/"foo"'
assert http.unquote_etag('"foo"') == ('foo', False)
assert http.unquote_etag('w/"foo"') == ('foo', True)
es = http.parse_etags('"foo", "bar", w/"baz", blar')
assert sorted(es) == ['bar', 'blar', 'foo']
assert 'foo' in es
assert 'baz' not in es
assert es.contains_weak('baz')
assert 'blar' in es
assert es.contains_raw('w/"baz"')
assert es.contains_raw('"foo"')
assert sorted(es.to_header().split(', ')) == ['"bar"', '"blar"', '"foo"', 'w/"baz"']
def test_parse_date(self):
assert http.parse_date('Sun, 06 Nov 1994 08:49:37 GMT ') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date(' Sun Nov 6 08:49:37 1994') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date('foo') is None
def test_parse_date_overflows(self):
assert http.parse_date(' Sun 02 Feb 1343 08:49:37 GMT') == datetime(1343, 2, 2, 8, 49, 37)
assert http.parse_date('Thu, 01 Jan 1970 00:00:00 GMT') == datetime(1970, 1, 1, 0, 0)
assert http.parse_date('Thu, 33 Jan 1970 00:00:00 GMT') is None
def test_remove_entity_headers(self):
now = http.http_date()
headers1 = [('Date', now), ('Content-Type', 'text/html'), ('Content-Length', '0')]
headers2 = datastructures.Headers(headers1)
http.remove_entity_headers(headers1)
assert headers1 == [('Date', now)]
http.remove_entity_headers(headers2)
assert headers2 == datastructures.Headers([('Date', now)])
def test_remove_hop_by_hop_headers(self):
headers1 = [('Connection', 'closed'), ('Foo', 'bar'),
('Keep-Alive', 'wtf')]
headers2 = datastructures.Headers(headers1)
http.remove_hop_by_hop_headers(headers1)
assert headers1 == [('Foo', 'bar')]
http.remove_hop_by_hop_headers(headers2)
assert headers2 == datastructures.Headers([('Foo', 'bar')])
def test_parse_options_header(self):
assert http.parse_options_header('something; foo="other\"thing"') == \
('something', {'foo': 'other"thing'})
assert http.parse_options_header('something; foo="other\"thing"; meh=42') == \
('something', {'foo': 'other"thing', 'meh': '42'})
assert http.parse_options_header('something; foo="other\"thing"; meh=42; bleh') == \
('something', {'foo': 'other"thing', 'meh': '42', 'bleh': None})
def test_dump_options_header(self):
assert http.dump_options_header('foo', {'bar': 42}) == \
'foo; bar=42'
assert http.dump_options_header('foo', {'bar': 42, 'fizz': None}) == \
'foo; bar=42; fizz'
def test_dump_header(self):
assert http.dump_header([1, 2, 3]) == '1, 2, 3'
assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"'
assert http.dump_header({'foo': 'bar'}, allow_token=False) == 'foo="bar"'
assert http.dump_header({'foo': 'bar'}) == 'foo=bar'
def test_is_resource_modified(self):
env = create_environ()
# ignore POST
env['REQUEST_METHOD'] = 'POST'
assert not http.is_resource_modified(env, etag='testing')
env['REQUEST_METHOD'] = 'GET'
# etagify from data
self.assert_raises(TypeError, http.is_resource_modified, env,
data='42', etag='23')
env['HTTP_IF_NONE_MATCH'] = http.generate_etag('awesome')
assert not http.is_resource_modified(env, data='awesome')
env['HTTP_IF_MODIFIED_SINCE'] = http.http_date(datetime(2008, 1, 1, 12, 30))
assert not http.is_resource_modified(env,
last_modified=datetime(2008, 1, 1, 12, 00))
assert http.is_resource_modified(env,
last_modified=datetime(2008, 1, 1, 13, 00))
def test_date_formatting(self):
assert http.cookie_date(0) == 'Thu, 01-Jan-1970 00:00:00 GMT'
assert http.cookie_date(datetime(1970, 1, 1)) == 'Thu, 01-Jan-1970 00:00:00 GMT'
assert http.http_date(0) == 'Thu, 01 Jan 1970 00:00:00 GMT'
assert http.http_date(datetime(1970, 1, 1)) == 'Thu, 01 Jan 1970 00:00:00 GMT'
    def test_cookies(self):
        """parse_cookie()/dump_cookie() round-trip realistic cookie data."""
        # A realistic Cookie header parses into a dict of text values.
        assert http.parse_cookie('dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cd'
                                 'c762809248d4beed; a=42') == {
            'CP': u'null*',
            'PHPSESSID': u'0a539d42abc001cdc762809248d4beed',
            'a': u'42',
            'dismiss-top': u'6'
        }
        # Attribute order in the dumped cookie is unspecified -> compare sets.
        assert set(http.dump_cookie('foo', 'bar baz blub', 360, httponly=True,
                                    sync_expires=False).split('; ')) == \
            set(['HttpOnly', 'Max-Age=360', 'Path=/', 'foo="bar baz blub"'])
        # Malformed pairs are skipped rather than raising.
        assert http.parse_cookie('fo234{=bar blub=Blah') == {'blub': 'Blah'}
def test_cookie_quoting(self):
val = http.dump_cookie("foo", "?foo")
assert val == 'foo="?foo"; Path=/'
assert http.parse_cookie(val) == {'foo': '?foo'}
assert http.parse_cookie(r'foo="foo\054bar"') == {'foo': 'foo,bar'}
class RangeTestCase(WerkzeugTestCase):
    """Tests for the If-Range header helpers."""

    def test_if_range_parsing(self):
        # A strong etag round-trips through parse/to_header.
        rv = http.parse_if_range_header('"Test"')
        assert rv.etag == 'Test'
        assert rv.date is None
        assert rv.to_header() == '"Test"'
        # weak information is dropped
        rv = http.parse_if_range_header('w/"Test"')
        assert rv.etag == 'Test'
        assert rv.date is None
        assert rv.to_header() == '"Test"'
        # broken etags are supported too
        rv = http.parse_if_range_header('bullshit')
        assert rv.etag == 'bullshit'
        assert rv.date is None
        assert rv.to_header() == '"bullshit"'
        # An HTTP date fills in .date and leaves .etag unset.
        rv = http.parse_if_range_header('Thu, 01 Jan 1970 00:00:00 GMT')
        assert rv.etag is None
        assert rv.date == datetime(1970, 1, 1)
        assert rv.to_header() == 'Thu, 01 Jan 1970 00:00:00 GMT'
        # Empty or missing headers yield an empty result object.
        for x in '', None:
            rv = http.parse_if_range_header(x)
            assert rv.etag is None
            assert rv.date is None
            assert rv.to_header() == ''
def test_range_parsing():
    """parse_range_header() parses Range headers into (start, stop) pairs
    with an exclusive stop, or returns None for invalid input."""
    # A bare value without a dash is not a valid range.
    rv = http.parse_range_header('bytes=52')
    assert rv is None
    # Open-ended range.
    rv = http.parse_range_header('bytes=52-')
    assert rv.units == 'bytes'
    assert rv.ranges == [(52, None)]
    assert rv.to_header() == 'bytes=52-'
    # Closed range: the stored stop is one past the last byte.
    rv = http.parse_range_header('bytes=52-99')
    assert rv.units == 'bytes'
    assert rv.ranges == [(52, 100)]
    assert rv.to_header() == 'bytes=52-99'
    # Multiple ranges, including a negative suffix range.
    rv = http.parse_range_header('bytes=52-99,-1000')
    assert rv.units == 'bytes'
    assert rv.ranges == [(52, 100), (-1000, None)]
    assert rv.to_header() == 'bytes=52-99,-1000'
    # Whitespace around the tokens is tolerated.
    rv = http.parse_range_header('bytes = 1 - 100')
    assert rv.units == 'bytes'
    assert rv.ranges == [(1, 101)]
    assert rv.to_header() == 'bytes=1-100'
    # Units are lowercased on parse.
    rv = http.parse_range_header('AWesomes=0-999')
    assert rv.units == 'awesomes'
    assert rv.ranges == [(0, 1000)]
    assert rv.to_header() == 'awesomes=0-999'
def test_content_range_parsing():
    """parse_content_range_header() parses Content-Range headers; invalid
    input yields None and '*' marks unknown length or unsatisfied ranges."""
    # Unknown total length is expressed as '*'.
    rv = http.parse_content_range_header('bytes 0-98/*')
    assert rv.units == 'bytes'
    assert rv.start == 0
    assert rv.stop == 99
    assert rv.length is None
    assert rv.to_header() == 'bytes 0-98/*'
    # Trailing junk invalidates the whole header.
    rv = http.parse_content_range_header('bytes 0-98/*asdfsa')
    assert rv is None
    rv = http.parse_content_range_header('bytes 0-99/100')
    assert rv.to_header() == 'bytes 0-99/100'
    # Clearing start/stop serializes as an unsatisfied range ('*').
    rv.start = None
    rv.stop = None
    assert rv.units == 'bytes'
    assert rv.to_header() == 'bytes */100'
    rv = http.parse_content_range_header('bytes */100')
    assert rv.start is None
    assert rv.stop is None
    assert rv.length == 100
    assert rv.units == 'bytes'
class RegressionTestCase(WerkzeugTestCase):
    """Regression tests for previously fixed bugs."""

    def test_best_match_works(self):
        # was a bug in 0.6
        rv = http.parse_accept_header('foo=,application/xml,application/xhtml+xml,'
                                      'text/html;q=0.9,text/plain;q=0.8,'
                                      'image/png,*/*;q=0.5',
                                      datastructures.MIMEAccept).best_match(['foo/bar'])
        self.assert_equal(rv, 'foo/bar')
def suite():
    """Assemble this module's TestCase classes into a unittest suite.

    Fix: ``RangeTestCase`` was defined above but never registered here, so
    its tests were silently skipped when the module was run via suite().
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(HTTPUtilityTestCase))
    suite.addTest(unittest.makeSuite(RangeTestCase))
    suite.addTest(unittest.makeSuite(RegressionTestCase))
    return suite
| bsd-3-clause |
snyderr/robotframework | src/robot/libraries/Dialogs.py | 2 | 4541 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test library providing dialogs for interacting with users.
``Dialogs`` is Robot Framework's standard library that provides means
for pausing the test execution and getting input from users. The
dialogs are slightly different depending on whether tests are run on
Python, IronPython or Jython but they provide the same functionality.
Long lines in the provided messages are wrapped automatically since
Robot Framework 2.8. If you want to wrap lines manually, you can add
newlines using the ``\\n`` character sequence.
The library has a known limitation that it cannot be used with timeouts
on Python. Support for IronPython was added in Robot Framework 2.9.2.
"""
from robot.version import get_version
from robot.utils import IRONPYTHON, JYTHON, is_truthy
if JYTHON:
from .dialogs_jy import MessageDialog, PassFailDialog, InputDialog, SelectionDialog
elif IRONPYTHON:
from .dialogs_ipy import MessageDialog, PassFailDialog, InputDialog, SelectionDialog
else:
from .dialogs_py import MessageDialog, PassFailDialog, InputDialog, SelectionDialog
# Library version reported by Robot Framework.
__version__ = get_version()
# Keywords exported by this library.
__all__ = ['execute_manual_step', 'get_value_from_user',
           'get_selection_from_user', 'pause_execution']
def pause_execution(message='Test execution paused. Press OK to continue.'):
    """Pauses test execution until user clicks ``Ok`` button.

    ``message`` is the message shown in the dialog.
    """
    dialog = MessageDialog(message)
    dialog.show()
def execute_manual_step(message, default_error=''):
    """Pauses test execution until user sets the keyword status.

    User can press either ``PASS`` or ``FAIL`` button. In the latter case
    execution fails and an additional dialog is opened for defining the
    error message.

    ``message`` is the instruction shown in the initial dialog and
    ``default_error`` is the default value shown in the possible error
    message dialog.
    """
    passed = _validate_user_input(PassFailDialog(message))
    if passed:
        return
    error = get_value_from_user('Give error message:', default_error)
    raise AssertionError(error)
def get_value_from_user(message, default_value='', hidden=False):
    """Pauses test execution and asks user to input a value.

    Value typed by the user, or the possible default value, is returned.
    Returning an empty value is fine, but pressing ``Cancel`` fails the
    keyword.

    ``message`` is the instruction shown in the dialog and ``default_value``
    is the possible default value shown in the input field.

    If ``hidden`` is given a true value, the value typed by the user is
    hidden. ``hidden`` is considered true if it is a non-empty string not
    equal to ``false`` or ``no``, case-insensitively. If it is not a string,
    its truth value is got directly using same
    [http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
    as in Python].

    Example:
    | ${username} = | Get Value From User | Input user name | default |
    | ${password} = | Get Value From User | Input password | hidden=yes |

    Possibility to hide the typed in value is new in Robot Framework 2.8.4.
    Considering strings ``false`` and ``no`` to be false is new in 2.9.
    """
    dialog = InputDialog(message, default_value, is_truthy(hidden))
    return _validate_user_input(dialog)
def get_selection_from_user(message, *values):
    """Pauses test execution and asks user to select a value.

    The selected value is returned. Pressing ``Cancel`` fails the keyword.

    ``message`` is the instruction shown in the dialog and ``values`` are
    the options given to the user.

    Example:
    | ${username} = | Get Selection From User | Select user name | user1 | user2 | admin |
    """
    dialog = SelectionDialog(message, values)
    return _validate_user_input(dialog)
def _validate_user_input(dialog):
value = dialog.show()
if value is None:
raise RuntimeError('No value provided by user.')
return value
| apache-2.0 |
kaushik94/pymc | pymc/theanof.py | 4 | 2024 | from .vartypes import typefilter, continuous_types
from theano import theano, tensor as t
from theano.gof.graph import inputs
from .memoize import memoize
# Public API of this helper module.
__all__ = ['gradient', 'hessian', 'hessian_diag', 'inputvars', 'cont_inputs']
def inputvars(a):
    """
    Get the inputs of theano variables, keeping only TensorVariables.

    Parameters
    ----------
    a : theano variable (or list/tuple of them)

    Returns
    -------
    r : list of tensor variables that are inputs
    """
    return [v for v in inputs(makeiter(a)) if isinstance(v, t.TensorVariable)]
def cont_inputs(f):
    """
    Get the continuous-typed inputs of theano variables.

    Parameters
    ----------
    f : theano variable (or list/tuple of them)

    Returns
    -------
    r : list of tensor variables that are continuous inputs
    """
    return typefilter(inputvars(f), continuous_types)
"""
Theano derivative functions
"""
def gradient1(f, v):
    """Flat (1-D) gradient of ``f`` with respect to variable ``v``."""
    grad = t.grad(f, v, disconnected_inputs='warn')
    return t.flatten(grad)
@memoize
def gradient(f, vars=None):
    """Concatenated flat gradient of ``f`` w.r.t. every variable in
    ``vars`` (defaults to the continuous inputs of ``f``).  Memoized."""
    if vars is None:
        vars = cont_inputs(f)
    return t.concatenate([gradient1(f, v) for v in vars], axis=0)
def jacobian1(f, v):
    """jacobian of f wrt v"""
    # Flatten f so each element's gradient becomes one Jacobian row.
    f = t.flatten(f)
    idx = t.arange(f.shape[0])

    def grad_i(i):
        # Gradient of the i-th output element.
        return gradient1(f[i], v)

    return theano.map(grad_i, idx)[0]
@memoize
def jacobian(f, vars=None):
    """Jacobian of ``f`` w.r.t. ``vars`` (defaults to continuous inputs),
    concatenated column-wise across variables.  Memoized."""
    if vars is None:
        vars = cont_inputs(f)
    return t.concatenate([jacobian1(f, v) for v in vars], axis=1)
@memoize
def hessian(f, vars=None):
    # NOTE: this is the *negative* Hessian -- minus the Jacobian of the
    # flat gradient of f.
    return -jacobian(gradient(f, vars), vars)
def hessian_diag1(f, v):
    """Diagonal of the Hessian of ``f`` w.r.t. ``v``: only the i-th
    component of the gradient of the i-th gradient element is kept."""
    g = gradient1(f, v)
    idx = t.arange(g.shape[0])

    def hess_ii(i):
        return gradient1(g[i], v)[i]

    return theano.map(hess_ii, idx)[0]
@memoize
def hessian_diag(f, vars=None):
    # Negative Hessian diagonal (sign matches hessian() above),
    # concatenated over all variables.  Memoized.
    if vars is None:
        vars = cont_inputs(f)
    return -t.concatenate([hessian_diag1(f, v) for v in vars], axis=0)
def makeiter(a):
    """Return *a* unchanged if it is already a list or tuple, otherwise
    wrap it in a single-element list."""
    return a if isinstance(a, (tuple, list)) else [a]
| apache-2.0 |
juanyaw/python | cpython/Lib/test/test_pathlib.py | 6 | 83908 | import collections
import io
import os
import errno
import pathlib
import pickle
import socket
import stat
import tempfile
import unittest
from test import support
TESTFN = support.TESTFN

try:
    # POSIX-only modules used by ownership-related tests; set to None on
    # platforms (e.g. Windows) where they are unavailable so tests can skip.
    import grp, pwd
except ImportError:
    grp = pwd = None
class _BaseFlavourTest(object):
    """Shared parse_parts() checks; subclasses provide ``flavour``."""

    def _check_parse_parts(self, arg, expected):
        # Run parse_parts with the flavour's primary separator substituted
        # for '/', and again with the alternate separator if one exists.
        f = self.flavour.parse_parts
        sep = self.flavour.sep
        altsep = self.flavour.altsep
        actual = f([x.replace('/', sep) for x in arg])
        self.assertEqual(actual, expected)
        if altsep:
            actual = f([x.replace('/', altsep) for x in arg])
            self.assertEqual(actual, expected)

    def test_parse_parts_common(self):
        check = self._check_parse_parts
        sep = self.flavour.sep
        # Unanchored parts
        check([], ('', '', []))
        check(['a'], ('', '', ['a']))
        check(['a/'], ('', '', ['a']))
        check(['a', 'b'], ('', '', ['a', 'b']))
        # Expansion
        check(['a/b'], ('', '', ['a', 'b']))
        check(['a/b/'], ('', '', ['a', 'b']))
        check(['a', 'b/c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
        # Collapsing and stripping excess slashes
        check(['a', 'b//c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
        check(['a', 'b/c/', 'd'], ('', '', ['a', 'b', 'c', 'd']))
        # Eliminating standalone dots
        check(['.'], ('', '', []))
        check(['.', '.', 'b'], ('', '', ['b']))
        check(['a', '.', 'b'], ('', '', ['a', 'b']))
        check(['a', '.', '.'], ('', '', ['a']))
        # The first part is anchored
        check(['/a/b'], ('', sep, [sep, 'a', 'b']))
        check(['/a', 'b'], ('', sep, [sep, 'a', 'b']))
        check(['/a/', 'b'], ('', sep, [sep, 'a', 'b']))
        # Ignoring parts before an anchored part
        check(['a', '/b', 'c'], ('', sep, [sep, 'b', 'c']))
        check(['a', '/b', '/c'], ('', sep, [sep, 'c']))
class PosixFlavourTest(_BaseFlavourTest, unittest.TestCase):
    """POSIX-specific parse_parts()/splitroot() behaviour."""

    flavour = pathlib._posix_flavour

    def test_parse_parts(self):
        check = self._check_parse_parts
        # Collapsing of excess leading slashes, except for the double-slash
        # special case.
        check(['//a', 'b'], ('', '//', ['//', 'a', 'b']))
        check(['///a', 'b'], ('', '/', ['/', 'a', 'b']))
        check(['////a', 'b'], ('', '/', ['/', 'a', 'b']))
        # Paths which look like NT paths aren't treated specially
        check(['c:a'], ('', '', ['c:a']))
        check(['c:\\a'], ('', '', ['c:\\a']))
        check(['\\a'], ('', '', ['\\a']))

    def test_splitroot(self):
        f = self.flavour.splitroot
        self.assertEqual(f(''), ('', '', ''))
        self.assertEqual(f('a'), ('', '', 'a'))
        self.assertEqual(f('a/b'), ('', '', 'a/b'))
        self.assertEqual(f('a/b/'), ('', '', 'a/b/'))
        self.assertEqual(f('/a'), ('', '/', 'a'))
        self.assertEqual(f('/a/b'), ('', '/', 'a/b'))
        self.assertEqual(f('/a/b/'), ('', '/', 'a/b/'))
        # The root is collapsed when there are redundant slashes
        # except when there are exactly two leading slashes, which
        # is a special case in POSIX.
        self.assertEqual(f('//a'), ('', '//', 'a'))
        self.assertEqual(f('///a'), ('', '/', 'a'))
        self.assertEqual(f('///a/b'), ('', '/', 'a/b'))
        # Paths which look like NT paths aren't treated specially
        self.assertEqual(f('c:/a/b'), ('', '', 'c:/a/b'))
        self.assertEqual(f('\\/a/b'), ('', '', '\\/a/b'))
        self.assertEqual(f('\\a\\b'), ('', '', '\\a\\b'))
class NTFlavourTest(_BaseFlavourTest, unittest.TestCase):
    """Windows-specific parse_parts()/splitroot() behaviour (drives, UNC)."""

    flavour = pathlib._windows_flavour

    def test_parse_parts(self):
        check = self._check_parse_parts
        # First part is anchored
        check(['c:'], ('c:', '', ['c:']))
        check(['c:/'], ('c:', '\\', ['c:\\']))
        check(['/'], ('', '\\', ['\\']))
        check(['c:a'], ('c:', '', ['c:', 'a']))
        check(['c:/a'], ('c:', '\\', ['c:\\', 'a']))
        check(['/a'], ('', '\\', ['\\', 'a']))
        # UNC paths
        check(['//a/b'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
        check(['//a/b/'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
        check(['//a/b/c'], ('\\\\a\\b', '\\', ['\\\\a\\b\\', 'c']))
        # Second part is anchored, so that the first part is ignored
        check(['a', 'Z:b', 'c'], ('Z:', '', ['Z:', 'b', 'c']))
        check(['a', 'Z:/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
        # UNC paths
        check(['a', '//b/c', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
        # Collapsing and stripping excess slashes
        check(['a', 'Z://b//c/', 'd/'], ('Z:', '\\', ['Z:\\', 'b', 'c', 'd']))
        # UNC paths
        check(['a', '//b/c//', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
        # Extended paths
        check(['//?/c:/'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\']))
        check(['//?/c:/a'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'a']))
        check(['//?/c:/a', '/b'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'b']))
        # Extended UNC paths (format is "\\?\UNC\server\share")
        check(['//?/UNC/b/c'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\']))
        check(['//?/UNC/b/c/d'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\', 'd']))
        # Second part has a root but not drive
        check(['a', '/b', 'c'], ('', '\\', ['\\', 'b', 'c']))
        check(['Z:/a', '/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
        check(['//?/Z:/a', '/b', 'c'], ('\\\\?\\Z:', '\\', ['\\\\?\\Z:\\', 'b', 'c']))

    def test_splitroot(self):
        f = self.flavour.splitroot
        self.assertEqual(f(''), ('', '', ''))
        self.assertEqual(f('a'), ('', '', 'a'))
        self.assertEqual(f('a\\b'), ('', '', 'a\\b'))
        self.assertEqual(f('\\a'), ('', '\\', 'a'))
        self.assertEqual(f('\\a\\b'), ('', '\\', 'a\\b'))
        self.assertEqual(f('c:a\\b'), ('c:', '', 'a\\b'))
        self.assertEqual(f('c:\\a\\b'), ('c:', '\\', 'a\\b'))
        # Redundant slashes in the root are collapsed
        self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
        self.assertEqual(f('\\\\\\a/b'), ('', '\\', 'a/b'))
        self.assertEqual(f('c:\\\\a'), ('c:', '\\', 'a'))
        self.assertEqual(f('c:\\\\\\a/b'), ('c:', '\\', 'a/b'))
        # Valid UNC paths
        self.assertEqual(f('\\\\a\\b'), ('\\\\a\\b', '\\', ''))
        self.assertEqual(f('\\\\a\\b\\'), ('\\\\a\\b', '\\', ''))
        self.assertEqual(f('\\\\a\\b\\c\\d'), ('\\\\a\\b', '\\', 'c\\d'))
        # These are non-UNC paths (according to ntpath.py and test_ntpath)
        # However, command.com says such paths are invalid, so it's
        # difficult to know what the right semantics are
        self.assertEqual(f('\\\\\\a\\b'), ('', '\\', 'a\\b'))
        self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
#
# Tests for the pure classes
#
class _BasePurePathTest(object):
    """Behavioural tests shared by the pure path classes; subclasses set
    ``cls`` to PurePosixPath or PureWindowsPath."""

    # keys are canonical paths, values are list of tuples of arguments
    # supposed to produce equal paths
    equivalences = {
        'a/b': [
            ('a', 'b'), ('a/', 'b'), ('a', 'b/'), ('a/', 'b/'),
            ('a/b/',), ('a//b',), ('a//b//',),
            # empty components get removed
            ('', 'a', 'b'), ('a', '', 'b'), ('a', 'b', ''),
            ],
        '/b/c/d': [
            ('a', '/b/c', 'd'), ('a', '///b//c', 'd/'),
            ('/a', '/b/c', 'd'),
            # empty components get removed
            ('/', 'b', '', 'c/d'), ('/', '', 'b/c/d'), ('', '/b/c/d'),
            ],
        }

    def setUp(self):
        # Cache the flavour's separators for use in the tests below.
        p = self.cls('a')
        self.flavour = p._flavour
        self.sep = self.flavour.sep
        self.altsep = self.flavour.altsep

    def test_constructor_common(self):
        P = self.cls
        p = P('a')
        self.assertIsInstance(p, P)
        P('a', 'b', 'c')
        P('/a', 'b', 'c')
        P('a/b/c')
        P('/a/b/c')
        self.assertEqual(P(P('a')), P('a'))
        self.assertEqual(P(P('a'), 'b'), P('a/b'))
        self.assertEqual(P(P('a'), P('b')), P('a/b'))

    def _check_str_subclass(self, *args):
        # Issue #21127: it should be possible to construct a PurePath object
        # from an str subclass instance, and it then gets converted to
        # a pure str object.
        class StrSubclass(str):
            pass
        P = self.cls
        p = P(*(StrSubclass(x) for x in args))
        self.assertEqual(p, P(*args))
        for part in p.parts:
            self.assertIs(type(part), str)

    def test_str_subclass_common(self):
        self._check_str_subclass('')
        self._check_str_subclass('.')
        self._check_str_subclass('a')
        self._check_str_subclass('a/b.txt')
        self._check_str_subclass('/a/b.txt')

    def test_join_common(self):
        P = self.cls
        p = P('a/b')
        pp = p.joinpath('c')
        self.assertEqual(pp, P('a/b/c'))
        self.assertIs(type(pp), type(p))
        pp = p.joinpath('c', 'd')
        self.assertEqual(pp, P('a/b/c/d'))
        pp = p.joinpath(P('c'))
        self.assertEqual(pp, P('a/b/c'))
        # Joining an absolute path replaces the current path entirely.
        pp = p.joinpath('/c')
        self.assertEqual(pp, P('/c'))

    def test_div_common(self):
        # Basically the same as joinpath()
        P = self.cls
        p = P('a/b')
        pp = p / 'c'
        self.assertEqual(pp, P('a/b/c'))
        self.assertIs(type(pp), type(p))
        pp = p / 'c/d'
        self.assertEqual(pp, P('a/b/c/d'))
        pp = p / 'c' / 'd'
        self.assertEqual(pp, P('a/b/c/d'))
        pp = 'c' / p / 'd'
        self.assertEqual(pp, P('c/a/b/d'))
        pp = p / P('c')
        self.assertEqual(pp, P('a/b/c'))
        pp = p / '/c'
        self.assertEqual(pp, P('/c'))

    def _check_str(self, expected, args):
        p = self.cls(*args)
        self.assertEqual(str(p), expected.replace('/', self.sep))

    def test_str_common(self):
        # Canonicalized paths roundtrip
        for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
            self._check_str(pathstr, (pathstr,))
        # Special case for the empty path
        self._check_str('.', ('',))
        # Other tests for str() are in test_equivalences()

    def test_as_posix_common(self):
        P = self.cls
        for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
            self.assertEqual(P(pathstr).as_posix(), pathstr)
        # Other tests for as_posix() are in test_equivalences()

    def test_as_bytes_common(self):
        sep = os.fsencode(self.sep)
        P = self.cls
        self.assertEqual(bytes(P('a/b')), b'a' + sep + b'b')

    def test_as_uri_common(self):
        P = self.cls
        # Only absolute paths can be expressed as a file URI.
        with self.assertRaises(ValueError):
            P('a').as_uri()
        with self.assertRaises(ValueError):
            P().as_uri()

    def test_repr_common(self):
        for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
            p = self.cls(pathstr)
            clsname = p.__class__.__name__
            r = repr(p)
            # The repr() is in the form ClassName("forward-slashes path")
            self.assertTrue(r.startswith(clsname + '('), r)
            self.assertTrue(r.endswith(')'), r)
            inner = r[len(clsname) + 1 : -1]
            self.assertEqual(eval(inner), p.as_posix())
            # The repr() roundtrips
            q = eval(r, pathlib.__dict__)
            self.assertIs(q.__class__, p.__class__)
            self.assertEqual(q, p)
            self.assertEqual(repr(q), r)

    def test_eq_common(self):
        P = self.cls
        self.assertEqual(P('a/b'), P('a/b'))
        self.assertEqual(P('a/b'), P('a', 'b'))
        self.assertNotEqual(P('a/b'), P('a'))
        self.assertNotEqual(P('a/b'), P('/a/b'))
        self.assertNotEqual(P('a/b'), P())
        self.assertNotEqual(P('/a/b'), P('/'))
        self.assertNotEqual(P(), P('/'))
        # Paths never compare equal to non-path objects.
        self.assertNotEqual(P(), "")
        self.assertNotEqual(P(), {})
        self.assertNotEqual(P(), int)

    def test_match_common(self):
        P = self.cls
        self.assertRaises(ValueError, P('a').match, '')
        self.assertRaises(ValueError, P('a').match, '.')
        # Simple relative pattern
        self.assertTrue(P('b.py').match('b.py'))
        self.assertTrue(P('a/b.py').match('b.py'))
        self.assertTrue(P('/a/b.py').match('b.py'))
        self.assertFalse(P('a.py').match('b.py'))
        self.assertFalse(P('b/py').match('b.py'))
        self.assertFalse(P('/a.py').match('b.py'))
        self.assertFalse(P('b.py/c').match('b.py'))
        # Wildcard relative pattern
        self.assertTrue(P('b.py').match('*.py'))
        self.assertTrue(P('a/b.py').match('*.py'))
        self.assertTrue(P('/a/b.py').match('*.py'))
        self.assertFalse(P('b.pyc').match('*.py'))
        self.assertFalse(P('b./py').match('*.py'))
        self.assertFalse(P('b.py/c').match('*.py'))
        # Multi-part relative pattern
        self.assertTrue(P('ab/c.py').match('a*/*.py'))
        self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
        self.assertFalse(P('a.py').match('a*/*.py'))
        self.assertFalse(P('/dab/c.py').match('a*/*.py'))
        self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
        # Absolute pattern
        self.assertTrue(P('/b.py').match('/*.py'))
        self.assertFalse(P('b.py').match('/*.py'))
        self.assertFalse(P('a/b.py').match('/*.py'))
        self.assertFalse(P('/a/b.py').match('/*.py'))
        # Multi-part absolute pattern
        self.assertTrue(P('/a/b.py').match('/a/*.py'))
        self.assertFalse(P('/ab.py').match('/a/*.py'))
        self.assertFalse(P('/a/b/c.py').match('/a/*.py'))

    def test_ordering_common(self):
        # Ordering is tuple-alike
        def assertLess(a, b):
            self.assertLess(a, b)
            self.assertGreater(b, a)
        P = self.cls
        a = P('a')
        b = P('a/b')
        c = P('abc')
        d = P('b')
        assertLess(a, b)
        assertLess(a, c)
        assertLess(a, d)
        assertLess(b, c)
        assertLess(c, d)
        P = self.cls
        a = P('/a')
        b = P('/a/b')
        c = P('/abc')
        d = P('/b')
        assertLess(a, b)
        assertLess(a, c)
        assertLess(a, d)
        assertLess(b, c)
        assertLess(c, d)
        # Ordering against non-path objects is undefined.
        with self.assertRaises(TypeError):
            P() < {}

    def test_parts_common(self):
        # `parts` returns a tuple
        sep = self.sep
        P = self.cls
        p = P('a/b')
        parts = p.parts
        self.assertEqual(parts, ('a', 'b'))
        # The object gets reused
        self.assertIs(parts, p.parts)
        # When the path is absolute, the anchor is a separate part
        p = P('/a/b')
        parts = p.parts
        self.assertEqual(parts, (sep, 'a', 'b'))

    def test_equivalences(self):
        # Every argument tuple in self.equivalences must construct a path
        # equal (and hashing equal) to its canonical key.
        for k, tuples in self.equivalences.items():
            canon = k.replace('/', self.sep)
            posix = k.replace(self.sep, '/')
            if canon != posix:
                tuples = tuples + [
                    tuple(part.replace('/', self.sep) for part in t)
                    for t in tuples
                    ]
                tuples.append((posix, ))
            pcanon = self.cls(canon)
            for t in tuples:
                p = self.cls(*t)
                self.assertEqual(p, pcanon, "failed with args {}".format(t))
                self.assertEqual(hash(p), hash(pcanon))
                self.assertEqual(str(p), canon)
                self.assertEqual(p.as_posix(), posix)

    def test_parent_common(self):
        # Relative
        P = self.cls
        p = P('a/b/c')
        self.assertEqual(p.parent, P('a/b'))
        self.assertEqual(p.parent.parent, P('a'))
        self.assertEqual(p.parent.parent.parent, P())
        self.assertEqual(p.parent.parent.parent.parent, P())
        # Anchored
        p = P('/a/b/c')
        self.assertEqual(p.parent, P('/a/b'))
        self.assertEqual(p.parent.parent, P('/a'))
        self.assertEqual(p.parent.parent.parent, P('/'))
        self.assertEqual(p.parent.parent.parent.parent, P('/'))

    def test_parents_common(self):
        # Relative
        P = self.cls
        p = P('a/b/c')
        par = p.parents
        self.assertEqual(len(par), 3)
        self.assertEqual(par[0], P('a/b'))
        self.assertEqual(par[1], P('a'))
        self.assertEqual(par[2], P('.'))
        self.assertEqual(list(par), [P('a/b'), P('a'), P('.')])
        with self.assertRaises(IndexError):
            par[-1]
        with self.assertRaises(IndexError):
            par[3]
        with self.assertRaises(TypeError):
            par[0] = p
        # Anchored
        p = P('/a/b/c')
        par = p.parents
        self.assertEqual(len(par), 3)
        self.assertEqual(par[0], P('/a/b'))
        self.assertEqual(par[1], P('/a'))
        self.assertEqual(par[2], P('/'))
        self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
        with self.assertRaises(IndexError):
            par[3]

    def test_drive_common(self):
        # POSIX paths never have a drive; Windows-specific drive tests
        # live in the PureWindowsPath subclass.
        P = self.cls
        self.assertEqual(P('a/b').drive, '')
        self.assertEqual(P('/a/b').drive, '')
        self.assertEqual(P('').drive, '')

    def test_root_common(self):
        P = self.cls
        sep = self.sep
        self.assertEqual(P('').root, '')
        self.assertEqual(P('a/b').root, '')
        self.assertEqual(P('/').root, sep)
        self.assertEqual(P('/a/b').root, sep)

    def test_anchor_common(self):
        P = self.cls
        sep = self.sep
        self.assertEqual(P('').anchor, '')
        self.assertEqual(P('a/b').anchor, '')
        self.assertEqual(P('/').anchor, sep)
        self.assertEqual(P('/a/b').anchor, sep)

    def test_name_common(self):
        P = self.cls
        self.assertEqual(P('').name, '')
        self.assertEqual(P('.').name, '')
        self.assertEqual(P('/').name, '')
        self.assertEqual(P('a/b').name, 'b')
        self.assertEqual(P('/a/b').name, 'b')
        self.assertEqual(P('/a/b/.').name, 'b')
        self.assertEqual(P('a/b.py').name, 'b.py')
        self.assertEqual(P('/a/b.py').name, 'b.py')

    def test_suffix_common(self):
        P = self.cls
        self.assertEqual(P('').suffix, '')
        self.assertEqual(P('.').suffix, '')
        self.assertEqual(P('..').suffix, '')
        self.assertEqual(P('/').suffix, '')
        self.assertEqual(P('a/b').suffix, '')
        self.assertEqual(P('/a/b').suffix, '')
        self.assertEqual(P('/a/b/.').suffix, '')
        self.assertEqual(P('a/b.py').suffix, '.py')
        self.assertEqual(P('/a/b.py').suffix, '.py')
        # A leading dot marks a hidden file, not a suffix.
        self.assertEqual(P('a/.hgrc').suffix, '')
        self.assertEqual(P('/a/.hgrc').suffix, '')
        self.assertEqual(P('a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('/a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('/a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('a/Some name. Ending with a dot.').suffix, '')
        self.assertEqual(P('/a/Some name. Ending with a dot.').suffix, '')

    def test_suffixes_common(self):
        P = self.cls
        self.assertEqual(P('').suffixes, [])
        self.assertEqual(P('.').suffixes, [])
        self.assertEqual(P('/').suffixes, [])
        self.assertEqual(P('a/b').suffixes, [])
        self.assertEqual(P('/a/b').suffixes, [])
        self.assertEqual(P('/a/b/.').suffixes, [])
        self.assertEqual(P('a/b.py').suffixes, ['.py'])
        self.assertEqual(P('/a/b.py').suffixes, ['.py'])
        self.assertEqual(P('a/.hgrc').suffixes, [])
        self.assertEqual(P('/a/.hgrc').suffixes, [])
        self.assertEqual(P('a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('/a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('/a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('a/Some name. Ending with a dot.').suffixes, [])
        self.assertEqual(P('/a/Some name. Ending with a dot.').suffixes, [])

    def test_stem_common(self):
        P = self.cls
        self.assertEqual(P('').stem, '')
        self.assertEqual(P('.').stem, '')
        self.assertEqual(P('..').stem, '..')
        self.assertEqual(P('/').stem, '')
        self.assertEqual(P('a/b').stem, 'b')
        self.assertEqual(P('a/b.py').stem, 'b')
        self.assertEqual(P('a/.hgrc').stem, '.hgrc')
        self.assertEqual(P('a/.hg.rc').stem, '.hg')
        self.assertEqual(P('a/b.tar.gz').stem, 'b.tar')
        self.assertEqual(P('a/Some name. Ending with a dot.').stem,
                         'Some name. Ending with a dot.')

    def test_with_name_common(self):
        P = self.cls
        self.assertEqual(P('a/b').with_name('d.xml'), P('a/d.xml'))
        self.assertEqual(P('/a/b').with_name('d.xml'), P('/a/d.xml'))
        self.assertEqual(P('a/b.py').with_name('d.xml'), P('a/d.xml'))
        self.assertEqual(P('/a/b.py').with_name('d.xml'), P('/a/d.xml'))
        self.assertEqual(P('a/Dot ending.').with_name('d.xml'), P('a/d.xml'))
        self.assertEqual(P('/a/Dot ending.').with_name('d.xml'), P('/a/d.xml'))
        # Paths without a name component cannot be renamed.
        self.assertRaises(ValueError, P('').with_name, 'd.xml')
        self.assertRaises(ValueError, P('.').with_name, 'd.xml')
        self.assertRaises(ValueError, P('/').with_name, 'd.xml')
        # The new name must be a single non-empty component.
        self.assertRaises(ValueError, P('a/b').with_name, '')
        self.assertRaises(ValueError, P('a/b').with_name, '/c')
        self.assertRaises(ValueError, P('a/b').with_name, 'c/')
        self.assertRaises(ValueError, P('a/b').with_name, 'c/d')

    def test_with_suffix_common(self):
        P = self.cls
        self.assertEqual(P('a/b').with_suffix('.gz'), P('a/b.gz'))
        self.assertEqual(P('/a/b').with_suffix('.gz'), P('/a/b.gz'))
        self.assertEqual(P('a/b.py').with_suffix('.gz'), P('a/b.gz'))
        self.assertEqual(P('/a/b.py').with_suffix('.gz'), P('/a/b.gz'))
        # Stripping suffix
        self.assertEqual(P('a/b.py').with_suffix(''), P('a/b'))
        self.assertEqual(P('/a/b').with_suffix(''), P('/a/b'))
        # Path doesn't have a "filename" component
        self.assertRaises(ValueError, P('').with_suffix, '.gz')
        self.assertRaises(ValueError, P('.').with_suffix, '.gz')
        self.assertRaises(ValueError, P('/').with_suffix, '.gz')
        # Invalid suffix
        self.assertRaises(ValueError, P('a/b').with_suffix, 'gz')
        self.assertRaises(ValueError, P('a/b').with_suffix, '/')
        self.assertRaises(ValueError, P('a/b').with_suffix, '.')
        self.assertRaises(ValueError, P('a/b').with_suffix, '/.gz')
        self.assertRaises(ValueError, P('a/b').with_suffix, 'c/d')
        self.assertRaises(ValueError, P('a/b').with_suffix, '.c/.d')
        self.assertRaises(ValueError, P('a/b').with_suffix, './.d')
        self.assertRaises(ValueError, P('a/b').with_suffix, '.d/.')

    def test_relative_to_common(self):
        P = self.cls
        p = P('a/b')
        self.assertRaises(TypeError, p.relative_to)
        self.assertRaises(TypeError, p.relative_to, b'a')
        self.assertEqual(p.relative_to(P()), P('a/b'))
        self.assertEqual(p.relative_to(''), P('a/b'))
        self.assertEqual(p.relative_to(P('a')), P('b'))
        self.assertEqual(p.relative_to('a'), P('b'))
        self.assertEqual(p.relative_to('a/'), P('b'))
        self.assertEqual(p.relative_to(P('a/b')), P())
        self.assertEqual(p.relative_to('a/b'), P())
        # With several args
        self.assertEqual(p.relative_to('a', 'b'), P())
        # Unrelated paths
        self.assertRaises(ValueError, p.relative_to, P('c'))
        self.assertRaises(ValueError, p.relative_to, P('a/b/c'))
        self.assertRaises(ValueError, p.relative_to, P('a/c'))
        self.assertRaises(ValueError, p.relative_to, P('/a'))
        p = P('/a/b')
        self.assertEqual(p.relative_to(P('/')), P('a/b'))
        self.assertEqual(p.relative_to('/'), P('a/b'))
        self.assertEqual(p.relative_to(P('/a')), P('b'))
        self.assertEqual(p.relative_to('/a'), P('b'))
        self.assertEqual(p.relative_to('/a/'), P('b'))
        self.assertEqual(p.relative_to(P('/a/b')), P())
        self.assertEqual(p.relative_to('/a/b'), P())
        # Unrelated paths
        self.assertRaises(ValueError, p.relative_to, P('/c'))
        self.assertRaises(ValueError, p.relative_to, P('/a/b/c'))
        self.assertRaises(ValueError, p.relative_to, P('/a/c'))
        self.assertRaises(ValueError, p.relative_to, P())
        self.assertRaises(ValueError, p.relative_to, '')
        self.assertRaises(ValueError, p.relative_to, P('a'))

    def test_pickling_common(self):
        # Paths pickle/unpickle losslessly under every protocol.
        P = self.cls
        p = P('/a/b')
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            dumped = pickle.dumps(p, proto)
            pp = pickle.loads(dumped)
            self.assertIs(pp.__class__, p.__class__)
            self.assertEqual(pp, p)
            self.assertEqual(hash(pp), hash(p))
            self.assertEqual(str(pp), str(p))
class PurePosixPathTest(_BasePurePathTest, unittest.TestCase):
    """PurePosixPath-specific behaviour on top of the shared tests."""

    cls = pathlib.PurePosixPath

    def test_root(self):
        P = self.cls
        self.assertEqual(P('/a/b').root, '/')
        self.assertEqual(P('///a/b').root, '/')
        # POSIX special case for two leading slashes
        self.assertEqual(P('//a/b').root, '//')

    def test_eq(self):
        P = self.cls
        # POSIX comparisons are case-sensitive.
        self.assertNotEqual(P('a/b'), P('A/b'))
        self.assertEqual(P('/a'), P('///a'))
        self.assertNotEqual(P('/a'), P('//a'))

    def test_as_uri(self):
        P = self.cls
        self.assertEqual(P('/').as_uri(), 'file:///')
        self.assertEqual(P('/a/b.c').as_uri(), 'file:///a/b.c')
        self.assertEqual(P('/a/b%#c').as_uri(), 'file:///a/b%25%23c')

    def test_as_uri_non_ascii(self):
        from urllib.parse import quote_from_bytes
        P = self.cls
        try:
            os.fsencode('\xe9')
        except UnicodeEncodeError:
            self.skipTest("\\xe9 cannot be encoded to the filesystem encoding")
        self.assertEqual(P('/a/b\xe9').as_uri(),
                         'file:///a/b' + quote_from_bytes(os.fsencode('\xe9')))

    def test_match(self):
        P = self.cls
        # Matching is case-sensitive on POSIX.
        self.assertFalse(P('A.py').match('a.PY'))

    def test_is_absolute(self):
        P = self.cls
        self.assertFalse(P().is_absolute())
        self.assertFalse(P('a').is_absolute())
        self.assertFalse(P('a/b/').is_absolute())
        self.assertTrue(P('/').is_absolute())
        self.assertTrue(P('/a').is_absolute())
        self.assertTrue(P('/a/b/').is_absolute())
        self.assertTrue(P('//a').is_absolute())
        self.assertTrue(P('//a/b').is_absolute())

    def test_is_reserved(self):
        # No reserved names on POSIX (unlike Windows NUL/PRN/...).
        P = self.cls
        self.assertIs(False, P('').is_reserved())
        self.assertIs(False, P('/').is_reserved())
        self.assertIs(False, P('/foo/bar').is_reserved())
        self.assertIs(False, P('/dev/con/PRN/NUL').is_reserved())

    def test_join(self):
        P = self.cls
        p = P('//a')
        pp = p.joinpath('b')
        self.assertEqual(pp, P('//a/b'))
        pp = P('/a').joinpath('//c')
        self.assertEqual(pp, P('//c'))
        pp = P('//a').joinpath('/c')
        self.assertEqual(pp, P('/c'))

    def test_div(self):
        # Basically the same as joinpath()
        P = self.cls
        p = P('//a')
        pp = p / 'b'
        self.assertEqual(pp, P('//a/b'))
        pp = P('/a') / '//c'
        self.assertEqual(pp, P('//c'))
        pp = P('//a') / '/c'
        self.assertEqual(pp, P('/c'))
class PureWindowsPathTest(_BasePurePathTest, unittest.TestCase):
    """Pure-path tests specific to the Windows flavour: drive letters,
    UNC shares, backslash separators, case-insensitive comparison and
    matching, and reserved device names."""
    cls = pathlib.PureWindowsPath
    # Extra constructor-argument equivalences for drive and UNC anchors.
    equivalences = _BasePurePathTest.equivalences.copy()
    equivalences.update({
        'c:a': [ ('c:', 'a'), ('c:', 'a/'), ('/', 'c:', 'a') ],
        'c:/a': [
            ('c:/', 'a'), ('c:', '/', 'a'), ('c:', '/a'),
            ('/z', 'c:/', 'a'), ('//x/y', 'c:/', 'a'),
            ],
        '//a/b/': [ ('//a/b',) ],
        '//a/b/c': [
            ('//a/b', 'c'), ('//a/b/', 'c'),
            ],
        })
    def test_str(self):
        """str() uses backslashes; a bare UNC share gets a trailing one."""
        p = self.cls('a/b/c')
        self.assertEqual(str(p), 'a\\b\\c')
        p = self.cls('c:/a/b/c')
        self.assertEqual(str(p), 'c:\\a\\b\\c')
        p = self.cls('//a/b')
        self.assertEqual(str(p), '\\\\a\\b\\')
        p = self.cls('//a/b/c')
        self.assertEqual(str(p), '\\\\a\\b\\c')
        p = self.cls('//a/b/c/d')
        self.assertEqual(str(p), '\\\\a\\b\\c\\d')
    def test_str_subclass(self):
        """Construction from str subclasses works for drive and UNC forms."""
        self._check_str_subclass('c:')
        self._check_str_subclass('c:a')
        self._check_str_subclass('c:a\\b.txt')
        self._check_str_subclass('c:\\')
        self._check_str_subclass('c:\\a')
        self._check_str_subclass('c:\\a\\b.txt')
        self._check_str_subclass('\\\\some\\share')
        self._check_str_subclass('\\\\some\\share\\a')
        self._check_str_subclass('\\\\some\\share\\a\\b.txt')
    def test_eq(self):
        """Equality respects drives/roots and ignores case."""
        P = self.cls
        self.assertEqual(P('c:a/b'), P('c:a/b'))
        self.assertEqual(P('c:a/b'), P('c:', 'a', 'b'))
        self.assertNotEqual(P('c:a/b'), P('d:a/b'))
        self.assertNotEqual(P('c:a/b'), P('c:/a/b'))
        self.assertNotEqual(P('/a/b'), P('c:/a/b'))
        # Case-insensitivity
        self.assertEqual(P('a/B'), P('A/b'))
        self.assertEqual(P('C:a/B'), P('c:A/b'))
        self.assertEqual(P('//Some/SHARE/a/B'), P('//somE/share/A/b'))
    def test_as_uri(self):
        """as_uri() requires a drive or UNC anchor and percent-encodes."""
        P = self.cls
        with self.assertRaises(ValueError):
            P('/a/b').as_uri()
        with self.assertRaises(ValueError):
            P('c:a/b').as_uri()
        self.assertEqual(P('c:/').as_uri(), 'file:///c:/')
        self.assertEqual(P('c:/a/b.c').as_uri(), 'file:///c:/a/b.c')
        self.assertEqual(P('c:/a/b%#c').as_uri(), 'file:///c:/a/b%25%23c')
        self.assertEqual(P('c:/a/b\xe9').as_uri(), 'file:///c:/a/b%C3%A9')
        self.assertEqual(P('//some/share/').as_uri(), 'file://some/share/')
        self.assertEqual(P('//some/share/a/b.c').as_uri(),
                         'file://some/share/a/b.c')
        self.assertEqual(P('//some/share/a/b%#c\xe9').as_uri(),
                         'file://some/share/a/b%25%23c%C3%A9')
    def test_match_common(self):
        """match() is anchored by drive/UNC share and case-insensitive."""
        P = self.cls
        # Absolute patterns
        self.assertTrue(P('c:/b.py').match('/*.py'))
        self.assertTrue(P('c:/b.py').match('c:*.py'))
        self.assertTrue(P('c:/b.py').match('c:/*.py'))
        self.assertFalse(P('d:/b.py').match('c:/*.py'))  # wrong drive
        self.assertFalse(P('b.py').match('/*.py'))
        self.assertFalse(P('b.py').match('c:*.py'))
        self.assertFalse(P('b.py').match('c:/*.py'))
        self.assertFalse(P('c:b.py').match('/*.py'))
        self.assertFalse(P('c:b.py').match('c:/*.py'))
        self.assertFalse(P('/b.py').match('c:*.py'))
        self.assertFalse(P('/b.py').match('c:/*.py'))
        # UNC patterns
        self.assertTrue(P('//some/share/a.py').match('/*.py'))
        self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
        self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
        self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
        # Case-insensitivity
        self.assertTrue(P('B.py').match('b.PY'))
        self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
        self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
    def test_ordering_common(self):
        """Ordering comparisons ignore case, consistently with equality."""
        # Case-insensitivity
        def assertOrderedEqual(a, b):
            self.assertLessEqual(a, b)
            self.assertGreaterEqual(b, a)
        P = self.cls
        p = P('c:A/b')
        q = P('C:a/B')
        assertOrderedEqual(p, q)
        self.assertFalse(p < q)
        self.assertFalse(p > q)
        p = P('//some/Share/A/b')
        q = P('//Some/SHARE/a/B')
        assertOrderedEqual(p, q)
        self.assertFalse(p < q)
        self.assertFalse(p > q)
    def test_parts(self):
        """parts' first element is the combined drive+root anchor."""
        P = self.cls
        p = P('c:a/b')
        parts = p.parts
        self.assertEqual(parts, ('c:', 'a', 'b'))
        p = P('c:/a/b')
        parts = p.parts
        self.assertEqual(parts, ('c:\\', 'a', 'b'))
        p = P('//a/b/c/d')
        parts = p.parts
        self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))
    def test_parent(self):
        """parent stops at the anchor (drive, drive+root, or UNC share)."""
        # Anchored
        P = self.cls
        p = P('z:a/b/c')
        self.assertEqual(p.parent, P('z:a/b'))
        self.assertEqual(p.parent.parent, P('z:a'))
        self.assertEqual(p.parent.parent.parent, P('z:'))
        self.assertEqual(p.parent.parent.parent.parent, P('z:'))
        p = P('z:/a/b/c')
        self.assertEqual(p.parent, P('z:/a/b'))
        self.assertEqual(p.parent.parent, P('z:/a'))
        self.assertEqual(p.parent.parent.parent, P('z:/'))
        self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
        p = P('//a/b/c/d')
        self.assertEqual(p.parent, P('//a/b/c'))
        self.assertEqual(p.parent.parent, P('//a/b'))
        self.assertEqual(p.parent.parent.parent, P('//a/b'))
    def test_parents(self):
        """parents enumerates ancestors down to (but not past) the anchor."""
        # Anchored
        P = self.cls
        p = P('z:a/b/')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('z:a'))
        self.assertEqual(par[1], P('z:'))
        self.assertEqual(list(par), [P('z:a'), P('z:')])
        with self.assertRaises(IndexError):
            par[2]
        p = P('z:/a/b/')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('z:/a'))
        self.assertEqual(par[1], P('z:/'))
        self.assertEqual(list(par), [P('z:/a'), P('z:/')])
        with self.assertRaises(IndexError):
            par[2]
        p = P('//a/b/c/d')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('//a/b/c'))
        self.assertEqual(par[1], P('//a/b'))
        self.assertEqual(list(par), [P('//a/b/c'), P('//a/b')])
        with self.assertRaises(IndexError):
            par[2]
    def test_drive(self):
        """drive is the letter+colon, or the \\\\server\\share prefix."""
        P = self.cls
        self.assertEqual(P('c:').drive, 'c:')
        self.assertEqual(P('c:a/b').drive, 'c:')
        self.assertEqual(P('c:/').drive, 'c:')
        self.assertEqual(P('c:/a/b/').drive, 'c:')
        self.assertEqual(P('//a/b').drive, '\\\\a\\b')
        self.assertEqual(P('//a/b/').drive, '\\\\a\\b')
        self.assertEqual(P('//a/b/c/d').drive, '\\\\a\\b')
    def test_root(self):
        """root is a single backslash when present, empty for drive-relative."""
        P = self.cls
        self.assertEqual(P('c:').root, '')
        self.assertEqual(P('c:a/b').root, '')
        self.assertEqual(P('c:/').root, '\\')
        self.assertEqual(P('c:/a/b/').root, '\\')
        self.assertEqual(P('//a/b').root, '\\')
        self.assertEqual(P('//a/b/').root, '\\')
        self.assertEqual(P('//a/b/c/d').root, '\\')
    def test_anchor(self):
        """anchor is the concatenation of drive and root."""
        P = self.cls
        self.assertEqual(P('c:').anchor, 'c:')
        self.assertEqual(P('c:a/b').anchor, 'c:')
        self.assertEqual(P('c:/').anchor, 'c:\\')
        self.assertEqual(P('c:/a/b/').anchor, 'c:\\')
        self.assertEqual(P('//a/b').anchor, '\\\\a\\b\\')
        self.assertEqual(P('//a/b/').anchor, '\\\\a\\b\\')
        self.assertEqual(P('//a/b/c/d').anchor, '\\\\a\\b\\')
    def test_name(self):
        """name is empty for bare anchors, including bare UNC shares."""
        P = self.cls
        self.assertEqual(P('c:').name, '')
        self.assertEqual(P('c:/').name, '')
        self.assertEqual(P('c:a/b').name, 'b')
        self.assertEqual(P('c:/a/b').name, 'b')
        self.assertEqual(P('c:a/b.py').name, 'b.py')
        self.assertEqual(P('c:/a/b.py').name, 'b.py')
        self.assertEqual(P('//My.py/Share.php').name, '')
        self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')
    def test_suffix(self):
        """suffix comes from the final component only, never the anchor."""
        P = self.cls
        self.assertEqual(P('c:').suffix, '')
        self.assertEqual(P('c:/').suffix, '')
        self.assertEqual(P('c:a/b').suffix, '')
        self.assertEqual(P('c:/a/b').suffix, '')
        self.assertEqual(P('c:a/b.py').suffix, '.py')
        self.assertEqual(P('c:/a/b.py').suffix, '.py')
        self.assertEqual(P('c:a/.hgrc').suffix, '')
        self.assertEqual(P('c:/a/.hgrc').suffix, '')
        self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('c:a/Some name. Ending with a dot.').suffix, '')
        self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffix, '')
        self.assertEqual(P('//My.py/Share.php').suffix, '')
        self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')
    def test_suffixes(self):
        """suffixes lists every dot-suffix of the final component."""
        P = self.cls
        self.assertEqual(P('c:').suffixes, [])
        self.assertEqual(P('c:/').suffixes, [])
        self.assertEqual(P('c:a/b').suffixes, [])
        self.assertEqual(P('c:/a/b').suffixes, [])
        self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
        self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
        self.assertEqual(P('c:a/.hgrc').suffixes, [])
        self.assertEqual(P('c:/a/.hgrc').suffixes, [])
        self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('//My.py/Share.php').suffixes, [])
        self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
        self.assertEqual(P('c:a/Some name. Ending with a dot.').suffixes, [])
        self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffixes, [])
    def test_stem(self):
        """stem is the final component with its last suffix removed."""
        P = self.cls
        self.assertEqual(P('c:').stem, '')
        self.assertEqual(P('c:.').stem, '')
        self.assertEqual(P('c:..').stem, '..')
        self.assertEqual(P('c:/').stem, '')
        self.assertEqual(P('c:a/b').stem, 'b')
        self.assertEqual(P('c:a/b.py').stem, 'b')
        self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
        self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
        self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
        self.assertEqual(P('c:a/Some name. Ending with a dot.').stem,
                         'Some name. Ending with a dot.')
    def test_with_name(self):
        """with_name() rejects anchors and names containing drive/UNC parts."""
        P = self.cls
        self.assertEqual(P('c:a/b').with_name('d.xml'), P('c:a/d.xml'))
        self.assertEqual(P('c:/a/b').with_name('d.xml'), P('c:/a/d.xml'))
        self.assertEqual(P('c:a/Dot ending.').with_name('d.xml'), P('c:a/d.xml'))
        self.assertEqual(P('c:/a/Dot ending.').with_name('d.xml'), P('c:/a/d.xml'))
        self.assertRaises(ValueError, P('c:').with_name, 'd.xml')
        self.assertRaises(ValueError, P('c:/').with_name, 'd.xml')
        self.assertRaises(ValueError, P('//My/Share').with_name, 'd.xml')
        self.assertRaises(ValueError, P('c:a/b').with_name, 'd:')
        self.assertRaises(ValueError, P('c:a/b').with_name, 'd:e')
        self.assertRaises(ValueError, P('c:a/b').with_name, 'd:/e')
        self.assertRaises(ValueError, P('c:a/b').with_name, '//My/Share')
    def test_with_suffix(self):
        """with_suffix() rejects separators, drives and malformed suffixes."""
        P = self.cls
        self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
        self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
        self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
        self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
        # Path doesn't have a "filename" component
        self.assertRaises(ValueError, P('').with_suffix, '.gz')
        self.assertRaises(ValueError, P('.').with_suffix, '.gz')
        self.assertRaises(ValueError, P('/').with_suffix, '.gz')
        self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
        # Invalid suffix
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
    def test_relative_to(self):
        """relative_to() matches drives/shares case-insensitively and
        refuses unrelated anchors."""
        P = self.cls
        p = P('C:Foo/Bar')
        self.assertEqual(p.relative_to(P('c:')), P('Foo/Bar'))
        self.assertEqual(p.relative_to('c:'), P('Foo/Bar'))
        self.assertEqual(p.relative_to(P('c:foO')), P('Bar'))
        self.assertEqual(p.relative_to('c:foO'), P('Bar'))
        self.assertEqual(p.relative_to('c:foO/'), P('Bar'))
        self.assertEqual(p.relative_to(P('c:foO/baR')), P())
        self.assertEqual(p.relative_to('c:foO/baR'), P())
        # Unrelated paths
        self.assertRaises(ValueError, p.relative_to, P())
        self.assertRaises(ValueError, p.relative_to, '')
        self.assertRaises(ValueError, p.relative_to, P('d:'))
        self.assertRaises(ValueError, p.relative_to, P('/'))
        self.assertRaises(ValueError, p.relative_to, P('Foo'))
        self.assertRaises(ValueError, p.relative_to, P('/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('C:/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('C:Foo/Bar/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:Foo/Baz'))
        p = P('C:/Foo/Bar')
        self.assertEqual(p.relative_to(P('c:')), P('/Foo/Bar'))
        self.assertEqual(p.relative_to('c:'), P('/Foo/Bar'))
        self.assertEqual(str(p.relative_to(P('c:'))), '\\Foo\\Bar')
        self.assertEqual(str(p.relative_to('c:')), '\\Foo\\Bar')
        self.assertEqual(p.relative_to(P('c:/')), P('Foo/Bar'))
        self.assertEqual(p.relative_to('c:/'), P('Foo/Bar'))
        self.assertEqual(p.relative_to(P('c:/foO')), P('Bar'))
        self.assertEqual(p.relative_to('c:/foO'), P('Bar'))
        self.assertEqual(p.relative_to('c:/foO/'), P('Bar'))
        self.assertEqual(p.relative_to(P('c:/foO/baR')), P())
        self.assertEqual(p.relative_to('c:/foO/baR'), P())
        # Unrelated paths
        self.assertRaises(ValueError, p.relative_to, P('C:/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Bar/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Baz'))
        self.assertRaises(ValueError, p.relative_to, P('C:Foo'))
        self.assertRaises(ValueError, p.relative_to, P('d:'))
        self.assertRaises(ValueError, p.relative_to, P('d:/'))
        self.assertRaises(ValueError, p.relative_to, P('/'))
        self.assertRaises(ValueError, p.relative_to, P('/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('//C/Foo'))
        # UNC paths
        p = P('//Server/Share/Foo/Bar')
        self.assertEqual(p.relative_to(P('//sErver/sHare')), P('Foo/Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare'), P('Foo/Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare/'), P('Foo/Bar'))
        self.assertEqual(p.relative_to(P('//sErver/sHare/Foo')), P('Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare/Foo'), P('Bar'))
        self.assertEqual(p.relative_to('//sErver/sHare/Foo/'), P('Bar'))
        self.assertEqual(p.relative_to(P('//sErver/sHare/Foo/Bar')), P())
        self.assertEqual(p.relative_to('//sErver/sHare/Foo/Bar'), P())
        # Unrelated paths
        self.assertRaises(ValueError, p.relative_to, P('/Server/Share/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('c:/Server/Share/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('//z/Share/Foo'))
        self.assertRaises(ValueError, p.relative_to, P('//Server/z/Foo'))
    def test_is_absolute(self):
        """On Windows absolute means drive AND root (or any UNC path)."""
        P = self.cls
        # Under NT, only paths with both a drive and a root are absolute
        self.assertFalse(P().is_absolute())
        self.assertFalse(P('a').is_absolute())
        self.assertFalse(P('a/b/').is_absolute())
        self.assertFalse(P('/').is_absolute())
        self.assertFalse(P('/a').is_absolute())
        self.assertFalse(P('/a/b/').is_absolute())
        self.assertFalse(P('c:').is_absolute())
        self.assertFalse(P('c:a').is_absolute())
        self.assertFalse(P('c:a/b/').is_absolute())
        self.assertTrue(P('c:/').is_absolute())
        self.assertTrue(P('c:/a').is_absolute())
        self.assertTrue(P('c:/a/b/').is_absolute())
        # UNC paths are absolute by definition
        self.assertTrue(P('//a/b').is_absolute())
        self.assertTrue(P('//a/b/').is_absolute())
        self.assertTrue(P('//a/b/c').is_absolute())
        self.assertTrue(P('//a/b/c/d').is_absolute())
    def test_join(self):
        """joinpath() honours drive changes in the right-hand operand."""
        P = self.cls
        p = P('C:/a/b')
        pp = p.joinpath('x/y')
        self.assertEqual(pp, P('C:/a/b/x/y'))
        pp = p.joinpath('/x/y')
        self.assertEqual(pp, P('C:/x/y'))
        # Joining with a different drive => the first path is ignored, even
        # if the second path is relative.
        pp = p.joinpath('D:x/y')
        self.assertEqual(pp, P('D:x/y'))
        pp = p.joinpath('D:/x/y')
        self.assertEqual(pp, P('D:/x/y'))
        pp = p.joinpath('//host/share/x/y')
        self.assertEqual(pp, P('//host/share/x/y'))
        # Joining with the same drive => the first path is appended to if
        # the second path is relative.
        pp = p.joinpath('c:x/y')
        self.assertEqual(pp, P('C:/a/b/x/y'))
        pp = p.joinpath('c:/x/y')
        self.assertEqual(pp, P('C:/x/y'))
    def test_div(self):
        """The / operator behaves exactly like joinpath()."""
        # Basically the same as joinpath()
        P = self.cls
        p = P('C:/a/b')
        self.assertEqual(p / 'x/y', P('C:/a/b/x/y'))
        self.assertEqual(p / 'x' / 'y', P('C:/a/b/x/y'))
        self.assertEqual(p / '/x/y', P('C:/x/y'))
        self.assertEqual(p / '/x' / 'y', P('C:/x/y'))
        # Joining with a different drive => the first path is ignored, even
        # if the second path is relative.
        self.assertEqual(p / 'D:x/y', P('D:x/y'))
        self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
        self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
        self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
        self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
        # Joining with the same drive => the first path is appended to if
        # the second path is relative.
        self.assertEqual(p / 'c:x/y', P('C:/a/b/x/y'))
        self.assertEqual(p / 'c:/x/y', P('C:/x/y'))
    def test_is_reserved(self):
        """Reserved device names (CON, NUL, COMx, LPTx) are detected from
        the last component only; UNC paths are never reserved."""
        P = self.cls
        self.assertIs(False, P('').is_reserved())
        self.assertIs(False, P('/').is_reserved())
        self.assertIs(False, P('/foo/bar').is_reserved())
        self.assertIs(True, P('con').is_reserved())
        self.assertIs(True, P('NUL').is_reserved())
        self.assertIs(True, P('NUL.txt').is_reserved())
        self.assertIs(True, P('com1').is_reserved())
        self.assertIs(True, P('com9.bar').is_reserved())
        self.assertIs(False, P('bar.com9').is_reserved())
        self.assertIs(True, P('lpt1').is_reserved())
        self.assertIs(True, P('lpt9.bar').is_reserved())
        self.assertIs(False, P('bar.lpt9').is_reserved())
        # Only the last component matters
        self.assertIs(False, P('c:/NUL/con/baz').is_reserved())
        # UNC paths are never reserved
        self.assertIs(False, P('//my/share/nul/con/aux').is_reserved())
class PurePathTest(_BasePurePathTest, unittest.TestCase):
    """Tests for the generic PurePath class, which selects a concrete
    flavour at instantiation time based on the host OS."""
    cls = pathlib.PurePath
    def test_concrete_class(self):
        """Instantiating PurePath yields the flavour matching os.name."""
        p = self.cls('a')
        self.assertIs(type(p),
            pathlib.PureWindowsPath if os.name == 'nt' else pathlib.PurePosixPath)
    def test_different_flavours_unequal(self):
        """Paths of different flavours never compare equal."""
        p = pathlib.PurePosixPath('a')
        q = pathlib.PureWindowsPath('a')
        self.assertNotEqual(p, q)
    def test_different_flavours_unordered(self):
        """Ordering comparisons across flavours raise TypeError."""
        p = pathlib.PurePosixPath('a')
        q = pathlib.PureWindowsPath('a')
        with self.assertRaises(TypeError):
            p < q
        with self.assertRaises(TypeError):
            p <= q
        with self.assertRaises(TypeError):
            p > q
        with self.assertRaises(TypeError):
            p >= q
#
# Tests for the concrete classes
#
# Make sure any symbolic links in the base test path are resolved
BASE = os.path.realpath(TESTFN)
# join/rel_join build paths under the (absolute/relative) test directory.
join = lambda *x: os.path.join(BASE, *x)
rel_join = lambda *x: os.path.join(TESTFN, *x)
def symlink_skip_reason():
    """Return None if symlinks are usable on this platform, else a
    human-readable skip message (probed by actually creating one)."""
    if not pathlib.supports_symlinks:
        return "no system support for symlinks"
    try:
        os.symlink(__file__, BASE)
    except OSError as e:
        return str(e)
    else:
        support.unlink(BASE)
        return None
# Replace the helper with its result: a falsy value means symlinks work.
symlink_skip_reason = symlink_skip_reason()
only_nt = unittest.skipIf(os.name != 'nt',
                          'test requires a Windows-compatible system')
only_posix = unittest.skipIf(os.name == 'nt',
                             'test requires a POSIX-compatible system')
with_symlinks = unittest.skipIf(symlink_skip_reason, symlink_skip_reason)
@only_posix
class PosixPathAsPureTest(PurePosixPathTest):
    """Run the pure-POSIX test suite against the concrete PosixPath class."""
    cls = pathlib.PosixPath
@only_nt
class WindowsPathAsPureTest(PureWindowsPathTest):
    """Run the pure-Windows test suite against the concrete WindowsPath class."""
    cls = pathlib.WindowsPath
class _BasePathTest(object):
    """Tests for the FS-accessing functionalities of the Path classes."""
    # Fixture tree created by setUp() under BASE:
    # (BASE)
    #  |-- dirA/
    #  |    |-- linkC -> "../dirB"
    #  |-- dirB/
    #  |    |-- fileB
    #  |    |-- linkD -> "../dirB"
    #  |-- dirC/
    #  |    |-- fileC
    #  |    |-- dirD/
    #  |         |-- fileD
    #  |-- fileA
    #  |-- linkA -> "fileA"
    #  |-- linkB -> "dirB"
    #  |-- brokenLink -> "non-existing"
    # (symlinks are only created when the platform supports them)
    def setUp(self):
        """Build the fixture tree above; it is removed again on cleanup."""
        os.mkdir(BASE)
        self.addCleanup(support.rmtree, BASE)
        os.mkdir(join('dirA'))
        os.mkdir(join('dirB'))
        os.mkdir(join('dirC'))
        os.mkdir(join('dirC', 'dirD'))
        with open(join('fileA'), 'wb') as f:
            f.write(b"this is file A\n")
        with open(join('dirB', 'fileB'), 'wb') as f:
            f.write(b"this is file B\n")
        with open(join('dirC', 'fileC'), 'wb') as f:
            f.write(b"this is file C\n")
        with open(join('dirC', 'dirD', 'fileD'), 'wb') as f:
            f.write(b"this is file D\n")
        if not symlink_skip_reason:
            # Relative symlinks
            os.symlink('fileA', join('linkA'))
            os.symlink('non-existing', join('brokenLink'))
            self.dirlink('dirB', join('linkB'))
            self.dirlink(os.path.join('..', 'dirB'), join('dirA', 'linkC'))
            # This one goes upwards but doesn't create a loop
            self.dirlink(os.path.join('..', 'dirB'), join('dirB', 'linkD'))
    if os.name == 'nt':
        # Workaround for http://bugs.python.org/issue13772
        def dirlink(self, src, dest):
            """Create a directory symlink (Windows requires the flag)."""
            os.symlink(src, dest, target_is_directory=True)
    else:
        def dirlink(self, src, dest):
            """Create a directory symlink."""
            os.symlink(src, dest)
def assertSame(self, path_a, path_b):
self.assertTrue(os.path.samefile(str(path_a), str(path_b)),
"%r and %r don't point to the same file" %
(path_a, path_b))
    def assertFileNotFound(self, func, *args, **kwargs):
        """Assert that func(*args, **kwargs) raises FileNotFoundError
        carrying errno ENOENT."""
        with self.assertRaises(FileNotFoundError) as cm:
            func(*args, **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
def _test_cwd(self, p):
q = self.cls(os.getcwd())
self.assertEqual(p, q)
self.assertEqual(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_cwd(self):
p = self.cls.cwd()
self._test_cwd(p)
def _test_home(self, p):
q = self.cls(os.path.expanduser('~'))
self.assertEqual(p, q)
self.assertEqual(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_home(self):
p = self.cls.home()
self._test_home(p)
def test_samefile(self):
fileA_path = os.path.join(BASE, 'fileA')
fileB_path = os.path.join(BASE, 'dirB', 'fileB')
p = self.cls(fileA_path)
pp = self.cls(fileA_path)
q = self.cls(fileB_path)
self.assertTrue(p.samefile(fileA_path))
self.assertTrue(p.samefile(pp))
self.assertFalse(p.samefile(fileB_path))
self.assertFalse(p.samefile(q))
# Test the non-existent file case
non_existent = os.path.join(BASE, 'foo')
r = self.cls(non_existent)
self.assertRaises(FileNotFoundError, p.samefile, r)
self.assertRaises(FileNotFoundError, p.samefile, non_existent)
self.assertRaises(FileNotFoundError, r.samefile, p)
self.assertRaises(FileNotFoundError, r.samefile, non_existent)
self.assertRaises(FileNotFoundError, r.samefile, r)
self.assertRaises(FileNotFoundError, r.samefile, non_existent)
def test_empty_path(self):
# The empty path points to '.'
p = self.cls('')
self.assertEqual(p.stat(), os.stat('.'))
    def test_expanduser_common(self):
        """expanduser() only rewrites a leading '~' component."""
        P = self.cls
        p = P('~')
        self.assertEqual(p.expanduser(), P(os.path.expanduser('~')))
        p = P('foo')
        self.assertEqual(p.expanduser(), p)
        p = P('/~')
        self.assertEqual(p.expanduser(), p)
        p = P('../~')
        self.assertEqual(p.expanduser(), p)
        # A '~' that is not the first component is left untouched.
        p = P(P('').absolute().anchor) / '~'
        self.assertEqual(p.expanduser(), p)
    def test_exists(self):
        """exists() follows symlinks and is False for missing entries and
        for paths that traverse through a regular file."""
        P = self.cls
        p = P(BASE)
        self.assertIs(True, p.exists())
        self.assertIs(True, (p / 'dirA').exists())
        self.assertIs(True, (p / 'fileA').exists())
        self.assertIs(False, (p / 'fileA' / 'bah').exists())
        if not symlink_skip_reason:
            self.assertIs(True, (p / 'linkA').exists())
            self.assertIs(True, (p / 'linkB').exists())
            self.assertIs(True, (p / 'linkB' / 'fileB').exists())
            self.assertIs(False, (p / 'linkA' / 'bah').exists())
        self.assertIs(False, (p / 'foo').exists())
        self.assertIs(False, P('/xyzzy').exists())
def test_open_common(self):
p = self.cls(BASE)
with (p / 'fileA').open('r') as f:
self.assertIsInstance(f, io.TextIOBase)
self.assertEqual(f.read(), "this is file A\n")
with (p / 'fileA').open('rb') as f:
self.assertIsInstance(f, io.BufferedIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
with (p / 'fileA').open('rb', buffering=0) as f:
self.assertIsInstance(f, io.RawIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
def test_read_write_bytes(self):
p = self.cls(BASE)
(p / 'fileA').write_bytes(b'abcdefg')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
# check that trying to write str does not truncate the file
self.assertRaises(TypeError, (p / 'fileA').write_bytes, 'somestr')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
def test_read_write_text(self):
p = self.cls(BASE)
(p / 'fileA').write_text('äbcdefg', encoding='latin-1')
self.assertEqual((p / 'fileA').read_text(
encoding='utf-8', errors='ignore'), 'bcdefg')
# check that trying to write bytes does not truncate the file
self.assertRaises(TypeError, (p / 'fileA').write_text, b'somebytes')
self.assertEqual((p / 'fileA').read_text(encoding='latin-1'), 'äbcdefg')
def test_iterdir(self):
P = self.cls
p = P(BASE)
it = p.iterdir()
paths = set(it)
expected = ['dirA', 'dirB', 'dirC', 'fileA']
if not symlink_skip_reason:
expected += ['linkA', 'linkB', 'brokenLink']
self.assertEqual(paths, { P(BASE, q) for q in expected })
@with_symlinks
def test_iterdir_symlink(self):
# __iter__ on a symlink to a directory
P = self.cls
p = P(BASE, 'linkB')
paths = set(p.iterdir())
expected = { P(BASE, 'linkB', q) for q in ['fileB', 'linkD'] }
self.assertEqual(paths, expected)
    def test_iterdir_nodir(self):
        """Iterating something that is not a directory raises an OSError
        whose errno is platform-dependent."""
        # __iter__ on something that is not a directory
        p = self.cls(BASE, 'fileA')
        with self.assertRaises(OSError) as cm:
            next(p.iterdir())
        # ENOENT or EINVAL under Windows, ENOTDIR otherwise
        # (see issue #12802)
        self.assertIn(cm.exception.errno, (errno.ENOTDIR,
                                           errno.ENOENT, errno.EINVAL))
def test_glob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.glob("fileA")
self.assertIsInstance(it, collections.Iterator)
_check(it, ["fileA"])
_check(p.glob("fileB"), [])
_check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"])
if symlink_skip_reason:
_check(p.glob("*A"), ['dirA', 'fileA'])
else:
_check(p.glob("*A"), ['dirA', 'fileA', 'linkA'])
if symlink_skip_reason:
_check(p.glob("*B/*"), ['dirB/fileB'])
else:
_check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD',
'linkB/fileB', 'linkB/linkD'])
if symlink_skip_reason:
_check(p.glob("*/fileB"), ['dirB/fileB'])
else:
_check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB'])
def test_rglob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.rglob("fileA")
self.assertIsInstance(it, collections.Iterator)
# XXX cannot test because of symlink loops in the test setup
#_check(it, ["fileA"])
#_check(p.rglob("fileB"), ["dirB/fileB"])
#_check(p.rglob("*/fileA"), [""])
#_check(p.rglob("*/fileB"), ["dirB/fileB"])
#_check(p.rglob("file*"), ["fileA", "dirB/fileB"])
# No symlink loops here
p = P(BASE, "dirC")
_check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
_check(p.rglob("*/*"), ["dirC/dirD/fileD"])
def test_glob_dotdot(self):
# ".." is not special in globs
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("..")), { P(BASE, "..") })
self.assertEqual(set(p.glob("dirA/../file*")), { P(BASE, "dirA/../fileA") })
self.assertEqual(set(p.glob("../xyzzy")), set())
def _check_resolve_relative(self, p, expected):
q = p.resolve()
self.assertEqual(q, expected)
def _check_resolve_absolute(self, p, expected):
q = p.resolve()
self.assertEqual(q, expected)
    @with_symlinks
    def test_resolve_common(self):
        """resolve() must fail for missing paths and follow both relative
        and absolute symlink chains."""
        P = self.cls
        p = P(BASE, 'foo')
        with self.assertRaises(OSError) as cm:
            p.resolve()
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        # These are all relative symlinks
        p = P(BASE, 'dirB', 'fileB')
        self._check_resolve_relative(p, p)
        p = P(BASE, 'linkA')
        self._check_resolve_relative(p, P(BASE, 'fileA'))
        p = P(BASE, 'dirA', 'linkC', 'fileB')
        self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
        p = P(BASE, 'dirB', 'linkD', 'fileB')
        self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
        # Now create absolute symlinks
        d = tempfile.mkdtemp(suffix='-dirD')
        self.addCleanup(support.rmtree, d)
        os.symlink(os.path.join(d), join('dirA', 'linkX'))
        os.symlink(join('dirB'), os.path.join(d, 'linkY'))
        p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB')
        self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB'))
    @with_symlinks
    def test_resolve_dot(self):
        """resolve() must cope with chains of '.'-style symlinks."""
        # See https://bitbucket.org/pitrou/pathlib/issue/9/pathresolve-fails-on-complex-symlinks
        p = self.cls(BASE)
        self.dirlink('.', join('0'))
        self.dirlink(os.path.join('0', '0'), join('1'))
        self.dirlink(os.path.join('1', '1'), join('2'))
        q = p / '2'
        self.assertEqual(q.resolve(), p)
    def test_with(self):
        """Exiting a path's context manager closes it; live iterators and
        further I/O methods must then raise ValueError."""
        p = self.cls(BASE)
        it = p.iterdir()
        it2 = p.iterdir()
        next(it2)
        with p:
            pass
        # I/O operation on closed path
        self.assertRaises(ValueError, next, it)
        self.assertRaises(ValueError, next, it2)
        self.assertRaises(ValueError, p.open)
        self.assertRaises(ValueError, p.resolve)
        self.assertRaises(ValueError, p.absolute)
        self.assertRaises(ValueError, p.__enter__)
def test_chmod(self):
p = self.cls(BASE) / 'fileA'
mode = p.stat().st_mode
# Clear writable bit
new_mode = mode & ~0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# Set writable bit
new_mode = mode | 0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# XXX also need a test for lchmod
def test_stat(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(p.stat(), st)
# Change file mode by flipping write bit
p.chmod(st.st_mode ^ 0o222)
self.addCleanup(p.chmod, st.st_mode)
self.assertNotEqual(p.stat(), st)
@with_symlinks
def test_lstat(self):
p = self.cls(BASE)/ 'linkA'
st = p.stat()
self.assertNotEqual(st, p.lstat())
def test_lstat_nosymlink(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(st, p.lstat())
@unittest.skipUnless(pwd, "the pwd module is needed for this test")
def test_owner(self):
p = self.cls(BASE) / 'fileA'
uid = p.stat().st_uid
try:
name = pwd.getpwuid(uid).pw_name
except KeyError:
self.skipTest(
"user %d doesn't have an entry in the system database" % uid)
self.assertEqual(name, p.owner())
@unittest.skipUnless(grp, "the grp module is needed for this test")
def test_group(self):
p = self.cls(BASE) / 'fileA'
gid = p.stat().st_gid
try:
name = grp.getgrgid(gid).gr_name
except KeyError:
self.skipTest(
"group %d doesn't have an entry in the system database" % gid)
self.assertEqual(name, p.group())
def test_unlink(self):
p = self.cls(BASE) / 'fileA'
p.unlink()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rmdir(self):
p = self.cls(BASE) / 'dirA'
for q in p.iterdir():
q.unlink()
p.rmdir()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rename(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Renaming to another path
q = P / 'dirA' / 'fileAA'
p.rename(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Renaming to a str of a relative path
r = rel_join('fileAAA')
q.rename(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_replace(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Replacing a non-existing path
q = P / 'dirA' / 'fileAA'
p.replace(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Replacing another (existing) path
r = rel_join('dirB', 'fileB')
q.replace(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
    def test_touch_common(self):
        """touch() creates missing files, refreshes mtimes on existing
        ones, and honours exist_ok=False."""
        P = self.cls(BASE)
        p = P / 'newfileA'
        self.assertFalse(p.exists())
        p.touch()
        self.assertTrue(p.exists())
        st = p.stat()
        old_mtime = st.st_mtime
        old_mtime_ns = st.st_mtime_ns
        # Rewind the mtime sufficiently far in the past to work around
        # filesystem-specific timestamp granularity.
        os.utime(str(p), (old_mtime - 10, old_mtime - 10))
        # The file mtime should be refreshed by calling touch() again
        p.touch()
        st = p.stat()
        self.assertGreaterEqual(st.st_mtime_ns, old_mtime_ns)
        self.assertGreaterEqual(st.st_mtime, old_mtime)
        # Now with exist_ok=False
        p = P / 'newfileB'
        self.assertFalse(p.exists())
        p.touch(mode=0o700, exist_ok=False)
        self.assertTrue(p.exists())
        self.assertRaises(OSError, p.touch, exist_ok=False)
def test_touch_nochange(self):
P = self.cls(BASE)
p = P / 'fileA'
p.touch()
with p.open('rb') as f:
self.assertEqual(f.read().strip(), b"this is file A")
def test_mkdir(self):
P = self.cls(BASE)
p = P / 'newdirA'
self.assertFalse(p.exists())
p.mkdir()
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
    def test_mkdir_parents(self):
        """mkdir(parents=True) creates missing ancestors and honours `mode`
        for the leaf directory only (parents get the process default)."""
        # Creating a chain of directories
        p = self.cls(BASE, 'newdirB', 'newdirC')
        self.assertFalse(p.exists())
        with self.assertRaises(OSError) as cm:
            p.mkdir()
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        p.mkdir(parents=True)
        self.assertTrue(p.exists())
        self.assertTrue(p.is_dir())
        with self.assertRaises(OSError) as cm:
            p.mkdir(parents=True)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        # test `mode` arg
        mode = stat.S_IMODE(p.stat().st_mode) # default mode
        p = self.cls(BASE, 'newdirD', 'newdirE')
        p.mkdir(0o555, parents=True)
        self.assertTrue(p.exists())
        self.assertTrue(p.is_dir())
        if os.name != 'nt':
            # the directory's permissions follow the mode argument
            self.assertEqual(stat.S_IMODE(p.stat().st_mode), 0o7555 & mode)
            # the parent's permissions follow the default process settings
            self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode)
    def test_mkdir_exist_ok(self):
        """mkdir(exist_ok=True) is a no-op on an existing directory."""
        p = self.cls(BASE, 'dirB')
        st_ctime_first = p.stat().st_ctime
        self.assertTrue(p.exists())
        self.assertTrue(p.is_dir())
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir()
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        p.mkdir(exist_ok=True)
        self.assertTrue(p.exists())
        # ctime unchanged proves the directory was not recreated.
        self.assertEqual(p.stat().st_ctime, st_ctime_first)
    def test_mkdir_exist_ok_with_parent(self):
        """exist_ok=True also suppresses EEXIST when combined with parents=True."""
        p = self.cls(BASE, 'dirC')
        self.assertTrue(p.exists())
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir()
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        p = p / 'newdirC'
        p.mkdir(parents=True)
        st_ctime_first = p.stat().st_ctime
        self.assertTrue(p.exists())
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir(parents=True)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        p.mkdir(parents=True, exist_ok=True)
        self.assertTrue(p.exists())
        # ctime unchanged proves the directory was not recreated.
        self.assertEqual(p.stat().st_ctime, st_ctime_first)
    def test_mkdir_with_child_file(self):
        """mkdir() over an existing regular file raises even with exist_ok=True."""
        p = self.cls(BASE, 'dirB', 'fileB')
        self.assertTrue(p.exists())
        # An exception is raised when the last path component is an existing
        # regular file, regardless of whether exist_ok is true or not.
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir(parents=True)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir(parents=True, exist_ok=True)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
    def test_mkdir_no_parents_file(self):
        """Same as test_mkdir_with_child_file, but without parents=True."""
        p = self.cls(BASE, 'fileA')
        self.assertTrue(p.exists())
        # An exception is raised when the last path component is an existing
        # regular file, regardless of whether exist_ok is true or not.
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir()
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        with self.assertRaises(FileExistsError) as cm:
            p.mkdir(exist_ok=True)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
    @with_symlinks
    def test_symlink_to(self):
        """symlink_to() accepts Path and str targets, files and directories;
        stat() follows the link while lstat() describes the link itself."""
        P = self.cls(BASE)
        target = P / 'fileA'
        # Symlinking a path target
        link = P / 'dirA' / 'linkAA'
        link.symlink_to(target)
        self.assertEqual(link.stat(), target.stat())
        self.assertNotEqual(link.lstat(), target.stat())
        # Symlinking a str target
        link = P / 'dirA' / 'linkAAA'
        link.symlink_to(str(target))
        self.assertEqual(link.stat(), target.stat())
        self.assertNotEqual(link.lstat(), target.stat())
        self.assertFalse(link.is_dir())
        # Symlinking to a directory
        target = P / 'dirB'
        link = P / 'dirA' / 'linkAAAA'
        link.symlink_to(target, target_is_directory=True)
        self.assertEqual(link.stat(), target.stat())
        self.assertNotEqual(link.lstat(), target.stat())
        self.assertTrue(link.is_dir())
        self.assertTrue(list(link.iterdir()))
def test_is_dir(self):
P = self.cls(BASE)
self.assertTrue((P / 'dirA').is_dir())
self.assertFalse((P / 'fileA').is_dir())
self.assertFalse((P / 'non-existing').is_dir())
self.assertFalse((P / 'fileA' / 'bah').is_dir())
if not symlink_skip_reason:
self.assertFalse((P / 'linkA').is_dir())
self.assertTrue((P / 'linkB').is_dir())
self.assertFalse((P/ 'brokenLink').is_dir())
def test_is_file(self):
P = self.cls(BASE)
self.assertTrue((P / 'fileA').is_file())
self.assertFalse((P / 'dirA').is_file())
self.assertFalse((P / 'non-existing').is_file())
self.assertFalse((P / 'fileA' / 'bah').is_file())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_file())
self.assertFalse((P / 'linkB').is_file())
self.assertFalse((P/ 'brokenLink').is_file())
def test_is_symlink(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_symlink())
self.assertFalse((P / 'dirA').is_symlink())
self.assertFalse((P / 'non-existing').is_symlink())
self.assertFalse((P / 'fileA' / 'bah').is_symlink())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_symlink())
self.assertTrue((P / 'linkB').is_symlink())
self.assertTrue((P/ 'brokenLink').is_symlink())
    def test_is_fifo_false(self):
        """is_fifo() is false for regular files, dirs and missing paths."""
        P = self.cls(BASE)
        self.assertFalse((P / 'fileA').is_fifo())
        self.assertFalse((P / 'dirA').is_fifo())
        self.assertFalse((P / 'non-existing').is_fifo())
        self.assertFalse((P / 'fileA' / 'bah').is_fifo())
    @unittest.skipUnless(hasattr(os, "mkfifo"), "os.mkfifo() required")
    def test_is_fifo_true(self):
        """is_fifo() is true for a real FIFO; other type tests are false."""
        P = self.cls(BASE, 'myfifo')
        os.mkfifo(str(P))
        self.assertTrue(P.is_fifo())
        self.assertFalse(P.is_socket())
        self.assertFalse(P.is_file())
    def test_is_socket_false(self):
        """is_socket() is false for regular files, dirs and missing paths."""
        P = self.cls(BASE)
        self.assertFalse((P / 'fileA').is_socket())
        self.assertFalse((P / 'dirA').is_socket())
        self.assertFalse((P / 'non-existing').is_socket())
        self.assertFalse((P / 'fileA' / 'bah').is_socket())
    @unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
    def test_is_socket_true(self):
        """is_socket() is true for a bound Unix-domain socket."""
        P = self.cls(BASE, 'mysock')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        try:
            sock.bind(str(P))
        except OSError as e:
            # Some platforms limit AF_UNIX path length below BASE's length.
            if "AF_UNIX path too long" in str(e):
                self.skipTest("cannot bind Unix socket: " + str(e))
        self.assertTrue(P.is_socket())
        self.assertFalse(P.is_fifo())
        self.assertFalse(P.is_file())
    def test_is_block_device_false(self):
        """is_block_device() is false for ordinary filesystem entries."""
        P = self.cls(BASE)
        self.assertFalse((P / 'fileA').is_block_device())
        self.assertFalse((P / 'dirA').is_block_device())
        self.assertFalse((P / 'non-existing').is_block_device())
        self.assertFalse((P / 'fileA' / 'bah').is_block_device())
    def test_is_char_device_false(self):
        """is_char_device() is false for ordinary filesystem entries."""
        P = self.cls(BASE)
        self.assertFalse((P / 'fileA').is_char_device())
        self.assertFalse((P / 'dirA').is_char_device())
        self.assertFalse((P / 'non-existing').is_char_device())
        self.assertFalse((P / 'fileA' / 'bah').is_char_device())
    def test_is_char_device_true(self):
        """is_char_device() is true for /dev/null where it exists."""
        # Under Unix, /dev/null should generally be a char device
        P = self.cls('/dev/null')
        if not P.exists():
            self.skipTest("/dev/null required")
        self.assertTrue(P.is_char_device())
        self.assertFalse(P.is_block_device())
        self.assertFalse(P.is_file())
    def test_pickling_common(self):
        """Concrete paths survive a pickle round-trip at every protocol."""
        p = self.cls(BASE, 'fileA')
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            dumped = pickle.dumps(p, proto)
            pp = pickle.loads(dumped)
            # Comparing stat() results verifies both point at the same file.
            self.assertEqual(pp.stat(), p.stat())
    def test_parts_interning(self):
        """Identical path components share one interned string object."""
        P = self.cls
        p = P('/usr/bin/foo')
        q = P('/usr/local/bin')
        # 'usr'
        self.assertIs(p.parts[1], q.parts[1])
        # 'bin'
        self.assertIs(p.parts[2], q.parts[3])
    def _check_complex_symlinks(self, link0_target):
        """Resolve the chain link3 -> link2 -> link1 -> link0 -> link0_target
        back to BASE, from both absolute and relative starting paths."""
        # Test solving a non-looping chain of symlinks (issue #19887)
        P = self.cls(BASE)
        self.dirlink(os.path.join('link0', 'link0'), join('link1'))
        self.dirlink(os.path.join('link1', 'link1'), join('link2'))
        self.dirlink(os.path.join('link2', 'link2'), join('link3'))
        self.dirlink(link0_target, join('link0'))
        # Resolve absolute paths
        p = (P / 'link0').resolve()
        self.assertEqual(p, P)
        self.assertEqual(str(p), BASE)
        p = (P / 'link1').resolve()
        self.assertEqual(p, P)
        self.assertEqual(str(p), BASE)
        p = (P / 'link2').resolve()
        self.assertEqual(p, P)
        self.assertEqual(str(p), BASE)
        p = (P / 'link3').resolve()
        self.assertEqual(p, P)
        self.assertEqual(str(p), BASE)
        # Resolve relative paths
        old_path = os.getcwd()
        os.chdir(BASE)
        try:
            p = self.cls('link0').resolve()
            self.assertEqual(p, P)
            self.assertEqual(str(p), BASE)
            p = self.cls('link1').resolve()
            self.assertEqual(p, P)
            self.assertEqual(str(p), BASE)
            p = self.cls('link2').resolve()
            self.assertEqual(p, P)
            self.assertEqual(str(p), BASE)
            p = self.cls('link3').resolve()
            self.assertEqual(p, P)
            self.assertEqual(str(p), BASE)
        finally:
            os.chdir(old_path)
    @with_symlinks
    def test_complex_symlinks_absolute(self):
        # link0 points at BASE through an absolute path.
        self._check_complex_symlinks(BASE)
    @with_symlinks
    def test_complex_symlinks_relative(self):
        # link0 points at BASE through '.' (relative).
        self._check_complex_symlinks('.')
    @with_symlinks
    def test_complex_symlinks_relative_dot_dot(self):
        # link0 points at BASE through a 'dirA/..' round-trip.
        self._check_complex_symlinks(os.path.join('dirA', '..'))
class PathTest(_BasePathTest, unittest.TestCase):
    """Tests for the concrete, platform-dispatching pathlib.Path class."""
    cls = pathlib.Path

    def test_concrete_class(self):
        # Path() must instantiate the flavour matching the running OS.
        p = self.cls('a')
        self.assertIs(type(p),
            pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath)

    def test_unsupported_flavour(self):
        # The foreign flavour cannot be instantiated on this OS.
        if os.name == 'nt':
            self.assertRaises(NotImplementedError, pathlib.PosixPath)
        else:
            self.assertRaises(NotImplementedError, pathlib.WindowsPath)
@only_posix
class PosixPathTest(_BasePathTest, unittest.TestCase):
    """POSIX-only behaviour of pathlib.PosixPath."""
    cls = pathlib.PosixPath

    def _check_symlink_loop(self, *args):
        # resolve() must detect a symlink loop and raise RuntimeError.
        path = self.cls(*args)
        with self.assertRaises(RuntimeError):
            print(path.resolve())

    def test_open_mode(self):
        """Files created via open() get mode 0o666 masked by the umask."""
        old_mask = os.umask(0)
        self.addCleanup(os.umask, old_mask)
        p = self.cls(BASE)
        with (p / 'new_file').open('wb'):
            pass
        st = os.stat(join('new_file'))
        self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
        os.umask(0o022)
        with (p / 'other_new_file').open('wb'):
            pass
        st = os.stat(join('other_new_file'))
        self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)

    def test_touch_mode(self):
        """touch() honours the umask and an explicit mode= argument."""
        old_mask = os.umask(0)
        self.addCleanup(os.umask, old_mask)
        p = self.cls(BASE)
        (p / 'new_file').touch()
        st = os.stat(join('new_file'))
        self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
        os.umask(0o022)
        (p / 'other_new_file').touch()
        st = os.stat(join('other_new_file'))
        self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
        (p / 'masked_new_file').touch(mode=0o750)
        st = os.stat(join('masked_new_file'))
        self.assertEqual(stat.S_IMODE(st.st_mode), 0o750)

    @with_symlinks
    def test_resolve_loop(self):
        """resolve() detects self-referencing symlinks of several shapes."""
        # Loop detection for broken symlinks under POSIX
        # Loops with relative symlinks
        os.symlink('linkX/inside', join('linkX'))
        self._check_symlink_loop(BASE, 'linkX')
        os.symlink('linkY', join('linkY'))
        self._check_symlink_loop(BASE, 'linkY')
        os.symlink('linkZ/../linkZ', join('linkZ'))
        self._check_symlink_loop(BASE, 'linkZ')
        # Loops with absolute symlinks
        os.symlink(join('linkU/inside'), join('linkU'))
        self._check_symlink_loop(BASE, 'linkU')
        os.symlink(join('linkV'), join('linkV'))
        self._check_symlink_loop(BASE, 'linkV')
        os.symlink(join('linkW/../linkW'), join('linkW'))
        self._check_symlink_loop(BASE, 'linkW')

    def test_glob(self):
        """Globbing is case-sensitive on POSIX unless the FS isn't."""
        P = self.cls
        p = P(BASE)
        given = set(p.glob("FILEa"))
        expect = set() if not support.fs_is_case_insensitive(BASE) else given
        self.assertEqual(given, expect)
        self.assertEqual(set(p.glob("FILEa*")), set())

    def test_rglob(self):
        """Recursive globbing is case-sensitive on POSIX unless the FS isn't."""
        P = self.cls
        p = P(BASE, "dirC")
        given = set(p.rglob("FILEd"))
        expect = set() if not support.fs_is_case_insensitive(BASE) else given
        self.assertEqual(given, expect)
        self.assertEqual(set(p.rglob("FILEd*")), set())

    def test_expanduser(self):
        """expanduser() resolves '~' via $HOME and '~user' via the pwd db."""
        P = self.cls
        support.import_module('pwd')
        import pwd
        pwdent = pwd.getpwuid(os.getuid())
        username = pwdent.pw_name
        userhome = pwdent.pw_dir.rstrip('/')
        # find arbitrary different user (if exists)
        for pwdent in pwd.getpwall():
            othername = pwdent.pw_name
            otherhome = pwdent.pw_dir.rstrip('/')
            if othername != username and otherhome:
                break
        p1 = P('~/Documents')
        p2 = P('~' + username + '/Documents')
        p3 = P('~' + othername + '/Documents')
        p4 = P('../~' + username + '/Documents')
        p5 = P('/~' + username + '/Documents')
        p6 = P('')
        p7 = P('~fakeuser/Documents')
        with support.EnvironmentVarGuard() as env:
            env.pop('HOME', None)
            # Without $HOME, '~' falls back to the pwd database.
            self.assertEqual(p1.expanduser(), P(userhome) / 'Documents')
            self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
            self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
            # '~' not in the first component: returned unchanged.
            self.assertEqual(p4.expanduser(), p4)
            self.assertEqual(p5.expanduser(), p5)
            self.assertEqual(p6.expanduser(), p6)
            self.assertRaises(RuntimeError, p7.expanduser)
            env['HOME'] = '/tmp'
            # $HOME overrides the pwd entry for plain '~' only.
            self.assertEqual(p1.expanduser(), P('/tmp/Documents'))
            self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
            self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
            self.assertEqual(p4.expanduser(), p4)
            self.assertEqual(p5.expanduser(), p5)
            self.assertEqual(p6.expanduser(), p6)
            self.assertRaises(RuntimeError, p7.expanduser)
@only_nt
class WindowsPathTest(_BasePathTest, unittest.TestCase):
    """Windows-only behaviour of pathlib.WindowsPath."""
    cls = pathlib.WindowsPath

    def test_glob(self):
        # Globbing is case-insensitive on Windows.
        P = self.cls
        p = P(BASE)
        self.assertEqual(set(p.glob("FILEa")), { P(BASE, "fileA") })

    def test_rglob(self):
        # Recursive globbing is case-insensitive on Windows.
        P = self.cls
        p = P(BASE, "dirC")
        self.assertEqual(set(p.rglob("FILEd")), { P(BASE, "dirC/dirD/fileD") })

    def test_expanduser(self):
        """expanduser() consults HOME, USERPROFILE and HOMEDRIVE/HOMEPATH
        (in that order), deriving other users' homes from the parent dir."""
        P = self.cls
        with support.EnvironmentVarGuard() as env:
            env.pop('HOME', None)
            env.pop('USERPROFILE', None)
            env.pop('HOMEPATH', None)
            env.pop('HOMEDRIVE', None)
            env['USERNAME'] = 'alice'

            # test that the path returns unchanged
            p1 = P('~/My Documents')
            p2 = P('~alice/My Documents')
            p3 = P('~bob/My Documents')
            p4 = P('/~/My Documents')
            p5 = P('d:~/My Documents')
            p6 = P('')
            self.assertRaises(RuntimeError, p1.expanduser)
            self.assertRaises(RuntimeError, p2.expanduser)
            self.assertRaises(RuntimeError, p3.expanduser)
            self.assertEqual(p4.expanduser(), p4)
            self.assertEqual(p5.expanduser(), p5)
            self.assertEqual(p6.expanduser(), p6)

            def check():
                # With a home dir configured, '~user' is resolved relative to
                # the parent of the current user's home.
                env.pop('USERNAME', None)
                self.assertEqual(p1.expanduser(),
                                 P('C:/Users/alice/My Documents'))
                self.assertRaises(KeyError, p2.expanduser)
                env['USERNAME'] = 'alice'
                self.assertEqual(p2.expanduser(),
                                 P('C:/Users/alice/My Documents'))
                self.assertEqual(p3.expanduser(),
                                 P('C:/Users/bob/My Documents'))
                self.assertEqual(p4.expanduser(), p4)
                self.assertEqual(p5.expanduser(), p5)
                self.assertEqual(p6.expanduser(), p6)

            # test the first lookup key in the env vars
            env['HOME'] = 'C:\\Users\\alice'
            check()

            # test that HOMEPATH is available instead
            env.pop('HOME', None)
            env['HOMEPATH'] = 'C:\\Users\\alice'
            check()

            env['HOMEDRIVE'] = 'C:\\'
            env['HOMEPATH'] = 'Users\\alice'
            check()

            env.pop('HOMEDRIVE', None)
            env.pop('HOMEPATH', None)
            env['USERPROFILE'] = 'C:\\Users\\alice'
            check()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pip/commands/completion.py | 435 | 1991 | from __future__ import absolute_import
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
"""}
class CompletionCommand(Command):
    """A helper command to be used for command completion."""
    name = 'completion'
    summary = 'A helper command to be used for command completion'
    # Hidden commands are not listed in `pip help` output.
    hidden = True

    def __init__(self, *args, **kw):
        super(CompletionCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        # --bash/-b and --zsh/-z both store into options.shell; the last one
        # given on the command line wins.
        cmd_opts.add_option(
            '--bash', '-b',
            action='store_const',
            const='bash',
            dest='shell',
            help='Emit completion code for bash')
        cmd_opts.add_option(
            '--zsh', '-z',
            action='store_const',
            const='zsh',
            dest='shell',
            help='Emit completion code for zsh')

        self.parser.insert_option_group(0, cmd_opts)

    def run(self, options, args):
        """Prints the completion code of the given shell"""
        shells = COMPLETION_SCRIPTS.keys()
        shell_options = ['--' + shell for shell in sorted(shells)]
        if options.shell in shells:
            script = COMPLETION_SCRIPTS.get(options.shell, '')
            print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
        else:
            # No (or unknown) shell selected: tell the user the valid flags.
            sys.stderr.write(
                'ERROR: You must pass %s\n' % ' or '.join(shell_options)
            )
| mit |
nandhp/youtube-dl | youtube_dl/extractor/izlesene.py | 117 | 4307 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
determine_ext,
float_or_none,
get_element_by_id,
int_or_none,
parse_iso8601,
str_to_int,
)
class IzleseneIE(InfoExtractor):
    """Extractor for izlesene.com video and embed-player pages."""
    _VALID_URL = r'''(?x)
        https?://(?:(?:www|m)\.)?izlesene\.com/
        (?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
        '''
    _TESTS = [
        {
            'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
            'md5': '4384f9f0ea65086734b881085ee05ac2',
            'info_dict': {
                'id': '7599694',
                'ext': 'mp4',
                'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
                'description': 'md5:253753e2655dde93f59f74b572454f6d',
                'thumbnail': 're:^http://.*\.jpg',
                'uploader_id': 'pelikzzle',
                'timestamp': int,
                'upload_date': '20140702',
                'duration': 95.395,
                'age_limit': 0,
            }
        },
        {
            'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
            'md5': '97f09b6872bffa284cb7fa4f6910cb72',
            'info_dict': {
                'id': '17997',
                'ext': 'mp4',
                'title': 'Tarkan Dortmund 2006 Konseri',
                'description': 'Tarkan Dortmund 2006 Konseri',
                'thumbnail': 're:^http://.*\.jpg',
                'uploader_id': 'parlayankiz',
                'timestamp': int,
                'upload_date': '20061112',
                'duration': 253.666,
                'age_limit': 0,
            }
        },
    ]

    def _real_extract(self, url):
        """Fetch the watch page and scrape metadata plus stream formats."""
        video_id = self._match_id(url)
        # Normalize embed-player URLs to the canonical watch page.
        url = 'http://www.izlesene.com/video/%s' % video_id

        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._proto_relative_url(
            self._og_search_thumbnail(webpage), scheme='http:')

        uploader = self._html_search_regex(
            r"adduserUsername\s*=\s*'([^']+)';",
            webpage, 'uploader', fatal=False)
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage, 'upload date'))

        duration = float_or_none(self._html_search_regex(
            r'"videoduration"\s*:\s*"([^"]+)"',
            webpage, 'duration', fatal=False), scale=1000)

        view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
        comment_count = self._html_search_regex(
            r'comment_count\s*=\s*\'([^\']+)\';',
            webpage, 'comment_count', fatal=False)

        content_url = self._html_search_meta(
            'contentURL', webpage, 'content URL', fatal=False)
        ext = determine_ext(content_url, 'mp4')

        # Might be empty for some videos.
        streams = self._html_search_regex(
            r'"qualitylevel"\s*:\s*"([^"]+)"', webpage, 'streams', default='')

        formats = []
        if streams:
            for stream in streams.split('|'):
                # NOTE(review): `url` here shadows the function parameter,
                # which is not used again — confirm before renaming.
                quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
                formats.append({
                    'format_id': '%sp' % quality if quality else 'sd',
                    'url': compat_urllib_parse_unquote(url),
                    'ext': ext,
                })
        else:
            stream_url = self._search_regex(
                r'"streamurl"\s*:\s*"([^"]+)"', webpage, 'stream URL')
            formats.append({
                'format_id': 'sd',
                'url': compat_urllib_parse_unquote(stream_url),
                'ext': ext,
            })

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader_id': uploader,
            'timestamp': timestamp,
            'duration': duration,
            'view_count': int_or_none(view_count),
            'comment_count': int_or_none(comment_count),
            'age_limit': self._family_friendly_search(webpage),
            'formats': formats,
        }
| unlicense |
liorvh/golismero | thirdparty_libs/django/db/models/deletion.py | 106 | 13744 | from functools import wraps
from operator import attrgetter
from django.db import connections, transaction, IntegrityError
from django.db.models import signals, sql
from django.utils.datastructures import SortedDict
from django.utils import six
class ProtectedError(IntegrityError):
    """Raised when deletion would cascade into a PROTECT-ed relation."""

    def __init__(self, msg, protected_objects):
        super(ProtectedError, self).__init__(msg, protected_objects)
        # Keep the blocking objects available to callers for error reporting.
        self.protected_objects = protected_objects
def CASCADE(collector, field, sub_objs, using):
    """on_delete handler: collect the related objects for deletion too."""
    collector.collect(sub_objs, source=field.rel.to,
                      source_attr=field.name, nullable=field.null)
    if field.null and not connections[using].features.can_defer_constraint_checks:
        # Backend cannot defer FK checks: null the FK out before the delete
        # so no dangling reference is ever visible.
        collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
    """on_delete handler: refuse the deletion by raising ProtectedError."""
    raise ProtectedError("Cannot delete some instances of model '%s' because "
        "they are referenced through a protected foreign key: '%s.%s'" % (
            field.rel.to.__name__, sub_objs[0].__class__.__name__, field.name
        ),
        sub_objs
    )
def SET(value):
    """Build an on_delete handler that repoints the foreign key at *value*.

    If *value* is callable it is invoked at deletion time to produce the
    replacement value; otherwise *value* itself is used.
    """
    if callable(value):
        produce = value
    else:
        def produce():
            return value

    def set_on_delete(collector, field, sub_objs, using):
        collector.add_field_update(field, produce(), sub_objs)
    return set_on_delete

# Convenience handler: set the foreign key to NULL on delete.
SET_NULL = SET(None)
def SET_DEFAULT(collector, field, sub_objs, using):
    """on_delete handler: repoint the foreign key at the field's default."""
    default_value = field.get_default()
    collector.add_field_update(field, default_value, sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
    # on_delete handler: leave related rows untouched; any constraint
    # handling is the database's (or the caller's) responsibility.
    pass
def force_managed(func):
    """Decorator: run *func* inside a managed transaction on ``self.using``.

    If no managed transaction is active, one is entered for the duration of
    the call (and left afterwards); the work is committed in both cases.
    """
    @wraps(func)
    def decorated(self, *args, **kwargs):
        if not transaction.is_managed(using=self.using):
            transaction.enter_transaction_management(using=self.using)
            forced_managed = True
        else:
            forced_managed = False
        try:
            func(self, *args, **kwargs)
            # Commit on success; errors propagate and skip the commit.
            if forced_managed:
                transaction.commit(using=self.using)
            else:
                transaction.commit_unless_managed(using=self.using)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.using)
    return decorated
class Collector(object):
    """Gathers every object and field update implied by deleting a set of
    instances (following on_delete cascades), then performs the deletion in
    a dependency-safe order on the database alias ``using``."""

    def __init__(self, using):
        self.using = using
        # Initially, {model: set([instances])}, later values become lists.
        self.data = {}
        self.batches = {} # {model: {field: set([instances])}}
        self.field_updates = {} # {model: {(field, value): set([instances])}}
        # fast_deletes is a list of queryset-likes that can be deleted without
        # fetching the objects into memory.
        self.fast_deletes = []

        # Tracks deletion-order dependency for databases without transactions
        # or ability to defer constraint checks. Only concrete model classes
        # should be included, as the dependencies exist only between actual
        # database tables; proxy models are represented here by their concrete
        # parent.
        self.dependencies = {} # {model: set([models])}

    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted.  If the call is
        the result of a cascade, 'source' should be the model that caused it,
        and 'nullable' should be set to True if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, set())
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(
                source._meta.concrete_model, set()).add(model._meta.concrete_model)
        return new_objs

    def add_batch(self, model, field, objs):
        """
        Schedules a batch delete. Every instance of 'model' that is related to
        an instance of 'obj' through 'field' will be deleted.
        """
        self.batches.setdefault(model, {}).setdefault(field, set()).update(objs)

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogenous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(
            model, {}).setdefault(
            (field, value), set()).update(objs)

    def can_fast_delete(self, objs, from_field=None):
        """
        Determines if the objects in the given queryset-like can be
        fast-deleted. This can be done if there are no cascades, no
        parents and no signal listeners for the object class.

        The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Allows also
        skipping parent -> child -> parent chain preventing fast delete of
        the child.
        """
        if from_field and from_field.rel.on_delete is not CASCADE:
            return False
        if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
            return False
        model = objs.model
        # Any signal listener forces per-object processing.
        if (signals.pre_delete.has_listeners(model)
                or signals.post_delete.has_listeners(model)
                or signals.m2m_changed.has_listeners(model)):
            return False
        # The use of from_field comes from the need to avoid cascade back to
        # parent when parent delete is cascading to child.
        opts = model._meta
        if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
            return False
        # Foreign keys pointing to this model, both from m2m and other
        # models.
        for related in opts.get_all_related_objects(
                include_hidden=True, include_proxy_eq=True):
            if related.field.rel.on_delete is not DO_NOTHING:
                return False
        # GFK deletes
        for relation in opts.many_to_many:
            if not relation.rel.through:
                return False
        return True

    def collect(self, objs, source=None, nullable=False, collect_related=True,
                source_attr=None, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogenous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.

        If 'reverse_dependency' is True, 'source' will be deleted before the
        current model, rather than after. (Needed for cascading to parent
        models, the one case in which the cascade follows the forwards
        direction of an FK rather than the reverse direction.)
        """
        if self.can_fast_delete(objs):
            self.fast_deletes.append(objs)
            return
        new_objs = self.add(objs, source, nullable,
                            reverse_dependency=reverse_dependency)
        if not new_objs:
            return

        model = new_objs[0].__class__

        # Recursively collect concrete model's parent models, but not their
        # related objects. These will be found by meta.get_all_related_objects()
        concrete_model = model._meta.concrete_model
        for ptr in six.itervalues(concrete_model._meta.parents):
            if ptr:
                # FIXME: This seems to be buggy and execute a query for each
                # parent object fetch. We have the parent data in the obj,
                # but we don't have a nice way to turn that data into parent
                # object instance.
                parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                self.collect(parent_objs, source=model,
                             source_attr=ptr.rel.related_name,
                             collect_related=False,
                             reverse_dependency=True)

        if collect_related:
            for related in model._meta.get_all_related_objects(
                    include_hidden=True, include_proxy_eq=True):
                field = related.field
                if field.rel.on_delete == DO_NOTHING:
                    continue
                sub_objs = self.related_objects(related, new_objs)
                if self.can_fast_delete(sub_objs, from_field=field):
                    self.fast_deletes.append(sub_objs)
                elif sub_objs:
                    field.rel.on_delete(self, field, sub_objs, self.using)

            # TODO This entire block is only needed as a special case to
            # support cascade-deletes for GenericRelation. It should be
            # removed/fixed when the ORM gains a proper abstraction for virtual
            # or composite fields, and GFKs are reworked to fit into that.
            for relation in model._meta.many_to_many:
                if not relation.rel.through:
                    sub_objs = relation.bulk_related_objects(new_objs, self.using)
                    self.collect(sub_objs,
                                 source=model,
                                 source_attr=relation.rel.related_name,
                                 nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
        """
        return related.model._base_manager.using(self.using).filter(
            **{"%s__in" % related.field.name: objs}
        )

    def instances_with_model(self):
        # Yield (model, instance) pairs for everything collected so far.
        for model, instances in six.iteritems(self.data):
            for obj in instances:
                yield model, obj

    def sort(self):
        # Topologically sort self.data on self.dependencies so referenced
        # models are deleted after their referrers; on a cycle, leave the
        # collection order untouched.
        sorted_models = []
        concrete_models = set()
        models = list(self.data)
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model._meta.concrete_model)
                if not (dependencies and dependencies.difference(concrete_models)):
                    sorted_models.append(model)
                    concrete_models.add(model._meta.concrete_model)
                    found = True
            if not found:
                return
        self.data = SortedDict([(model, self.data[model])
                                for model in sorted_models])

    @force_managed
    def delete(self):
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(
                    sender=model, instance=obj, using=self.using
                )

        # fast deletes
        for qs in self.fast_deletes:
            qs._raw_delete(using=self.using)

        # update fields
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            query = sql.UpdateQuery(model)
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)

        # reverse instance collections
        for instances in six.itervalues(self.data):
            instances.reverse()

        # delete batches
        for model, batches in six.iteritems(self.batches):
            query = sql.DeleteQuery(model)
            for field, instances in six.iteritems(batches):
                query.delete_batch([obj.pk for obj in instances], self.using, field)

        # delete instances
        for model, instances in six.iteritems(self.data):
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)

        # send post_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.post_delete.send(
                    sender=model, instance=obj, using=self.using
                )

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        # In-memory instances lose their pk so they read as deleted.
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
| gpl-2.0 |
prefetchnta/questlab | bin/python/Lib/encodings/mac_farsi.py | 272 | 15170 | """ Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec translating via the module's Mac Farsi charmap tables."""

    def encode(self,input,errors='strict'):
        # encoding_table is the inverse of decoding_table, built at module end.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so no cross-call state is needed.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so no cross-call state is needed.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Behaviour comes entirely from Codec and codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Behaviour comes entirely from Codec and codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record that registers this codec as 'mac-farsi'."""
    # Codec is stateless, so one instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-farsi',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE, left-right
'!' # 0x21 -> EXCLAMATION MARK, left-right
'"' # 0x22 -> QUOTATION MARK, left-right
'#' # 0x23 -> NUMBER SIGN, left-right
'$' # 0x24 -> DOLLAR SIGN, left-right
'%' # 0x25 -> PERCENT SIGN, left-right
'&' # 0x26 -> AMPERSAND, left-right
"'" # 0x27 -> APOSTROPHE, left-right
'(' # 0x28 -> LEFT PARENTHESIS, left-right
')' # 0x29 -> RIGHT PARENTHESIS, left-right
'*' # 0x2A -> ASTERISK, left-right
'+' # 0x2B -> PLUS SIGN, left-right
',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
'-' # 0x2D -> HYPHEN-MINUS, left-right
'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
'/' # 0x2F -> SOLIDUS, left-right
'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
':' # 0x3A -> COLON, left-right
';' # 0x3B -> SEMICOLON, left-right
'<' # 0x3C -> LESS-THAN SIGN, left-right
'=' # 0x3D -> EQUALS SIGN, left-right
'>' # 0x3E -> GREATER-THAN SIGN, left-right
'?' # 0x3F -> QUESTION MARK, left-right
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
'\\' # 0x5C -> REVERSE SOLIDUS, left-right
']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
'_' # 0x5F -> LOW LINE, left-right
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET, left-right
'|' # 0x7C -> VERTICAL LINE, left-right
'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0x9B -> DIVISION SIGN, right-left
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
' ' # 0xA0 -> SPACE, right-left
'!' # 0xA1 -> EXCLAMATION MARK, right-left
'"' # 0xA2 -> QUOTATION MARK, right-left
'#' # 0xA3 -> NUMBER SIGN, right-left
'$' # 0xA4 -> DOLLAR SIGN, right-left
'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
'&' # 0xA6 -> AMPERSAND, right-left
"'" # 0xA7 -> APOSTROPHE, right-left
'(' # 0xA8 -> LEFT PARENTHESIS, right-left
')' # 0xA9 -> RIGHT PARENTHESIS, right-left
'*' # 0xAA -> ASTERISK, right-left
'+' # 0xAB -> PLUS SIGN, right-left
'\u060c' # 0xAC -> ARABIC COMMA
'-' # 0xAD -> HYPHEN-MINUS, right-left
'.' # 0xAE -> FULL STOP, right-left
'/' # 0xAF -> SOLIDUS, right-left
'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
':' # 0xBA -> COLON, right-left
'\u061b' # 0xBB -> ARABIC SEMICOLON
'<' # 0xBC -> LESS-THAN SIGN, right-left
'=' # 0xBD -> EQUALS SIGN, right-left
'>' # 0xBE -> GREATER-THAN SIGN, right-left
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\u0637' # 0xD7 -> ARABIC LETTER TAH
'\u0638' # 0xD8 -> ARABIC LETTER ZAH
'\u0639' # 0xD9 -> ARABIC LETTER AIN
'\u063a' # 0xDA -> ARABIC LETTER GHAIN
'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
'\\' # 0xDC -> REVERSE SOLIDUS, right-left
']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
'_' # 0xDF -> LOW LINE, right-left
'\u0640' # 0xE0 -> ARABIC TATWEEL
'\u0641' # 0xE1 -> ARABIC LETTER FEH
'\u0642' # 0xE2 -> ARABIC LETTER QAF
'\u0643' # 0xE3 -> ARABIC LETTER KAF
'\u0644' # 0xE4 -> ARABIC LETTER LAM
'\u0645' # 0xE5 -> ARABIC LETTER MEEM
'\u0646' # 0xE6 -> ARABIC LETTER NOON
'\u0647' # 0xE7 -> ARABIC LETTER HEH
'\u0648' # 0xE8 -> ARABIC LETTER WAW
'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEA -> ARABIC LETTER YEH
'\u064b' # 0xEB -> ARABIC FATHATAN
'\u064c' # 0xEC -> ARABIC DAMMATAN
'\u064d' # 0xED -> ARABIC KASRATAN
'\u064e' # 0xEE -> ARABIC FATHA
'\u064f' # 0xEF -> ARABIC DAMMA
'\u0650' # 0xF0 -> ARABIC KASRA
'\u0651' # 0xF1 -> ARABIC SHADDA
'\u0652' # 0xF2 -> ARABIC SUKUN
'\u067e' # 0xF3 -> ARABIC LETTER PEH
'\u0679' # 0xF4 -> ARABIC LETTER TTEH
'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
'\u06d5' # 0xF6 -> ARABIC LETTER AE
'\u06a4' # 0xF7 -> ARABIC LETTER VEH
'\u06af' # 0xF8 -> ARABIC LETTER GAF
'\u0688' # 0xF9 -> ARABIC LETTER DDAL
'\u0691' # 0xFA -> ARABIC LETTER RREH
'{' # 0xFB -> LEFT CURLY BRACKET, right-left
'|' # 0xFC -> VERTICAL LINE, right-left
'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
'\u0698' # 0xFE -> ARABIC LETTER JEH
'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-2.1 |
xbmc/atv2 | xbmc/lib/libPython/Python/Mac/Modules/drag/dragsupport.py | 39 | 9501 | # This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the mamager's header file,
# using the "scantools" package (customized for this particular manager).
import string
# Declarations that change for each manager
MACHEADERFILE = 'Drag.h' # The Apple header file
MODNAME = '_Drag' # The name of the module
OBJECTNAME = 'DragObj' # The basic name of the objects used here
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'Drag' # The prefix for module-wide routines
OBJECTTYPE = 'DragRef' # The C type used to represent them
OBJECTPREFIX = MODPREFIX + 'Obj' # The prefix for object methods
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Create the type objects
DragRef = OpaqueByValueType(OBJECTTYPE, OBJECTPREFIX)
DragItemRef = Type("ItemReference", "l")
# Old names
DragReference = DragRef
ItemReference = DragItemRef
PixMapHandle = OpaqueByValueType("PixMapHandle", "ResObj")
RgnHandle = OpaqueByValueType("RgnHandle", "ResObj")
AEDesc = OpaqueType('AEDesc')
AEDesc_ptr = AEDesc
RGBColor = OpaqueType("RGBColor", "QdRGB")
FlavorType = OSTypeType("FlavorType")
DragAttributes = Type("DragAttributes", "l")
DragBehaviors = Type("DragBehaviors", "l")
DragImageFlags = Type("DragImageFlags", "l")
DragImageTranslucency = Type("DragImageTranslucency", "l")
DragRegionMessage = Type("DragRegionMessage", "h")
ZoomAcceleration = Type("ZoomAcceleration", "h")
FlavorFlags = Type("FlavorFlags", "l")
DragTrackingMessage = Type("DragTrackingMessage", "h")
includestuff = includestuff + """
#include <Carbon/Carbon.h>
/* Callback glue routines */
DragTrackingHandlerUPP dragglue_TrackingHandlerUPP;
DragReceiveHandlerUPP dragglue_ReceiveHandlerUPP;
DragSendDataUPP dragglue_SendDataUPP;
#if 0
DragInputUPP dragglue_InputUPP;
DragDrawingUPP dragglue_DrawingUPP;
#endif
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern PyObject *_DragObj_New(DragRef);
extern int _DragObj_Convert(PyObject *, DragRef *);
#define DragObj_New _DragObj_New
#define DragObj_Convert _DragObj_Convert
#endif
"""
finalstuff = finalstuff + """
static pascal OSErr
dragglue_TrackingHandler(DragTrackingMessage theMessage, WindowPtr theWindow,
void *handlerRefCon, DragReference theDrag)
{
PyObject *args, *rv;
int i;
args = Py_BuildValue("hO&O&", theMessage, DragObj_New, theDrag, WinObj_WhichWindow, theWindow);
if ( args == NULL )
return -1;
rv = PyEval_CallObject((PyObject *)handlerRefCon, args);
Py_DECREF(args);
if ( rv == NULL ) {
PySys_WriteStderr("Drag: Exception in TrackingHandler\\n");
PyErr_Print();
return -1;
}
i = -1;
if ( rv == Py_None )
i = 0;
else
PyArg_Parse(rv, "l", &i);
Py_DECREF(rv);
return i;
}
static pascal OSErr
dragglue_ReceiveHandler(WindowPtr theWindow, void *handlerRefCon,
DragReference theDrag)
{
PyObject *args, *rv;
int i;
args = Py_BuildValue("O&O&", DragObj_New, theDrag, WinObj_WhichWindow, theWindow);
if ( args == NULL )
return -1;
rv = PyEval_CallObject((PyObject *)handlerRefCon, args);
Py_DECREF(args);
if ( rv == NULL ) {
PySys_WriteStderr("Drag: Exception in ReceiveHandler\\n");
PyErr_Print();
return -1;
}
i = -1;
if ( rv == Py_None )
i = 0;
else
PyArg_Parse(rv, "l", &i);
Py_DECREF(rv);
return i;
}
static pascal OSErr
dragglue_SendData(FlavorType theType, void *dragSendRefCon,
ItemReference theItem, DragReference theDrag)
{
DragObjObject *self = (DragObjObject *)dragSendRefCon;
PyObject *args, *rv;
int i;
if ( self->sendproc == NULL )
return -1;
args = Py_BuildValue("O&l", PyMac_BuildOSType, theType, theItem);
if ( args == NULL )
return -1;
rv = PyEval_CallObject(self->sendproc, args);
Py_DECREF(args);
if ( rv == NULL ) {
PySys_WriteStderr("Drag: Exception in SendDataHandler\\n");
PyErr_Print();
return -1;
}
i = -1;
if ( rv == Py_None )
i = 0;
else
PyArg_Parse(rv, "l", &i);
Py_DECREF(rv);
return i;
}
#if 0
static pascal OSErr
dragglue_Input(Point *mouse, short *modifiers,
void *dragSendRefCon, DragReference theDrag)
{
return 0;
}
static pascal OSErr
dragglue_Drawing(xxxx
void *dragSendRefCon, DragReference theDrag)
{
return 0;
}
#endif
"""
initstuff = initstuff + """
PyMac_INIT_TOOLBOX_OBJECT_NEW(DragRef, DragObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(DragRef, DragObj_Convert);
"""
variablestuff = """
dragglue_TrackingHandlerUPP = NewDragTrackingHandlerUPP(dragglue_TrackingHandler);
dragglue_ReceiveHandlerUPP = NewDragReceiveHandlerUPP(dragglue_ReceiveHandler);
dragglue_SendDataUPP = NewDragSendDataUPP(dragglue_SendData);
#if 0
dragglue_InputUPP = NewDragInputUPP(dragglue_Input);
dragglue_DrawingUPP = NewDragDrawingUPP(dragglue_Drawing);
#endif
"""
class MyObjectDefinition(PEP253Mixin, GlobalObjectDefinition):
    # Customizes the generated DragObj C object: every Output() call below
    # emits literal C source into the generated module, so the strings must
    # be preserved exactly.  The commented-out Output() lines correspond to
    # the disabled input/drawing callback glue (see the `#if 0` blocks above).
    def outputCheckNewArg(self):
        # Emit the NULL guard for the object constructor.
        Output("""if (itself == NULL) {
                    PyErr_SetString(Drag_Error,"Cannot create null Drag");
                    return NULL;
                }""")
    def outputFreeIt(self, itselfname):
        # Emit the destructor body; note the DragRef itself is deliberately
        # not disposed here (the DisposeDrag call is commented out).
##		Output("DisposeDrag(%s);", itselfname)
        Output("Py_XDECREF(self->sendproc);")
##		Output("Py_XDECREF(self->inputproc);")
##		Output("Py_XDECREF(self->drawingproc);")
    def outputStructMembers(self):
        # Extend the generated struct with the Python send-data callback slot.
        GlobalObjectDefinition.outputStructMembers(self)
        Output("PyObject *sendproc;")
##		Output("PyObject *inputproc;")
##		Output("PyObject *drawingproc;")
    def outputInitStructMembers(self):
        # Emit initialization for the extra struct member(s).
        GlobalObjectDefinition.outputInitStructMembers(self)
        Output("it->sendproc = NULL;")
##		Output("it->inputproc = NULL;")
##		Output("it->drawingproc = NULL;")
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff, variablestuff)
object = MyObjectDefinition(OBJECTNAME, OBJECTPREFIX, OBJECTTYPE)
module.addobject(object)
# Create the generator classes used to populate the lists
Function = OSErrWeakLinkFunctionGenerator
Method = OSErrWeakLinkMethodGenerator
# Create and populate the lists
functions = []
methods = []
execfile(INPUTFILE)
# add the populated lists to the generator groups
for f in functions: module.add(f)
for f in methods: object.add(f)
# Manual generators for the callbacks
installtracking_body = """
PyObject *callback;
WindowPtr theWindow = NULL;
OSErr _err;
if ( !PyArg_ParseTuple(_args, "O|O&", &callback, WinObj_Convert, &theWindow) )
return NULL;
Py_INCREF(callback); /* Cannot decref later, too bad */
_err = InstallTrackingHandler(dragglue_TrackingHandlerUPP, theWindow, (void *)callback);
if (_err != noErr) return PyMac_Error(_err);
Py_INCREF(Py_None);
_res = Py_None;
return _res;
"""
installtracking = ManualGenerator("InstallTrackingHandler", installtracking_body)
module.add(installtracking)
installreceive_body = """
PyObject *callback;
WindowPtr theWindow = NULL;
OSErr _err;
if ( !PyArg_ParseTuple(_args, "O|O&", &callback, WinObj_Convert, &theWindow) )
return NULL;
Py_INCREF(callback); /* Cannot decref later, too bad */
_err = InstallReceiveHandler(dragglue_ReceiveHandlerUPP, theWindow, (void *)callback);
if (_err != noErr) return PyMac_Error(_err);
Py_INCREF(Py_None);
_res = Py_None;
return _res;
"""
installreceive = ManualGenerator("InstallReceiveHandler", installreceive_body)
module.add(installreceive)
removetracking_body = """
WindowPtr theWindow = NULL;
OSErr _err;
if ( !PyArg_ParseTuple(_args, "|O&", WinObj_Convert, &theWindow) )
return NULL;
_err = RemoveTrackingHandler(dragglue_TrackingHandlerUPP, theWindow);
if (_err != noErr) return PyMac_Error(_err);
Py_INCREF(Py_None);
_res = Py_None;
return _res;
"""
removetracking = ManualGenerator("RemoveTrackingHandler", removetracking_body)
module.add(removetracking)
removereceive_body = """
WindowPtr theWindow = NULL;
OSErr _err;
if ( !PyArg_ParseTuple(_args, "|O&", WinObj_Convert, &theWindow) )
return NULL;
_err = RemoveReceiveHandler(dragglue_ReceiveHandlerUPP, theWindow);
if (_err != noErr) return PyMac_Error(_err);
Py_INCREF(Py_None);
_res = Py_None;
return _res;
"""
removereceive = ManualGenerator("RemoveReceiveHandler", removereceive_body)
module.add(removereceive)
# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
| gpl-2.0 |
zhoffice/minos | supervisor/supervisor/medusa/test/test_lb.py | 5 | 4790 | # -*- Mode: Python -*-
# Get a lower bound for Medusa performance with a simple async
# client/server benchmark built on the async lib. The idea is to test
# all the underlying machinery [select, asyncore, asynchat, etc...] in
# a context where there is virtually no processing of the data.
import socket
import select
import sys
# ==================================================
# server
# ==================================================
from supervisor.medusa import asyncore_25 as asyncore
from supervisor.medusa import asynchat_25 as asynchat
class test_channel (asynchat.async_chat):
    """Server-side echo channel: buffers until '\\r\\n\\r\\n', then echoes."""

    ac_in_buffer_size = 16384
    ac_out_buffer_size = 16384
    total_in = 0  # bytes received across all channel instances

    def __init__ (self, conn, addr):
        asynchat.async_chat.__init__ (self, conn)
        self.set_terminator ('\r\n\r\n')
        self.buffer = ''

    def collect_incoming_data (self, data):
        # accumulate the request and keep the class-wide byte counter current
        self.buffer += data
        test_channel.total_in += len(data)

    def found_terminator (self):
        # full request received: echo it back with the terminator re-attached
        data, self.buffer = self.buffer, ''
        self.push (data + '\r\n\r\n')

    def handle_close (self):
        # progress marker: one dot per completed connection
        sys.stdout.write ('.')
        sys.stdout.flush()
        self.close()

    def log (self, *args):
        # suppress the default asyncore logging noise
        pass
class test_server (asyncore.dispatcher):
    # Listening socket for the benchmark; spawns one test_channel per client.
    def __init__ (self, addr):
        # A string address selects a Unix-domain socket, a tuple selects TCP.
        if type(addr) == type(''):
            f = socket.AF_UNIX
        else:
            f = socket.AF_INET
        self.create_socket (f, socket.SOCK_STREAM)
        self.bind (addr)
        self.listen (5)
        print 'server started on',addr

    def handle_accept (self):
        # Hand the accepted connection to an echo channel; the channel
        # registers itself with the asyncore loop, so no reference is kept.
        conn, addr = self.accept()
        test_channel (conn, addr)
# ==================================================
# client
# ==================================================
# pretty much the same behavior, except that we kick
# off the exchange and decide when to quit
class test_client (test_channel):
    """Client side of the benchmark: sends a packet and bounces it *number* times."""

    def __init__ (self, addr, packet, number):
        # a string address means a Unix-domain socket, a tuple means TCP
        family = socket.AF_UNIX if type(addr) == type('') else socket.AF_INET
        asynchat.async_chat.__init__ (self)
        self.create_socket (family, socket.SOCK_STREAM)
        self.set_terminator ('\r\n\r\n')
        self.buffer = ''
        self.connect (addr)
        # kick off the exchange; the server echoes everything we send
        self.push (packet + '\r\n\r\n')
        self.number = number
        self.count = 0

    def handle_connect (self):
        # the initial packet was already queued in __init__
        pass

    def found_terminator (self):
        # one round trip done: either stop, or let the base class echo again
        self.count += 1
        if self.count == self.number:
            sys.stdout.write('.')
            sys.stdout.flush()
            self.close()
        else:
            test_channel.found_terminator (self)
import time
class timer:
    """Minimal stopwatch: remembers the wall clock at creation time."""

    def __init__ (self):
        # timestamp taken once, when the measurement starts
        self.start = time.time()

    def end (self):
        # elapsed wall-clock seconds since construction
        elapsed = time.time() - self.start
        return elapsed
if __name__ == '__main__':
import string
if '--poll' in sys.argv:
sys.argv.remove ('--poll')
use_poll=1
else:
use_poll=0
if len(sys.argv) == 1:
print 'usage: %s\n' \
' (as a server) [--poll] -s <ip> <port>\n' \
' (as a client) [--poll] -c <ip> <port> <packet-size> <num-packets> <num-connections>\n' % sys.argv[0]
sys.exit(0)
if sys.argv[1] == '-s':
s = test_server ((sys.argv[2], string.atoi (sys.argv[3])))
asyncore.loop(use_poll=use_poll)
elif sys.argv[1] == '-c':
# create the packet
packet = string.atoi(sys.argv[4]) * 'B'
host = sys.argv[2]
port = string.atoi (sys.argv[3])
num_packets = string.atoi (sys.argv[5])
num_conns = string.atoi (sys.argv[6])
t = timer()
for i in range (num_conns):
test_client ((host,port), packet, num_packets)
asyncore.loop(use_poll=use_poll)
total_time = t.end()
# ok, now do some numbers
bytes = test_client.total_in
num_trans = num_packets * num_conns
total_bytes = num_trans * len(packet)
throughput = float (total_bytes) / total_time
trans_per_sec = num_trans / total_time
sys.stderr.write ('total time: %.2f\n' % total_time)
sys.stderr.write ( 'number of transactions: %d\n' % num_trans)
sys.stderr.write ( 'total bytes sent: %d\n' % total_bytes)
sys.stderr.write ( 'total throughput (bytes/sec): %.2f\n' % throughput)
sys.stderr.write ( ' [note, throughput is this amount in each direction]\n')
sys.stderr.write ( 'transactions/second: %.2f\n' % trans_per_sec)
sys.stdout.write (
string.join (
map (str, (num_conns, num_packets, len(packet), throughput, trans_per_sec)),
','
) + '\n'
)
| apache-2.0 |
peap/django-debug-toolbar | tests/test_integration.py | 2 | 7240 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import os
import unittest
from xml.etree import ElementTree as ET
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from debug_toolbar.middleware import DebugToolbarMiddleware, show_toolbar
from .base import BaseTestCase
from .views import regular_view
try:
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
except ImportError:
webdriver = None
rf = RequestFactory()
@override_settings(DEBUG=True)
class DebugToolbarTestCase(BaseTestCase):
    """Unit tests for toolbar visibility, request-panel URL resolution and
    the middleware hooks.  BaseTestCase supplies self.request, self.response
    and self.toolbar fixtures."""

    def test_show_toolbar(self):
        # With DEBUG=True (class decorator) the toolbar should be shown.
        self.assertTrue(show_toolbar(self.request))

    def test_show_toolbar_DEBUG(self):
        # DEBUG=False alone must hide the toolbar.
        with self.settings(DEBUG=False):
            self.assertFalse(show_toolbar(self.request))

    def test_show_toolbar_INTERNAL_IPS(self):
        # An empty INTERNAL_IPS must hide the toolbar even under DEBUG=True.
        with self.settings(INTERNAL_IPS=[]):
            self.assertFalse(show_toolbar(self.request))

    def _resolve_stats(self, path):
        # takes stats from Request panel
        self.request.path = path
        panel = self.toolbar.get_panel_by_id('RequestPanel')
        panel.process_request(self.request)
        panel.process_response(self.request, self.response)
        panel.generate_stats(self.request, self.response)
        return panel.get_stats()

    def test_url_resolving_positional(self):
        stats = self._resolve_stats('/resolving1/a/b/')
        self.assertEqual(stats['view_urlname'], 'positional-resolving')
        self.assertEqual(stats['view_func'], 'tests.views.resolving_view')
        self.assertEqual(stats['view_args'], ('a', 'b'))
        self.assertEqual(stats['view_kwargs'], {})

    def test_url_resolving_named(self):
        stats = self._resolve_stats('/resolving2/a/b/')
        self.assertEqual(stats['view_args'], ())
        self.assertEqual(stats['view_kwargs'], {'arg1': 'a', 'arg2': 'b'})

    def test_url_resolving_mixed(self):
        # One positional arg plus a keyword arg filled by the URL default.
        stats = self._resolve_stats('/resolving3/a/')
        self.assertEqual(stats['view_args'], ('a',))
        self.assertEqual(stats['view_kwargs'], {'arg2': 'default'})

    def test_url_resolving_bad(self):
        # Unresolvable paths are reported with 'None'/'<no view>' placeholders.
        stats = self._resolve_stats('/non-existing-url/')
        self.assertEqual(stats['view_urlname'], 'None')
        self.assertEqual(stats['view_args'], 'None')
        self.assertEqual(stats['view_kwargs'], 'None')
        self.assertEqual(stats['view_func'], '<no view>')

    # Django doesn't guarantee that process_request, process_view and
    # process_response always get called in this order.
    def test_middleware_view_only(self):
        DebugToolbarMiddleware().process_view(self.request, regular_view, ('title',), {})

    def test_middleware_response_only(self):
        DebugToolbarMiddleware().process_response(self.request, self.response)

    def test_middleware_response_insertion(self):
        # Non-ASCII content ("İ") exercises the encoding path of insertion.
        resp = regular_view(self.request, "İ")
        DebugToolbarMiddleware().process_response(self.request, resp)
        # check toolbar insertion before "</body>"
        self.assertContains(resp, '</div>\n</body>')

    def test_cache_page(self):
        # First hit records the cache misses/sets; second hit adds the hits.
        self.client.get('/cached_view/')
        self.assertEqual(
            len(self.toolbar.get_panel_by_id('CachePanel').calls), 3)
        self.client.get('/cached_view/')
        self.assertEqual(
            len(self.toolbar.get_panel_by_id('CachePanel').calls), 5)
@override_settings(DEBUG=True)
class DebugToolbarIntegrationTestCase(TestCase):
    """End-to-end checks that the middleware decorates real responses."""

    def test_middleware(self):
        status = self.client.get('/execute_sql/').status_code
        self.assertEqual(status, 200)

    @override_settings(DEFAULT_CHARSET='iso-8859-1')
    def test_non_utf8_charset(self):
        # ASCII-only page: both the template text and the toolbar markup
        # must survive the non-UTF-8 charset.
        response = self.client.get('/regular/ASCII/')
        self.assertContains(response, 'ASCII')    # template
        self.assertContains(response, 'djDebug')  # toolbar
        # Page with non-ASCII characters: same expectations.
        response = self.client.get('/regular/LÀTÍN/')
        self.assertContains(response, 'LÀTÍN')    # template
        self.assertContains(response, 'djDebug')  # toolbar

    def test_xml_validation(self):
        # The injected toolbar must not break well-formedness of XML output.
        content = self.client.get('/regular/XML/').content
        ET.fromstring(content)  # parsing must succeed without ParseError
@unittest.skipIf(webdriver is None, "selenium isn't installed")
@unittest.skipUnless('DJANGO_SELENIUM_TESTS' in os.environ, "selenium tests not requested")
@override_settings(DEBUG=True)
class DebugToolbarLiveTestCase(StaticLiveServerTestCase):
    """Browser-driven tests of the toolbar UI against a live server.

    One Firefox instance is shared by the whole class; the tests are opt-in
    via the DJANGO_SELENIUM_TESTS environment variable."""

    @classmethod
    def setUpClass(cls):
        super(DebugToolbarLiveTestCase, cls).setUpClass()
        # One shared browser for all tests in this class.
        cls.selenium = webdriver.Firefox()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(DebugToolbarLiveTestCase, cls).tearDownClass()

    def test_basic(self):
        self.selenium.get(self.live_server_url + '/regular/basic/')
        version_panel = self.selenium.find_element_by_id('VersionsPanel')

        # Versions panel isn't loaded yet (content is fetched on demand).
        with self.assertRaises(NoSuchElementException):
            version_panel.find_element_by_tag_name('table')

        # Click to show the versions panel.
        self.selenium.find_element_by_class_name('VersionsPanel').click()

        # Version panel loads its table asynchronously.
        table = WebDriverWait(self.selenium, timeout=10).until(
            lambda selenium: version_panel.find_element_by_tag_name('table'))
        self.assertIn("Name", table.text)
        self.assertIn("Version", table.text)

    @override_settings(DEBUG_TOOLBAR_CONFIG={'RESULTS_CACHE_SIZE': 0})
    def test_expired_store(self):
        # With a zero-sized results cache the stored panel data is gone
        # by the time the panel is opened.
        self.selenium.get(self.live_server_url + '/regular/basic/')
        version_panel = self.selenium.find_element_by_id('VersionsPanel')

        # Click to show the version panel.
        self.selenium.find_element_by_class_name('VersionsPanel').click()

        # Version panel doesn't load; an error message is shown instead.
        error = WebDriverWait(self.selenium, timeout=10).until(
            lambda selenium: version_panel.find_element_by_tag_name('p'))
        self.assertIn("Data for this panel isn't available anymore.", error.text)

    @override_settings(TEMPLATE_LOADERS=[(
        'django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ),
    )])
    def test_django_cached_template_loader(self):
        self.selenium.get(self.live_server_url + '/regular/basic/')
        version_panel = self.selenium.find_element_by_id('TemplatesPanel')

        # Click to show the templates panel.
        self.selenium.find_element_by_class_name('TemplatesPanel').click()

        # Templates panel loads; trigger the remote source display.
        trigger = WebDriverWait(self.selenium, timeout=10).until(
            lambda selenium: version_panel.find_element_by_css_selector(
                '.remoteCall'))
        trigger.click()

        # Verify the template source code is displayed.
        WebDriverWait(self.selenium, timeout=10).until(
            lambda selenium: self.selenium.find_element_by_css_selector(
                '#djDebugWindow code'))
| bsd-3-clause |
ldong/vim_youcompleteme | python/ycm/completers/python/jedi_completer.py | 1 | 5233 | #!/usr/bin/env python
#
# Copyright (C) 2011, 2012 Stephen Sugden <me@stephensugden.com>
# Google Inc.
# Stanislav Golovanov <stgolovanov@gmail.com>
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.completers.completer import Completer
from ycm.server import responses
try:
import jedi
except ImportError:
raise ImportError(
'Error importing jedi. Make sure the jedi submodule has been checked out. '
'In the YouCompleteMe folder, run "git submodule update --init --recursive"')
class JediCompleter( Completer ):
"""
A Completer that uses the Jedi completion engine.
https://jedi.readthedocs.org/en/latest/
"""
  def __init__( self, user_options ):
    # All generic completer setup is handled by the Completer base class.
    super( JediCompleter, self ).__init__( user_options )
def SupportedFiletypes( self ):
""" Just python """
return [ 'python' ]
def _GetJediScript( self, request_data ):
filename = request_data[ 'filepath' ]
contents = request_data[ 'file_data' ][ filename ][ 'contents' ]
# Jedi expects lines to start at 1, not 0
line = request_data[ 'line_num' ] + 1
column = request_data[ 'column_num' ]
return jedi.Script( contents, line, column, filename )
def ComputeCandidatesInner( self, request_data ):
script = self._GetJediScript( request_data )
return [ responses.BuildCompletionData(
str( completion.name ),
str( completion.description ),
str( completion.doc ) )
for completion in script.completions() ]
def DefinedSubcommands( self ):
return [ 'GoToDefinition',
'GoToDeclaration',
'GoTo' ]
def OnUserCommand( self, arguments, request_data ):
if not arguments:
raise ValueError( self.UserCommandsHelpMessage() )
command = arguments[ 0 ]
if command == 'GoToDefinition':
return self._GoToDefinition( request_data )
elif command == 'GoToDeclaration':
return self._GoToDeclaration( request_data )
elif command == 'GoTo':
return self._GoTo( request_data )
raise ValueError( self.UserCommandsHelpMessage() )
def _GoToDefinition( self, request_data ):
definitions = self._GetDefinitionsList( request_data )
if definitions:
return self._BuildGoToResponse( definitions )
else:
raise RuntimeError( 'Can\'t jump to definition.' )
def _GoToDeclaration( self, request_data ):
definitions = self._GetDefinitionsList( request_data, declaration = True )
if definitions:
return self._BuildGoToResponse( definitions )
else:
raise RuntimeError( 'Can\'t jump to declaration.' )
def _GoTo( self, request_data ):
definitions = ( self._GetDefinitionsList( request_data ) or
self._GetDefinitionsList( request_data, declaration = True ) )
if definitions:
return self._BuildGoToResponse( definitions )
else:
raise RuntimeError( 'Can\'t jump to definition or declaration.' )
def _GetDefinitionsList( self, request_data, declaration = False ):
definitions = []
script = self._GetJediScript( request_data )
try:
if declaration:
definitions = script.goto_definitions()
else:
definitions = script.goto_assignments()
except jedi.NotFoundError:
raise RuntimeError(
'Cannot follow nothing. Put your cursor on a valid name.' )
return definitions
def _BuildGoToResponse( self, definition_list ):
if len( definition_list ) == 1:
definition = definition_list[ 0 ]
if definition.in_builtin_module():
if definition.is_keyword:
raise RuntimeError(
'Cannot get the definition of Python keywords.' )
else:
raise RuntimeError( 'Builtin modules cannot be displayed.' )
else:
return responses.BuildGoToResponse( definition.module_path,
definition.line - 1,
definition.column )
else:
# multiple definitions
defs = []
for definition in definition_list:
if definition.in_builtin_module():
defs.append( responses.BuildDescriptionOnlyGoToResponse(
'Builtin ' + definition.description ) )
else:
defs.append(
responses.BuildGoToResponse( definition.module_path,
definition.line - 1,
definition.column,
definition.description ) )
return defs
| gpl-3.0 |
alexey4petrov/pythonFlu | Foam/functionObjects/__init__.py | 1 | 5259 | ## pythonFlu - Python wrapping for OpenFOAM C++ API
## Copyright (C) 2010- Alexey Petrov
## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR)
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## See http://sourceforge.net/projects/pythonflu
##
## Author : Alexey PETROV
##
#----------------------------------------------------------------------------------------
def getfunctionObjectConstructorToTableBase() :
    """Return a fresh functionObjectConstructorToTableBase_<N> base class.

    Each call consumes the next value of the SWIG-side constructor-table
    counter and imports the correspondingly numbered base class, so every
    Python-defined functionObject factory gets its own table slot.
    """
    aClass = None
    from Foam.OpenFOAM import TConstructorToTableCounter_functionObject
    aCounter = TConstructorToTableCounter_functionObject.counter()
    aClassName = "functionObjectConstructorToTableBase_%d" % aCounter
    anExpression = "from Foam.OpenFOAM import %s; aClass = %s" % ( aClassName, aClassName )
    # Python 2 'exec' statement: rebinds aClass in this local scope.
    exec anExpression
    return aClass
#----------------------------------------------------------------------------------------
from Foam.OpenFOAM import functionObject
class functionObject_pythonFlu( functionObject ):
    """OpenFOAM functionObject that runs user-supplied Python code snippets.

    The controlDict entry may provide 'startCode'/'executeCode'/'endCode'
    strings (or the corresponding '*File' keys naming script files); each
    snippet is exec'd in a shared namespace that exposes 'runTime' and
    'self'.  NOTE: this module uses Python 2 'exec' statements.
    """
    @staticmethod
    def type():
        # Run-time selection table name for this functionObject.
        from Foam.OpenFOAM import word
        return word( 'pythonFlu' )

    def __init__( self, the_name, the_time, the_dict ) :
        # Keep references to the Time object and the controlDict sub-dict.
        self._time = the_time
        self._dict = the_dict
        # Code snippets (filled by read()) and their shared exec namespace.
        self._startCode = ''
        self._executeCode = ''
        self._endCode = ''
        self._executionFrame = {}
        functionObject.__init__( self, the_name )
        self.read( the_dict )
        pass

    def start( self ) :
        # Expose the Time object and this functionObject to the user code.
        self._executionFrame.update( { 'runTime' : self._time, 'self' : self } )
        # print "start = \"%s\" in %s" % ( self._startCode, self._executionFrame )
        exec self._startCode in self._executionFrame
        return True

    def execute( self, *args ):
        from Foam import FOAM_VERSION
        if FOAM_VERSION( ">=", "020000" ):
            # OpenFOAM >= 2.0 passes a forceWrite flag; expose it while the
            # user code runs, then remove it again below.
            self._executionFrame[ "forceWrite" ] = args[ 0 ]
            # print "forceWrite = ", self._executionFrame[ "forceWrite" ]
            pass
        # print "execute = \"%s\"" % ( self._executeCode )
        exec self._executeCode in self._executionFrame
        if FOAM_VERSION( ">=", "020000" ):
            del self._executionFrame[ "forceWrite" ]
            pass
        return True

    def end( self ):
        # print "end = \"%s\"" % ( self._endCode )
        exec self._endCode in self._executionFrame
        return True

    def read( self, the_dict ):
        # (Re)load all three code snippets from the dictionary.
        self._startCode = self._readCode( the_dict, 'start' )
        self._executeCode = self._readCode( the_dict, 'execute' )
        self._endCode = self._readCode( the_dict, 'end' )
        return True

    def _readCode( self, the_dict, the_prefix ) :
        """Return the snippet for '<the_prefix>Code', either inline or loaded
        from the file named by '<the_prefix>File' (mutually exclusive)."""
        from Foam.OpenFOAM import word
        a_code_prefix = word( the_prefix + 'Code' )
        an_is_string = the_dict.found( a_code_prefix )
        from Foam.OpenFOAM import word
        a_file_prefix = word( the_prefix + 'File' )
        an_is_file = the_dict.found( a_file_prefix )
        # Inline code and a code file must not both be given.
        if an_is_string and an_is_file :
            raise AssertionError()
        a_string = ''
        if an_is_string :
            from Foam.src.OpenFOAM.primitives.strings.string import string
            a_string = str( string( the_dict.lookup( a_code_prefix ) ) )
        elif an_is_file :
            from Foam.src.OpenFOAM.primitives.strings.string import string
            a_filename = str( string( the_dict.lookup( a_file_prefix ) ) )
            import os.path
            if not os.path.isfile( a_filename ) :
                raise AssertionError()
            a_file = open( a_filename )
            # NOTE(review): list objects have no 'join' -- this line would
            # raise AttributeError if a '*File' key is ever used; probably
            # meant '\n'.join( a_file.readlines() ) or a_file.read().
            # Left unchanged pending confirmation of intent.
            a_string = a_file.readlines().join( '\n' )
            pass
        return a_string
    pass
#----------------------------------------------------------------------------------------
class functionObjectConstructorToTable_pythonFlu( getfunctionObjectConstructorToTableBase() ):
    """Factory entry that registers the 'pythonFlu' functionObject type in
    OpenFOAM's run-time selection table.  Instantiating this class once is
    what makes 'type pythonFlu;' resolvable from a controlDict entry."""

    def __init__( self ):
        # Chain up through the dynamically generated base class, then
        # register this factory under the 'pythonFlu' type name.
        a_base_class = self.__class__.__bases__[ 0 ]
        a_base_class.__init__( self )
        a_base_class.init( self, self, functionObject_pythonFlu.type() )

    def _new_( self, the_name, the_time, the_dict ):
        """Factory hook invoked from C++: build a functionObject_pythonFlu
        and hand it back wrapped in an autoPtr_functionObject."""
        from Foam.OpenFOAM import autoPtr_functionObject
        a_function_object = functionObject_pythonFlu( the_name, the_time, the_dict )
        return autoPtr_functionObject( a_function_object )
#----------------------------------------------------------------------------------------------------------
# Instantiate once at import time so the 'pythonFlu' functionObject type is
# registered with OpenFOAM's run-time selection table.
pythonFlu_functionObjectConstructorToTable = functionObjectConstructorToTable_pythonFlu()
#----------------------------------------------------------------------------------------------------------
| gpl-3.0 |
estately/sensu-community-plugins | plugins/openstack/nova/nova-server-state-metrics.py | 47 | 1646 | #!/usr/bin/env python
# #RED
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
    """Print one Graphite-style metric line: '<name>\t<value>\t<epoch>'.

    Fix: the original used the Python 2-only print statement; print() with a
    single argument produces identical output on Python 2 and also works on
    Python 3.
    """
    print('{}\t{}\t{}'.format(name, value, int(time.time())))
def main():
    """Query nova for all servers and emit one metric per server state.

    Authenticates with the credentials given on the command line, counts
    servers by status, and prints '<scheme>.<state>\t<count>\t<ts>' lines
    via output_metric().
    """
    parser = ArgumentParser()
    parser.add_argument('-u', '--user', default='admin')
    parser.add_argument('-p', '--password', default='admin')
    parser.add_argument('-t', '--tenant', default='admin')
    parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
    parser.add_argument('-S', '--service-type', default='compute')
    parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
    args = parser.parse_args()

    client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
    servers = client.servers.list()

    # Known states pre-seeded with 0 so every state is always emitted, even
    # when no server is currently in it.
    # http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
    states = {
        'ACTIVE': 0,
        'BUILD': 0,
        'DELETED': 0,
        'ERROR': 0,
        'HARD_REBOOT': 0,
        'PASSWORD': 0,
        'REBOOT': 0,
        'REBUILD': 0,
        'RESCUE': 0,
        'RESIZE': 0,
        'REVERT_RESIZE': 0,
        'SHUTOFF': 0,
        'SUSPENDED': 0,
        'UNKNOWN': 0,
        'VERIFY_RESIZE': 0,
    }

    for server in servers:
        # States not in the table above are still counted rather than dropped.
        if server.status not in states:
            states[server.status] = 0
        states[server.status] += 1

    # NOTE: .iteritems() is Python 2 only; this script targets Python 2.
    for state, count in states.iteritems():
        output_metric('{}.{}'.format(args.scheme, state.lower()), count)
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
frankvdp/django | tests/admin_changelist/models.py | 76 | 2971 | import uuid
from django.db import models
class Event(models.Model):
    # Single-date event; the column is renamed because of the comment below.
    # Oracle can have problems with a column named "date"
    date = models.DateField(db_column="event_date")
class Parent(models.Model):
    # Referenced by Child via a nullable FK.
    name = models.CharField(max_length=128)
class Child(models.Model):
    # Non-editable, nullable FK; deleting the Parent nulls this out.
    parent = models.ForeignKey(Parent, models.SET_NULL, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
    # M2M target of Band.genres.
    name = models.CharField(max_length=20)
class Band(models.Model):
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    # Plain many-to-many (no explicit through model).
    genres = models.ManyToManyField(Genre)
class Musician(models.Model):
    # Linked to Group through the explicit Membership model below.
    name = models.CharField(max_length=30)
    age = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return self.name
class Group(models.Model):
    name = models.CharField(max_length=30)
    # M2M with an explicit through model (Membership).
    members = models.ManyToManyField(Musician, through='Membership')

    def __str__(self):
        return self.name
class Concert(models.Model):
    # A concert belongs to exactly one Group; cascades on delete.
    name = models.CharField(max_length=30)
    group = models.ForeignKey(Group, models.CASCADE)
class Membership(models.Model):
    # Through model for Group.members, carrying the member's role.
    music = models.ForeignKey(Musician, models.CASCADE)
    group = models.ForeignKey(Group, models.CASCADE)
    role = models.CharField(max_length=15)
# Multi-table inheritance child of Group (adds no fields of its own).
class Quartet(Group):
    pass
# Multi-table inheritance child of Musician (adds no fields of its own).
class ChordsMusician(Musician):
    pass
class ChordsBand(models.Model):
    name = models.CharField(max_length=30)
    # M2M to an inherited model, through the explicit Invitation model.
    members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
    # Through model for ChordsBand.members.
    player = models.ForeignKey(ChordsMusician, models.CASCADE)
    band = models.ForeignKey(ChordsBand, models.CASCADE)
    instrument = models.CharField(max_length=15)
class Swallow(models.Model):
    # Non-integer (UUID) primary key with a multi-field default ordering.
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4)
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()

    class Meta:
        ordering = ('speed', 'load')
class SwallowOneToOne(models.Model):
    # One-to-one onto a model whose PK is a UUID.
    swallow = models.OneToOneField(Swallow, models.CASCADE)
class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #17198.
    """
    # Field deliberately shadows the builtin name 'bool' (test fixture).
    bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    # Default manager that forces ordering by the 'number' field.
    def get_queryset(self):
        return super().get_queryset().order_by('number')
class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.
    Refs #17198.
    """
    name = models.CharField(max_length=255)
    # Field deliberately shadows the builtin name 'bool' (test fixture).
    bool = models.BooleanField(default=True)
    number = models.IntegerField(default=0, db_column='number_val')

    objects = OrderedObjectManager()
class CustomIdUser(models.Model):
    # Explicit AutoField PK named 'uuid' (an int, despite the name).
    uuid = models.AutoField(primary_key=True)
class CharPK(models.Model):
    # String-typed primary key.
    char_pk = models.CharField(max_length=100, primary_key=True)
| bsd-3-clause |
uahic/nest-simulator | pynest/nest/tests/test_parrot_neuron.py | 18 | 9989 | # -*- coding: utf-8 -*-
#
# test_parrot_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the parrot_neuron in NEST.
# See test_parrot_neuron_ps.py for an equivalent test of the precise parrot.
import nest
import unittest
import math
@nest.check_stack
class ParrotNeuronTestCase(unittest.TestCase):
    """Check parrot_neuron spike repetition properties"""

    def setUp(self):
        # Fresh kernel per test so the network and clock start clean.
        nest.set_verbosity('M_WARNING')
        nest.ResetKernel()

        # set up source spike generator, as well as parrot neurons
        self.spike_time = 1.
        self.delay = .2
        self.source = nest.Create("spike_generator", 1,
                                  {"spike_times": [self.spike_time]})
        self.parrot = nest.Create('parrot_neuron')
        self.spikes = nest.Create("spike_detector")

        # record source and parrot spikes
        nest.Connect(self.source, self.spikes)
        nest.Connect(self.parrot, self.spikes)

    def test_ParrotNeuronRepeatSpike(self):
        """Check parrot_neuron repeats spikes on port 0"""

        # connect with arbitrary delay
        nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
        nest.Simulate(self.spike_time + 2 * self.delay)

        # get spike from parrot neuron
        events = nest.GetStatus(self.spikes)[0]["events"]
        post_time = events['times'][events['senders'] == self.parrot[0]]

        # assert spike was repeated at correct time
        assert post_time, "Parrot neuron failed to repeat spike."
        assert self.spike_time + self.delay == post_time, \
            "Parrot neuron repeated spike at wrong delay"

    def test_ParrotNeuronIgnoreSpike(self):
        """Check parrot_neuron ignores spikes on port 1"""

        # connect with arbitrary delay to port 1
        nest.Connect(self.source, self.parrot,
                     syn_spec={"receptor_type": 1, "delay": self.delay})
        nest.Simulate(self.spike_time + 2. * self.delay)

        # get spike from parrot neuron, assert it was ignored
        events = nest.GetStatus(self.spikes)[0]["events"]
        post_time = events['times'][events['senders'] == self.parrot[0]]
        assert len(post_time) == 0, \
            "Parrot neuron failed to ignore spike arriving on port 1"

    def test_ParrotNeuronOutgoingMultiplicity(self):
        """
        Check parrot_neuron correctly repeats multiple spikes

        The parrot_neuron receives two spikes in a single time step.
        We check that both spikes are forwarded to the spike_detector.
        """

        # connect twice (two synapses), so the single source spike arrives twice
        nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
        nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
        nest.Simulate(self.spike_time + 2. * self.delay)

        # get spikes from parrot neuron, assert two were transmitted
        events = nest.GetStatus(self.spikes)[0]["events"]
        post_times = events['times'][events['senders'] == self.parrot[0]]
        assert len(post_times) == 2 and post_times[0] == post_times[1], \
            "Parrot neuron failed to correctly repeat multiple spikes."
@nest.check_stack
class ParrotNeuronPoissonTestCase(unittest.TestCase):
    """Check parrot_neuron spike repetition properties"""

    def test_ParrotNeuronIncomingMultiplicity(self):
        """
        Check parrot_neuron heeds multiplicity information in incoming spikes.

        This test relies on the fact that poisson_generator transmits
        multiple spikes during a time step using multiplicity, and that
        these spikes are delivered directly, i.e., without multiplicity-
        unrolling in send_remote().

        We create a high-rate poisson_generator. If parrot_neuron
        ignored multiplicity, it would only transmit one spike per time
        step. We chain two parrot_neurons to check against any loss.
        """

        # set up source spike generator, as well as parrot neurons
        h = 0.1  # ms
        rate = 1000000.  # spikes / s
        delay = 1.  # ms
        t_base = 1000.  # ms
        t_sim = t_base + 3 * delay   # after t_sim, spikes from t_base arrived
        spikes_expected = rate * t_base / 1000.
        spikes_std = math.sqrt(spikes_expected)

        # if the test is to be meaningful we must expect signficantly more
        # spikes than time steps
        assert spikes_expected - 3 * spikes_std > 10. * t_sim / h, \
            "Internal inconsistency: too few spikes."

        nest.set_verbosity('M_WARNING')
        nest.ResetKernel()
        # fixed RNG seeds for reproducibility
        nest.SetKernelStatus({'resolution': h,
                              'grng_seed': 123,
                              'rng_seeds': [456]})

        source = nest.Create('poisson_generator', params={'rate': rate})
        parrots = nest.Create('parrot_neuron', 2)
        detect = nest.Create('spike_detector')

        # chain: source -> parrot[0] -> parrot[1] -> detector
        nest.Connect(source, parrots[:1], syn_spec={'delay': delay})
        nest.Connect(parrots[:1], parrots[1:], syn_spec={'delay': delay})
        nest.Connect(parrots[1:], detect)

        nest.Simulate(t_sim)

        # spike count must fall within 3 sigma of the Poisson expectation
        n_spikes = nest.GetStatus(detect)[0]['n_events']
        assert n_spikes > spikes_expected - 3 * spikes_std, \
            "parrot_neuron loses spikes."
        assert n_spikes < spikes_expected + 3 * spikes_std, \
            "parrot_neuron adds spikes."
@nest.check_stack
class ParrotNeuronSTDPTestCase(unittest.TestCase):
    """
    Check STDP protocol between two parrot_neurons connected by a stdp_synapse.
    Exact pre- and post-synaptic spike times are set by spike_generators
    connected to each parrot neuron. Additional spikes sent through the
    stdp_synapse are explicitly ignored in the postsynaptic parrot_neuron
    by setting the stdp_synapse to connect to port 1.
    """

    def run_protocol(self, dt):
        """Set up a network with pre-post spike pairings
        with t_post - t_pre = dt.

        Returns the stdp_synapse weight (before, after) the protocol.
        """

        nest.set_verbosity("M_WARNING")
        nest.ResetKernel()

        # set pre and postsynaptic spike times
        delay = 1.  # delay for connections
        dspike = 100.  # ISI

        # set the correct real spike times for generators (correcting for
        # delays)
        pre_times = [100., 100. + dspike]
        post_times = [k + dt for k in pre_times]

        # create spike_generators with these times
        pre_spikes = nest.Create("spike_generator", 1, {
            "spike_times": pre_times})
        post_spikes = nest.Create("spike_generator", 1, {
            "spike_times": post_times})

        # create parrot neurons and connect spike_generators
        pre_parrot = nest.Create("parrot_neuron", 1)
        post_parrot = nest.Create("parrot_neuron", 1)

        nest.Connect(pre_spikes, pre_parrot, syn_spec={"delay": delay})
        nest.Connect(post_spikes, post_parrot, syn_spec={"delay": delay})

        # create spike detector
        spikes = nest.Create("spike_detector")
        nest.Connect(pre_parrot, spikes)
        nest.Connect(post_parrot, spikes)

        # connect both parrot neurons with a stdp synapse onto port 1
        # thereby spikes transmitted through the stdp connection are
        # not repeated postsynaptically.
        syn_spec = {
            "model": "stdp_synapse",
            # set receptor 1 postsynaptically, to not generate extra spikes
            "receptor_type": 1,
        }
        conn_spec = {
            "rule": "one_to_one",
        }
        nest.Connect(pre_parrot, post_parrot,
                     syn_spec=syn_spec, conn_spec=conn_spec)

        # get STDP synapse and weight before protocol
        syn = nest.GetConnections(
            source=pre_parrot, synapse_model="stdp_synapse")
        syn_status = nest.GetStatus(syn)[0]
        w_pre = syn_status['weight']

        # simulate long enough for the last spike (plus delay) to arrive
        last_time = max(pre_times[-1], post_times[-1])
        nest.Simulate(last_time + 2 * delay)

        # get weight post protocol
        syn_status = nest.GetStatus(syn)[0]
        w_post = syn_status['weight']

        return w_pre, w_post

    def test_ParrotNeuronSTDPProtocolPotentiation(self):
        """Check pre-post spike pairings between parrot_neurons
        increments weights."""

        dt = 10.
        w_pre, w_post = self.run_protocol(dt)
        assert w_pre < w_post, "Parrot neuron STDP potentiation \
            protocol failed to elicit positive weight changes."

    def test_ParrotNeuronSTDPProtocolDepression(self):
        """Check post-pre spike pairings between parrot_neurons
        decrement weights."""

        dt = -10.
        w_pre, w_post = self.run_protocol(dt)
        assert w_pre > w_post, "Parrot neuron STDP potentiation \
            protocol failed to elicit negative weight changes."
def suite():
    """Assemble all parrot_neuron test cases into one unittest suite.

    Uses loadTestsFromTestCase instead of the semi-obsolete makeSuite
    (http://bugs.python.org/issue2721).
    """
    loader = unittest.TestLoader()
    case_classes = (ParrotNeuronTestCase,
                    ParrotNeuronPoissonTestCase,
                    ParrotNeuronSTDPTestCase)
    return unittest.TestSuite(loader.loadTestsFromTestCase(case)
                              for case in case_classes)
def run():
    """Run the full parrot_neuron suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
simotek/tanko-bot | src/test-serial.py | 1 | 3379 | # RobotMain - Simon Lees simon@simotek.net
# Copyright (C) 2015 Simon Lees
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import argparse
import threading
import time
from PyLibs.threadedserial import ThreadedSerial
from PyLibs.util import CallbackHelper
from PyLibs.constants import *
class SerialTestParsingException(Exception):
    """Exception type reserved for serial-message parsing errors.

    NOTE(review): defined but never raised anywhere in this module.
    """
    pass
class SerialTestInterfaceCallbacks:
    """Container for the callback slots a SerialTestInterface can fire."""
    def __init__(self):
        # Fired with the decoded payload for every message received;
        # see SerialTestInterface.decodeMessage.
        self.annMessage = CallbackHelper()
class SerialTestInterface:
    """Thread-safe bridge between a ThreadedSerial port and callback consumers.

    Incoming messages are queued from the serial thread (onMessage) and
    drained on the caller's thread via processMessages(), which invokes the
    registered annMessage callbacks outside the lock.
    """
    def __init__(self, callbacks):
        # RLock guards __messageQueue, which is written from the serial
        # receive thread and read from the main loop.
        self.__dataLock = threading.RLock()
        self.__messageQueue = []
        self.__callbacks = callbacks

        self.__serial = ThreadedSerial("/dev/ttyS1", 4800)
        self.__serial.setSerialRecieveFunction(self.onMessage)
        self.__serial.create()

    def getCallbacks(self):
        return self.__callbacks

    def setCallbacks(self, callbacks):
        self.__callbacks = callbacks

    # Adds message to queue for processing (called from the serial thread)
    def onMessage(self, message):
        print("msg recieved")
        self.__dataLock.acquire()
        self.__messageQueue.append(message)
        self.__dataLock.release()

    # Processes all messages on queue and fires their callbacks
    def processMessages(self):
        self.__dataLock.acquire()
        while self.__messageQueue:
            message = self.__messageQueue.pop(0)
            # Unlock mutex to avoid holding while signals are triggered
            self.__dataLock.release()
            self.decodeMessage(message[0])
            # relock mutex for next check of the queue
            self.__dataLock.acquire()
        # Release mutex once finished
        self.__dataLock.release()

    def decodeMessage(self, message):
        # Forward the raw payload to all registered announce callbacks.
        data = message
        self.__callbacks.annMessage.invoke(data)

    def sendMessage(self, message):
        print ("Sending: " + message)
        self.__serial.write(message)

    def sendDriveMotorSpeed(self, args):
        # args[0]/args[1] are the left/right speed strings -- presumably;
        # TODO confirm against the protocol constants module.
        message = str(CONST_SERVER_COMMAND_MOTOR_DRIVE+":"+args[0]+","+args[1]+"\n")
        print (message)
        self.__serial.write(message)

    # NOTE(review): this method is referenced as a bare name at module level
    # (annMessage.register(printMessage)), where it is undefined -- that call
    # site raises NameError as written.
    def printMessage(self, args):
        print ("MSG:"+args[0])
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Main Robot control app")
    parser.add_argument('--no-serial', dest='noserial', action='store_true')
    args = parser.parse_args()

    # BUG FIX: 'printMessage' only exists as a method of SerialTestInterface,
    # so registering it by bare name here raised NameError.  Register a
    # module-level callback with the same observable behaviour instead.
    def print_message(msg_args):
        print("MSG:" + msg_args[0])

    serialCallbacks = SerialTestInterfaceCallbacks()
    serialCallbacks.annMessage.register(print_message)

    serialInterface = SerialTestInterface(serialCallbacks)

    # Main app event loop: drain received messages and periodically send
    # test payloads over the serial link.
    while True:
        serialInterface.processMessages()
        time.sleep(1)
        serialInterface.sendMessage("Foo\n")
        serialInterface.processMessages()
        time.sleep(1)
        serialInterface.sendMessage("Baa\n")
| lgpl-2.1 |
michaelhenry/codebehind | codebehind/serializers.py | 1 | 1072 | from django.contrib.auth.models import User, Group
from rest_framework import exceptions
from rest_framework import serializers
from . models import UserSecret
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django auth Users exposing the user's secret key
    (UserSecret.key) as a computed 'secret' field."""
    # Computed field backed by get_secret_key() below.
    secret = serializers.SerializerMethodField('get_secret_key')

    class Meta:
        model = User
        fields = ('id', 'username', 'password', 'email', 'first_name', 'last_name','secret',)
        write_only_fields = ('password',)
        read_only_fields = ('id',)

    def create(self, validated_data):
        # set_password() hashes the password; never store the raw value.
        user = User.objects.create(
            username=validated_data['username'],
            email=validated_data['email'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name']
        )
        user.set_password(validated_data['password'])
        user.save()
        return user

    def get_secret_key(self, obj):
        # Returns '' when the user has no UserSecret row.
        try:
            user_secret = UserSecret.objects.get(user=obj)
            return user_secret.key
        except UserSecret.DoesNotExist:
            # Python 2 print statement; this module is Python 2 only.
            print "does not exist"
            return ""
class GroupSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of Django's auth Group model."""
    class Meta:
        model = Group
        fields = ('__all__')
| mit |
JeongJunSik/TizenRT | external/iotivity/iotivity_1.2-rel/extlibs/gtest/gtest-1.7.0/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
# Filter expression used when exercising --gtest_filter.
TEST_FILTER = 'A*.A:A*.B:C*'

# Module-level caches populated lazily by CalculateTestLists(); each holds the
# flat list of 'TestCase.TestName' strings observed under one flag combination.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes Google Test also run disabled tests."""
  return "--gtest_also_run_disabled_tests"
def FilterFlag(test_filter):
  """Returns the --gtest_filter flag for the given filter expression."""
  return "--gtest_filter={0}".format(test_filter)
def RepeatFlag(n):
  """Returns the --gtest_repeat flag for n iterations."""
  return "--gtest_repeat={0}".format(n)
def ShuffleFlag():
  """Returns the flag that enables test shuffling."""
  return "--gtest_shuffle"
def RandomSeedFlag(n):
  """Returns the --gtest_random_seed flag for seed n."""
  return "--gtest_random_seed={0}".format(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output.

  Args:
    extra_env: dict of environment variables overlaid onto os.environ.
    args: list of command-line flags passed to gtest_shuffle_test_.

  Returns:
    The subprocess output as a string.
  """
  environ_copy = os.environ.copy()
  environ_copy.update(extra_env)

  return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """

  iterations = []
  for raw_line in RunAndReturnOutput(extra_env, args).split('\n'):
    if raw_line.startswith('----'):
      # A dashed separator line starts a new iteration's test list.
      current = []
      iterations.append(current)
    elif raw_line.strip():
      current.append(raw_line.strip())  # 'TestCaseName.TestName'

  return iterations
def GetTestCases(tests):
  """Returns the distinct test case names found in the given full test names.

  Args:
    tests: a list of 'TestCase.TestName' strings

  Returns:
    The test case names from 'tests' in first-appearance order.  Every
    duplicate (consecutive or not) is removed, keeping the first occurrence.
  """
  case_names = []
  for full_name in tests:
    case_name = full_name.split('.')[0]
    if case_name not in case_names:
      case_names.append(case_name)

  return case_names
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Lazily populates the module-level caches (ALL_TESTS ...
  SHUFFLED_SHARDED_TESTS); each list is only fetched if still empty, so
  repeated calls are cheap no-ops.
  """
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    # Shard 1 of 3, driven through the sharding environment variables.
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
  def testShuffleLeavesDeathTestsAtFront(self):
    """Verifies that all death tests still run before any non-death test
    after shuffling (googletest guarantees this ordering)."""
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        # Once a non-death test has been seen, no death test may follow.
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True
  def _VerifyTestCasesDoNotInterleave(self, tests):
    """Asserts that tests from the same test case are contiguous in *tests*.

    Records a test case name each time the case changes; if a case name is
    recorded twice, its tests were interleaved with another case's.
    NOTE(review): the very first test case is not appended (the guard
    requires a non-empty list), so it is only counted from its second
    contiguous run onward — slightly lenient, presumably intentional.
    """
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))
  def testShuffleDoesNotInterleaveTestCases(self):
    """Verifies that shuffling keeps each test case's tests grouped together
    in every test universe."""
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
  def testShuffleRestoresOrderAfterEachIteration(self):
    """Verifies that the pre-shuffle order is restored between repeat
    iterations, by reproducing each iteration with its seed alone."""
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively. Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers. This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)
    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above. Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)
    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above. Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)
  def testShuffleGeneratesNewOrderInEachIteration(self):
    """Verifies that each repeat iteration re-shuffles into a different
    order (successive seeds must produce pairwise-distinct orders)."""
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)
  def testShuffleShardedTestsPreservesPartition(self):
    """Verifies sharding still partitions the active tests when each shard
    shuffles with a different seed: the shards' union equals ACTIVE_TESTS."""
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    # Sort the concatenation of all shards and compare it against the
    # sorted full active-test list; equality proves a clean partition.
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Standard googletest-script entry point; Main() wraps unittest.main().
if __name__ == '__main__':
  gtest_test_utils.Main()
| apache-2.0 |
pdebuyl/numpy | numpy/fft/helper.py | 7 | 6205 | """
Discrete Fourier Transforms - helper.py
"""
from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
from numpy.core.overrides import array_function_dispatch, set_module
# Created by Pearu Peterson, September 2002
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
integer_types = integer_types + (integer,)
def _fftshift_dispatcher(x, axes=None):
    # Dispatcher for __array_function__: only `x` is a dispatchable
    # array-like argument for fftshift/ifftshift.
    return (x,)
@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def fftshift(x, axes=None):
    """
    Shift the zero-frequency component to the center of the spectrum.

    Swaps half-spaces for every axis listed in *axes* (all axes by
    default).  Note that ``y[0]`` is the Nyquist component only if
    ``len(x)`` is even.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to shift.  Default is None, which shifts all axes.

    Returns
    -------
    y : ndarray
        The shifted array.

    See Also
    --------
    ifftshift : The inverse of `fftshift`.

    Examples
    --------
    >>> np.fft.fftshift(np.fft.fftfreq(10, 0.1))
    array([-5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
    """
    x = asarray(x)
    # Normalise `axes` first; the shift along each axis is half the
    # axis length, which rolls the zero-frequency bin to the middle.
    if axes is None:
        axes = tuple(range(x.ndim))
    if isinstance(axes, integer_types):
        shift = x.shape[axes] // 2
    else:
        shift = [x.shape[ax] // 2 for ax in axes]
    return roll(x, shift, axes)
@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def ifftshift(x, axes=None):
    """
    The inverse of `fftshift`.  Identical for even-length `x`; the two
    functions differ by one sample for odd-length `x`.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to calculate.  Defaults to None, which shifts
        all axes.

    Returns
    -------
    y : ndarray
        The shifted array.

    See Also
    --------
    fftshift : Shift zero-frequency component to the center of the spectrum.

    Examples
    --------
    >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
    >>> np.array_equal(np.fft.ifftshift(np.fft.fftshift(freqs)), freqs)
    True
    """
    x = asarray(x)
    # Same structure as fftshift but with the shift negated, which
    # undoes fftshift exactly (for odd lengths roll(n//2) followed by
    # roll(-(n//2)) is the identity).
    if axes is None:
        axes = tuple(range(x.ndim))
    if isinstance(axes, integer_types):
        shift = -(x.shape[axes] // 2)
    else:
        shift = [-(x.shape[ax] // 2) for ax in axes]
    return roll(x, shift, axes)
@set_module('numpy.fft')
def fftfreq(n, d=1.0):
    """
    Return the Discrete Fourier Transform sample frequencies.

    The returned float array `f` contains the frequency bin centers in
    cycles per unit of the sample spacing (zero at the start).  For a
    window length `n` and a sample spacing `d`::

      f = [0, 1, ...,   n/2-1,     -n/2, ..., -1] / (d*n)   if n is even
      f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n)   if n is odd

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing (inverse of the sampling rate).  Defaults to 1.

    Returns
    -------
    f : ndarray
        Array of length `n` containing the sample frequencies.

    Examples
    --------
    >>> np.fft.fftfreq(8, d=0.1)
    array([ 0.  ,  1.25,  2.5 ,  3.75, -5.  , -3.75, -2.5 , -1.25])
    """
    if not isinstance(n, integer_types):
        raise ValueError("n should be an integer")
    val = 1.0 / (n * d)
    # Fill the non-negative bins first, then the negative ones, and
    # scale once at the end so the result is a float array.
    results = empty(n, int)
    half = (n - 1) // 2 + 1
    results[:half] = arange(0, half, dtype=int)
    results[half:] = arange(-(n // 2), 0, dtype=int)
    return results * val
@set_module('numpy.fft')
def rfftfreq(n, d=1.0):
    """
    Return the Discrete Fourier Transform sample frequencies for use
    with `rfft` and `irfft`.

    The returned float array `f` contains the frequency bin centers in
    cycles per unit of the sample spacing (zero at the start).  For a
    window length `n` and a sample spacing `d`::

      f = [0, 1, ...,     n/2-1,     n/2] / (d*n)   if n is even
      f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n)   if n is odd

    Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) the Nyquist
    frequency component is considered to be positive.

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing (inverse of the sampling rate).  Defaults to 1.

    Returns
    -------
    f : ndarray
        Array of length ``n//2 + 1`` containing the sample frequencies.

    Examples
    --------
    >>> np.fft.rfftfreq(10, d=0.01)
    array([ 0., 10., 20., 30., 40., 50.])
    """
    if not isinstance(n, integer_types):
        raise ValueError("n should be an integer")
    # Only the non-negative half-spectrum is produced; scaling the int
    # range by 1/(n*d) yields the float frequencies.
    bins = n // 2 + 1
    return arange(0, bins, dtype=int) * (1.0 / (n * d))
| bsd-3-clause |
SnabbCo/neutron | neutron/plugins/nuage/nuage_models.py | 1 | 3136 | # Copyright 2014 Alcatel-Lucent USA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
from sqlalchemy import Boolean, Column, ForeignKey, String
from neutron.db import model_base
from neutron.db import models_v2
class NetPartition(model_base.BASEV2, models_v2.HasId):
    """Nuage net-partition known to the plugin, with its VSD template ids."""
    __tablename__ = 'net_partitions'
    name = Column(String(64))
    # Ids of the default L3/L2 domain templates on the Nuage VSD.
    l3dom_tmplt_id = Column(String(36))
    l2dom_tmplt_id = Column(String(36))
class NetPartitionRouter(model_base.BASEV2):
    """Maps a Neutron router to the net-partition (and Nuage router) that
    backs it; rows are removed with their parent via ON DELETE CASCADE."""
    __tablename__ = "net_partition_router_mapping"
    net_partition_id = Column(String(36),
                              ForeignKey('net_partitions.id',
                                         ondelete="CASCADE"),
                              primary_key=True)
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    # Id of the corresponding router object on the Nuage VSD.
    nuage_router_id = Column(String(36))
class RouterZone(model_base.BASEV2):
    """Maps a Neutron router to its Nuage zone plus the VSD user/group that
    owns resources created under it."""
    __tablename__ = "router_zone_mapping"
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    nuage_zone_id = Column(String(36))
    nuage_user_id = Column(String(36))
    nuage_group_id = Column(String(36))
class SubnetL2Domain(model_base.BASEV2):
    """Maps a Neutron subnet to the Nuage subnet/L2 domain that implements
    it, within a given net-partition."""
    __tablename__ = 'subnet_l2dom_mapping'
    subnet_id = Column(String(36),
                       ForeignKey('subnets.id', ondelete="CASCADE"),
                       primary_key=True)
    net_partition_id = Column(String(36),
                              ForeignKey('net_partitions.id',
                                         ondelete="CASCADE"))
    nuage_subnet_id = Column(String(36))
    # Template the L2 domain was instantiated from, if any.
    nuage_l2dom_tmplt_id = Column(String(36))
    nuage_user_id = Column(String(36))
    nuage_group_id = Column(String(36))
class PortVPortMapping(model_base.BASEV2):
    """Maps a Neutron port to its Nuage vPort and VIF."""
    __tablename__ = 'port_mapping'
    port_id = Column(String(36),
                     ForeignKey('ports.id', ondelete="CASCADE"),
                     primary_key=True)
    nuage_vport_id = Column(String(36))
    nuage_vif_id = Column(String(36))
    # Whether the port's IP was statically assigned on the Nuage side.
    static_ip = Column(Boolean())
class RouterRoutesMapping(model_base.BASEV2, models_v2.Route):
    """Associates a Neutron router's extra routes (destination/nexthop from
    the Route mixin) with the static-route object created on the VSD."""
    __tablename__ = 'routerroutes_mapping'
    router_id = Column(String(36),
                       ForeignKey('routers.id',
                                  ondelete="CASCADE"),
                       primary_key=True,
                       nullable=False)
    nuage_route_id = Column(String(36))
| apache-2.0 |
grivon/yabusame-qemu-dpt | scripts/tracetool/backend/simple.py | 55 | 1385 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple built-in backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
def c(events):
    """Emit the C source for the simple backend: the trace_list[] table
    with one (name, disabled) entry per trace event."""
    out('#include "trace.h"',
        '',
        'TraceEvent trace_list[] = {')
    for e in events:
        # Every event starts disabled (.state=0); toggled at runtime.
        out('{.tp_name = "%(name)s", .state=0},',
            name = e.name,
            )
    out('};')
def h(events):
    """Emit the C header for the simple backend: one inline trace_<name>()
    wrapper per event plus the NR_TRACE_EVENTS/trace_list declarations."""
    out('#include "trace/simple.h"',
        '')
    for num, e in enumerate(events):
        if len(e.args):
            # Cast each argument through uintptr_t to uint64_t so the
            # generic traceN() helpers can record it.
            argstr = e.args.names()
            arg_prefix = ', (uint64_t)(uintptr_t)'
            cast_args = arg_prefix + arg_prefix.join(argstr)
            simple_args = (str(num) + cast_args)
        else:
            # No payload: the event number alone identifies the record.
            simple_args = str(num)
        out('static inline void trace_%(name)s(%(args)s)',
            '{',
            '    trace%(argc)d(%(trace_args)s);',
            '}',
            name = e.name,
            args = e.args,
            argc = len(e.args),
            trace_args = simple_args,
            )
    out('#define NR_TRACE_EVENTS %d' % len(events))
    out('extern TraceEvent trace_list[NR_TRACE_EVENTS];')
| gpl-2.0 |
mistercrunch/airflow | tests/providers/amazon/aws/operators/test_glacier.py | 10 | 1594 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from airflow.providers.amazon.aws.operators.glacier import GlacierCreateJobOperator
AWS_CONN_ID = "aws_default"
BUCKET_NAME = "airflow_bucket"
FILENAME = "path/to/file/"
GCP_CONN_ID = "google_cloud_default"
JOB_ID = "1a2b3c4d"
OBJECT_NAME = "file.csv"
TASK_ID = "glacier_job"
VAULT_NAME = "airflow"
class TestGlacierCreateJobOperator(TestCase):
    """Unit test for GlacierCreateJobOperator.execute()."""

    @mock.patch("airflow.providers.amazon.aws.operators.glacier.GlacierHook")
    def test_execute(self, hook_mock):
        # execute() must build a GlacierHook with the configured AWS
        # connection and kick off an inventory retrieval for the vault.
        op = GlacierCreateJobOperator(aws_conn_id=AWS_CONN_ID, vault_name=VAULT_NAME, task_id=TASK_ID)
        op.execute(mock.MagicMock())
        hook_mock.assert_called_once_with(aws_conn_id=AWS_CONN_ID)
        hook_mock.return_value.retrieve_inventory.assert_called_once_with(vault_name=VAULT_NAME)
| apache-2.0 |
pgoeser/gnuradio | gr-wxgui/src/python/gui.py | 16 | 4585 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import wx
from gnuradio import gr
#
# Top-level display panel with vertical box sizer. User does not create or
# subclass this class; rather, the user supplies his own class constructor
# that gets invoked with needed parameters.
#
class top_panel(wx.Panel):
    """Top-level display panel with a vertical box sizer.

    Not created or subclassed by users; it instantiates the user-supplied
    GUI class (if any) with the frame, parent, sizer, flowgraph and
    command-line parameters.
    """
    def __init__(self, frame, top_block, gui, options, args):
        wx.Panel.__init__(self, frame, -1)

        vbox = wx.BoxSizer(wx.VERTICAL)

        # Create the user's GUI class
        if gui is not None:
            self.gui = gui(frame,      # Top-level window frame
                           self,       # Parent class for user created windows
                           vbox,       # Sizer for user to add windows to
                           top_block,  # GUI-unaware flowgraph to manipulate
                           options,    # Command-line options
                           args)       # Command-line arguments
        else:
            # User hasn't made their own GUI, create our default
            # We don't have a default GUI yet either :)
            p = wx.Panel(self)
            p.SetSize((640,480))
            vbox.Add(p, 1, wx.EXPAND)

        self.SetSizer(vbox)
        self.SetAutoLayout(True)
        vbox.Fit(self)

    def shutdown(self):
        # Give the user GUI a chance to clean up; a missing gui attribute
        # or shutdown method is fine (default GUI path), hence the
        # deliberate AttributeError swallow.
        try:
            self.gui.shutdown()
        except AttributeError:
            pass
#
# Top-level window frame with menu and status bars.
#
class top_frame(wx.Frame):
    """Top-level window frame with menu and status bars.

    Hosts the top_panel, wires the File->Exit menu and window-close event,
    optionally enables realtime scheduling and starts the flowgraph.
    """
    def __init__ (self, top_block, gui, options, args,
                  title, nstatus, start, realtime):
        wx.Frame.__init__(self, None, -1, title)
        self.top_block = top_block

        self.CreateStatusBar(nstatus)
        mainmenu = wx.MenuBar()
        self.SetMenuBar(mainmenu)

        menu = wx.Menu()
        item = menu.Append(200, 'E&xit', 'Exit Application') # FIXME magic ID
        self.Bind(wx.EVT_MENU, self.OnCloseWindow, item)
        mainmenu.Append(menu, "&File")
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)

        # Create main panel, creates user GUI class with supplied parameters
        self.panel = top_panel(self, top_block, gui, options, args)

        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(self.panel, 1, wx.EXPAND)
        self.SetSizer(vbox)
        self.SetAutoLayout(True)
        vbox.Fit(self)

        if realtime:
            # Best effort: report failure in the status bar, keep running.
            if gr.enable_realtime_scheduling() != gr.RT_OK:
                self.SetStatusText("Failed to enable realtime scheduling")

        if start and self.top_block is not None:
            self.top_block.start()

    def OnCloseWindow(self, event):
        # Give user API a chance to do something
        self.panel.shutdown()

        # Stop flowgraph as a convenience
        self.SetStatusText("Ensuring flowgraph has completed before exiting...")

        if self.top_block is not None:
            self.top_block.stop()
            self.top_block.wait()

        self.Destroy()
#
# Top-level wxPython application object. User creates or subclasses this
# in their GUI script.
#
class app(wx.App):
    """Top-level wxPython application object.

    Users create or subclass this in their GUI script; the constructor
    parameters are forwarded to the top_frame built in OnInit().
    """
    def __init__ (self, top_block=None, gui=None, options=None, args=None,
                  title="GNU Radio", nstatus=1, start=False, realtime=False):
        self.top_block = top_block
        self.gui = gui
        self.options = options
        self.args = args
        self.title = title
        self.nstatus = nstatus
        self.start = start
        self.realtime = realtime

        # Note: wx.App.__init__ invokes OnInit() below, so all attributes
        # must be set before this call.
        wx.App.__init__ (self, redirect=False)

    def OnInit(self):
        # Pass user parameters to top window frame
        frame = top_frame(self.top_block, self.gui, self.options, self.args,
                          self.title, self.nstatus, self.start, self.realtime)
        frame.Show(True)
        self.SetTopWindow(frame)
        return True
shakamunyi/neutron | neutron/tests/functional/agent/linux/simple_daemon.py | 48 | 1789 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_config import cfg
from neutron.agent.linux import daemon
def main():
    """Entry point for the dummy daemon used by functional tests.

    Registers the required --uuid/--pid_file CLI options, then starts a
    daemon that writes its pid file and sleeps forever in the background.
    """
    class SimpleDaemon(daemon.Daemon):
        """The purpose of this daemon is to serve as an example, and also as
        a dummy daemon, which can be invoked by functional testing, it
        does nothing but setting the pid file, and staying detached in the
        background.
        """

        def run(self):
            while True:
                time.sleep(10)

    # NOTE(review): _() here relies on a gettext/i18n hook being installed
    # into builtins by the surrounding framework — confirm before reuse.
    opts = [
        cfg.StrOpt('uuid',
                   help=_('uuid provided from the command line '
                          'so external_process can track us via /proc/'
                          'cmdline interface.'),
                   required=True),
        cfg.StrOpt('pid_file',
                   help=_('Location of pid file of this process.'),
                   required=True)
    ]

    cfg.CONF.register_cli_opts(opts)
    # Don't get the default configuration file
    cfg.CONF(project='neutron', default_config_files=[])
    simple_daemon = SimpleDaemon(cfg.CONF.pid_file,
                                 uuid=cfg.CONF.uuid)
    simple_daemon.start()


if __name__ == "__main__":
    main()
| apache-2.0 |
t794104/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ssh_local_key.py | 24 | 8385 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssh_local_key
short_description: SSH proxy local keys in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall_ssh feature and local_key category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip adress.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_ssh_local_key:
description:
- SSH proxy local keys.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
name:
description:
- SSH proxy local key name.
required: true
password:
description:
- Password for SSH private key.
private-key:
description:
- SSH proxy private key, encrypted with a password.
public-key:
description:
- SSH proxy public key.
source:
description:
- SSH proxy local key source type.
choices:
- built-in
- user
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: SSH proxy local keys.
fortios_firewall_ssh_local_key:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_ssh_local_key:
state: "present"
name: "default_name_3"
password: "<your_own_value>"
private-key: "<your_own_value>"
public-key: "<your_own_value>"
source: "built-in"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Authenticate the module-global FortiOSAPI handle `fos` against the
    device described by the module parameters (host/username/password),
    honouring the optional `https` flag."""
    host = data['host']
    username = data['username']
    password = data['password']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password)
def filter_firewall_ssh_local_key_data(json):
    """Return a copy of the module's firewall_ssh_local_key options that
    keeps only the API-recognised keys whose values are set (not None)."""
    option_list = ['name', 'password', 'private-key',
                   'public-key', 'source']
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def firewall_ssh_local_key(data, fos):
    """Create/update or delete a firewall.ssh local-key object on the
    device, depending on the requested `state`.

    Returns the FortiOSAPI response dict.  NOTE(review): a `state` value
    other than present/absent falls through and returns None implicitly.
    """
    vdom = data['vdom']
    firewall_ssh_local_key_data = data['firewall_ssh_local_key']
    # Strip unset/unknown options before sending the payload to the API.
    filtered_data = filter_firewall_ssh_local_key_data(firewall_ssh_local_key_data)
    if firewall_ssh_local_key_data['state'] == "present":
        return fos.set('firewall.ssh',
                       'local-key',
                       data=filtered_data,
                       vdom=vdom)

    elif firewall_ssh_local_key_data['state'] == "absent":
        # Deletion is keyed by the object's name (mkey).
        return fos.delete('firewall.ssh',
                          'local-key',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def fortios_firewall_ssh(data, fos):
    """Log in, dispatch to the requested firewall.ssh configuration
    function, and log out.

    Returns the (is_error, has_changed, result) triple consumed by main().
    """
    login(data)

    # Explicit name->function dispatch table.  The previous implementation
    # resolved the handler with eval() on the option name, which is both
    # unsafe and opaque; a plain mapping gives the same behavior.
    handlers = {
        'firewall_ssh_local_key': firewall_ssh_local_key,
    }

    resp = None
    for method, handler in handlers.items():
        if data[method]:
            resp = handler(data, fos)
            break

    fos.logout()
    # As before, reaching this point without any handler having run is a
    # programming error (resp stays None and the subscript below raises).
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Ansible module entry point: declare the argument spec, connect to
    the FortiGate with fortiosapi and apply the requested configuration."""
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "firewall_ssh_local_key": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "name": {"required": True, "type": "str"},
                "password": {"required": False, "type": "str"},
                "private-key": {"required": False, "type": "str"},
                "public-key": {"required": False, "type": "str"},
                "source": {"required": False, "type": "str",
                           "choices": ["built-in", "user"]}

            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency is reported
    # through Ansible's fail_json instead of a raw ImportError.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    # The module-global handle is used by login()/the dispatch functions.
    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
stanley-cheung/grpc | src/python/grpcio_channelz/grpc_channelz/v1/channelz.py | 10 | 2419 | # Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Channelz debug service implementation in gRPC Python."""
import sys
import grpc
import grpc_channelz.v1.channelz_pb2_grpc as _channelz_pb2_grpc
from grpc_channelz.v1._servicer import ChannelzServicer
_add_channelz_servicer_doc = """Add Channelz servicer to a server.
Channelz servicer is in charge of
pulling information from C-Core for entire process. It will allow the
server to response to Channelz queries.
The Channelz statistic is enabled by default inside C-Core. Whether the
statistic is enabled or not is isolated from adding Channelz servicer.
That means you can query Channelz info with a Channelz-disabled channel,
and you can add Channelz servicer to a Channelz-disabled server.
The Channelz statistic can be enabled or disabled by channel option
'grpc.enable_channelz'. Set to 1 to enable, set to 0 to disable.
This is an EXPERIMENTAL API.
Args:
server: A gRPC server to which Channelz service will be added.
"""
# The asyncio-based servicer requires Python >= 3.6.  Compare the whole
# version tuple: the previous component-wise check
# (version_info[0] >= 3 and version_info[1] >= 6) would wrongly reject a
# hypothetical major version whose minor is < 6 and is harder to read.
if sys.version_info >= (3, 6):
    from grpc_channelz.v1 import _async as aio

    def add_channelz_servicer(server):
        # Pick the async servicer for aio servers, the sync one otherwise.
        if isinstance(server, grpc.experimental.aio.Server):
            _channelz_pb2_grpc.add_ChannelzServicer_to_server(
                aio.ChannelzServicer(), server)
        else:
            _channelz_pb2_grpc.add_ChannelzServicer_to_server(
                ChannelzServicer(), server)

    add_channelz_servicer.__doc__ = _add_channelz_servicer_doc

    __all__ = [
        "aio",
        "add_channelz_servicer",
        "ChannelzServicer",
    ]
else:

    def add_channelz_servicer(server):
        _channelz_pb2_grpc.add_ChannelzServicer_to_server(
            ChannelzServicer(), server)

    add_channelz_servicer.__doc__ = _add_channelz_servicer_doc

    __all__ = [
        "add_channelz_servicer",
        "ChannelzServicer",
    ]
| apache-2.0 |
marcoarruda/MissionPlanner | Lib/chunk.py | 65 | 5539 | """Simple class to read IFF chunks.
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
Format)) has the following structure:
+----------------+
| ID (4 bytes) |
+----------------+
| size (4 bytes) |
+----------------+
| data |
| ... |
+----------------+
The ID is a 4-byte string which identifies the type of chunk.
The size field (a 32-bit value, encoded using big-endian byte order)
gives the size of the whole chunk, including the 8-byte header.
Usually an IFF-type file consists of one or more chunks. The proposed
usage of the Chunk class defined here is to instantiate an instance at
the start of each chunk and read from the instance until it reaches
the end, after which a new instance can be instantiated. At the end
of the file, creating a new instance will fail with a EOFError
exception.
Usage:
while True:
try:
chunk = Chunk(file)
except EOFError:
break
chunktype = chunk.getname()
while True:
data = chunk.read(nbytes)
if not data:
pass
# do something with data
The interface is file-like. The implemented methods are:
read, close, seek, tell, isatty.
Extra methods are: skip() (called by close, skips to the end of the chunk),
getname() (returns the name (ID) of the chunk)
The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries. The
default is 1, i.e. aligned.
"""
class Chunk:
    """File-like reader for a single IFF chunk (see module docstring).

    Exposes the chunk payload through read/seek/tell/close/isatty;
    skip() (also called by close()) leaves the underlying file positioned
    at the start of the next chunk, honouring 2-byte alignment when
    ``align`` is true.

    NOTE: ``raise Exc, "msg"`` statements were rewritten as
    ``raise Exc("msg")`` — identical behavior on Python 2 but also valid
    Python 3 syntax, so the module at least parses everywhere.
    """

    def __init__(self, file, align=True, bigendian=True, inclheader=False):
        import struct
        self.closed = False
        self.align = align      # whether to align to word (2-byte) boundaries
        if bigendian:
            strflag = '>'
        else:
            strflag = '<'
        self.file = file
        self.chunkname = file.read(4)
        if len(self.chunkname) < 4:
            # Fewer than 4 bytes left: no further chunk in the file.
            raise EOFError
        try:
            self.chunksize = struct.unpack(strflag+'L', file.read(4))[0]
        except struct.error:
            # Truncated size field is also treated as end-of-file.
            raise EOFError
        if inclheader:
            self.chunksize = self.chunksize - 8 # subtract header
        self.size_read = 0
        # Seekability is probed once; unseekable files fall back to
        # read-based skipping.
        try:
            self.offset = self.file.tell()
        except (AttributeError, IOError):
            self.seekable = False
        else:
            self.seekable = True

    def getname(self):
        """Return the name (ID) of the current chunk."""
        return self.chunkname

    def getsize(self):
        """Return the size of the current chunk."""
        return self.chunksize

    def close(self):
        if not self.closed:
            self.skip()
            self.closed = True

    def isatty(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return False

    def seek(self, pos, whence=0):
        """Seek to specified position into the chunk.
        Default position is 0 (start of chunk).
        If the file is not seekable, this will result in an error.
        """

        if self.closed:
            raise ValueError("I/O operation on closed file")
        if not self.seekable:
            raise IOError("cannot seek")
        if whence == 1:
            pos = pos + self.size_read
        elif whence == 2:
            pos = pos + self.chunksize
        if pos < 0 or pos > self.chunksize:
            # Historic behavior: out-of-range seeks raise RuntimeError.
            raise RuntimeError
        self.file.seek(self.offset + pos, 0)
        self.size_read = pos

    def tell(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.size_read

    def read(self, size=-1):
        """Read at most size bytes from the chunk.
        If size is omitted or negative, read until the end
        of the chunk.
        """

        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.size_read >= self.chunksize:
            return ''
        if size < 0:
            size = self.chunksize - self.size_read
        if size > self.chunksize - self.size_read:
            size = self.chunksize - self.size_read
        data = self.file.read(size)
        self.size_read = self.size_read + len(data)
        if self.size_read == self.chunksize and \
           self.align and \
           (self.chunksize & 1):
            # Consume the pad byte of an odd-sized chunk once fully read.
            dummy = self.file.read(1)
            self.size_read = self.size_read + len(dummy)
        return data

    def skip(self):
        """Skip the rest of the chunk.
        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """

        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.seekable:
            try:
                n = self.chunksize - self.size_read
                # maybe fix alignment
                if self.align and (self.chunksize & 1):
                    n = n + 1
                self.file.seek(n, 1)
                self.size_read = self.size_read + n
                return
            except IOError:
                pass
        # Unseekable fallback: drain the remainder in bounded reads.
        while self.size_read < self.chunksize:
            n = min(8192, self.chunksize - self.size_read)
            dummy = self.read(n)
            if not dummy:
                raise EOFError
| gpl-3.0 |
roadmapper/ansible | test/units/modules/network/f5/test_bigip_profile_analytics.py | 22 | 3514 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_analytics import ApiParameters
from library.modules.bigip_profile_analytics import ModuleParameters
from library.modules.bigip_profile_analytics import ModuleManager
from library.modules.bigip_profile_analytics import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_analytics import ApiParameters
from ansible.modules.network.f5.bigip_profile_analytics import ModuleParameters
from ansible.modules.network.f5.bigip_profile_analytics import ModuleManager
from ansible.modules.network.f5.bigip_profile_analytics import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
# Directory holding the on-disk fixture files, plus a cache of
# already-loaded fixtures keyed by absolute file path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the named fixture, parsed as JSON when possible.

    Each file is read at most once per run; subsequent calls are served
    from the ``fixture_data`` cache.  Fixtures that are not valid JSON
    are returned as their raw text.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as handle:
        contents = handle.read()

    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON; keep the raw file contents.
        pass

    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters(self):
        raw = dict(
            name='foo',
            parent='bar',
            description='foo',
            collect_geo=True,
            collect_ip=True,
        )

        params = ModuleParameters(params=raw)
        # Booleans are normalised to the 'yes'/'no' strings BIG-IP expects,
        # and the parent profile is expanded to a full partition path.
        assert params.name == 'foo'
        assert params.parent == '/Common/bar'
        assert params.description == 'foo'
        assert params.collect_geo == 'yes'
        assert params.collect_ip == 'yes'

    def test_api_parameters(self):
        raw = load_fixture('load_ltm_profile_analytics_1.json')

        params = ApiParameters(params=raw)
        assert params.name == 'foo'
        assert params.collect_geo == 'no'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager against a mocked device connection."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Simulate the task arguments Ansible would hand to the module.
        set_module_args(dict(
            name='foo',
            parent='bar',
            description='foo',
            collect_geo=True,
            collect_ip=True,
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=module)

        # Force the "profile does not exist yet" path and stub out the
        # device call so no real API traffic happens.
        manager.exists = Mock(return_value=False)
        manager.create_on_device = Mock(return_value=True)

        results = manager.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
rjurney/Agile_Data_Code_2 | ch02/pyspark_mongodb.py | 1 | 2096 | # This code sample is meant to be executed line-by-line in a
# pyspark session.
#
# Prior to launching pyspark, run the following line in the
# shell where pyspark will be launched.
#
# export PYSPARK_DRIVER_PYTHON=ipython
#
# The pyspark launch command needs to have additional command line
# arguments passed to ensure that Java classes used to connect to
# MongoDB are found.
#
# The Java classes reside in JAR files that were
# preinstalled via the bootstrap.sh script and placed in the
# lib directory. You will need to note the version of the
# libraries by inspecting the JAR filenames. For example,
# if running the following shell command:
#
# $ ls Agile_Data_Code_2/lib/mongo*.jar
#
# yields the following listing:
#
# Agile_Data_Code_2/lib/mongo-hadoop-2.0.2.jar
# Agile_Data_Code_2/lib/mongo-hadoop-spark-2.0.2.jar
# Agile_Data_Code_2/lib/mongo-java-driver-3.6.1.jar
#
# then the mongo-hadoop version would be 2.0.2, and the
# Mongo-Java version would be 3.6.1.
#
# Choosing to set these versions as environment variables
# will make the invocation of the command much less error
# prone.
#
# MONGOHADOOP_VERSION=2.0.2
# MONGOJAVA_VERSION=3.6.1
#
# The names of the JAR files can then be pieced together
# from the version strings.
#
# MONGOHADOOPSPARK_JAR=./lib/mongo-hadoop-spark-$MONGOHADOOP_VERSION.jar
# MONGOJAVADRIVER_JAR=./lib/mongo-java-driver-$MONGOJAVA_VERSION.jar
# MONGOHADOOP_JAR=./lib/mongo-hadoop-$MONGOHADOOP_VERSION.jar
#
# You can then launch the pyspark session using the following
# shell command from the Agile_Data_Code_2 directory:
#
# pyspark \
# --jars $MONGOHADOOPSPARK_JAR,$MONGOJAVADRIVER_JAR,$MONGOHADOOP_JAR \
# --driver-class-path $MONGOHADOOPSPARK_JAR:$MONGOJAVADRIVER_JAR:$MONGOHADOOP_JAR
import pymongo_spark
# Important: activate pymongo_spark.
# NOTE(review): activate() presumably patches Spark RDDs with
# saveToMongoDB() (used below) -- confirm against the pymongo_spark docs.
pymongo_spark.activate()
# `sc` is the SparkContext provided by the interactive pyspark session
# (see the launch instructions above); it is intentionally not defined here.
csv_lines = sc.textFile("data/example.csv")
# Naive CSV split: no quoting/escaping support.
data = csv_lines.map(lambda line: line.split(","))
# Map each row to a dict; assumes exactly three columns per row
# (name, company, title) -- TODO confirm against data/example.csv.
schema_data = data.map(lambda x: {'name': x[0], 'company': x[1], 'title': x[2]})
# Persist the records into the `executives` collection of the
# `agile_data_science` database on the local MongoDB instance.
schema_data.saveToMongoDB('mongodb://localhost:27017/agile_data_science.executives')
| mit |
Dhivyap/ansible | lib/ansible/modules/network/nxos/nxos_bfd_global.py | 18 | 9548 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_bfd_global
extends_documentation_fragment: nxos
version_added: "2.9"
short_description: Bidirectional Forwarding Detection (BFD) global-level configuration
description:
- Manages Bidirectional Forwarding Detection (BFD) global-level configuration.
author:
- Chris Van Heuveln (@chrisvanheuveln)
notes:
- Tested against NXOSv 9.2(2)
- BFD global will automatically enable 'feature bfd' if it is disabled.
- BFD global does not have a 'state' parameter. All of the BFD commands are unique and are defined if 'feature bfd' is enabled.
options:
# Top-level commands
echo_interface:
description:
- Loopback interface used for echo frames.
- Valid values are loopback interface name or 'deleted'.
- Not supported on N5K/N6K
required: false
type: str
echo_rx_interval:
description:
- BFD Echo receive interval in milliseconds.
required: false
type: int
interval:
description:
- BFD interval timer values.
- Value must be a dict defining values for keys (tx, min_rx, and multiplier)
required: false
type: dict
slow_timer:
description:
- BFD slow rate timer in milliseconds.
required: false
type: int
startup_timer:
description:
- BFD delayed startup timer in seconds.
- Not supported on N5K/N6K/N7K
required: false
type: int
# IPv4/IPv6 specific commands
ipv4_echo_rx_interval:
description:
- BFD IPv4 session echo receive interval in milliseconds.
required: false
type: int
ipv4_interval:
description:
- BFD IPv4 interval timer values.
- Value must be a dict defining values for keys (tx, min_rx, and multiplier).
required: false
type: dict
ipv4_slow_timer:
description:
- BFD IPv4 slow rate timer in milliseconds.
required: false
type: int
ipv6_echo_rx_interval:
description:
- BFD IPv6 session echo receive interval in milliseconds.
required: false
type: int
ipv6_interval:
description:
- BFD IPv6 interval timer values.
- Value must be a dict defining values for keys (tx, min_rx, and multiplier).
required: false
type: dict
ipv6_slow_timer:
description:
- BFD IPv6 slow rate timer in milliseconds.
required: false
type: int
# Fabricpath commands
fabricpath_interval:
description:
- BFD fabricpath interval timer values.
- Value must be a dict defining values for keys (tx, min_rx, and multiplier).
required: false
type: dict
fabricpath_slow_timer:
description:
- BFD fabricpath slow rate timer in milliseconds.
required: false
type: int
fabricpath_vlan:
description:
- BFD fabricpath control vlan.
required: false
type: int
'''
EXAMPLES = '''
- nxos_bfd_global:
echo_interface: Ethernet1/2
echo_rx_interval: 50
interval:
tx: 50
min_rx: 50
multiplier: 4
'''
RETURN = '''
cmds:
description: commands sent to the device
returned: always
type: list
sample: ["bfd echo-interface loopback1", "bfd slow-timer 2000"]
'''
import re
from ansible.module_utils.network.nxos.nxos import NxosCmdRef
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import load_config
from ansible.module_utils.basic import AnsibleModule
BFD_CMD_REF = """
# The cmd_ref is a yaml formatted list of module commands.
# A leading underscore denotes a non-command variable; e.g. _template.
# BFD does not have convenient json data so this cmd_ref uses raw cli configs.
---
_template: # _template holds common settings for all commands
# Enable feature bfd if disabled
feature: bfd
# Common get syntax for BFD commands
get_command: show run bfd all | incl '^(no )*bfd'
echo_interface:
kind: str
getval: (no )*bfd echo-interface *(\\S+)*$
setval: 'bfd echo-interface {0}'
default: ~
echo_rx_interval:
_exclude: ['N5K', 'N6K']
kind: int
getval: bfd echo-rx-interval (\\d+)$
setval: bfd echo-rx-interval {0}
default: 50
N3K:
default: 250
interval:
kind: dict
getval: bfd interval (?P<tx>\\d+) min_rx (?P<min_rx>\\d+) multiplier (?P<multiplier>\\d+)
setval: bfd interval {tx} min_rx {min_rx} multiplier {multiplier}
default: &def_interval
tx: 50
min_rx: 50
multiplier: 3
N3K:
default: &n3k_def_interval
tx: 250
min_rx: 250
multiplier: 3
slow_timer:
kind: int
getval: bfd slow-timer (\\d+)$
setval: bfd slow-timer {0}
default: 2000
startup_timer:
_exclude: ['N5K', 'N6K', 'N7K']
kind: int
getval: bfd startup-timer (\\d+)$
setval: bfd startup-timer {0}
default: 5
# IPv4/IPv6 specific commands
ipv4_echo_rx_interval:
_exclude: ['N5K', 'N6K']
kind: int
getval: bfd ipv4 echo-rx-interval (\\d+)$
setval: bfd ipv4 echo-rx-interval {0}
default: 50
N3K:
default: 250
ipv4_interval:
_exclude: ['N5K', 'N6K']
kind: dict
getval: bfd ipv4 interval (?P<tx>\\d+) min_rx (?P<min_rx>\\d+) multiplier (?P<multiplier>\\d+)
setval: bfd ipv4 interval {tx} min_rx {min_rx} multiplier {multiplier}
default: *def_interval
N3K:
default: *n3k_def_interval
ipv4_slow_timer:
_exclude: ['N5K', 'N6K']
kind: int
getval: bfd ipv4 slow-timer (\\d+)$
setval: bfd ipv4 slow-timer {0}
default: 2000
ipv6_echo_rx_interval:
_exclude: ['N35', 'N5K', 'N6K']
kind: int
getval: bfd ipv6 echo-rx-interval (\\d+)$
setval: bfd ipv6 echo-rx-interval {0}
default: 50
N3K:
default: 250
ipv6_interval:
_exclude: ['N35', 'N5K', 'N6K']
kind: dict
getval: bfd ipv6 interval (?P<tx>\\d+) min_rx (?P<min_rx>\\d+) multiplier (?P<multiplier>\\d+)
setval: bfd ipv6 interval {tx} min_rx {min_rx} multiplier {multiplier}
default: *def_interval
N3K:
default: *n3k_def_interval
ipv6_slow_timer:
_exclude: ['N35', 'N5K', 'N6K']
kind: int
getval: bfd ipv6 slow-timer (\\d+)$
setval: bfd ipv6 slow-timer {0}
default: 2000
# Fabricpath Commands
fabricpath_interval:
_exclude: ['N35', 'N3K', 'N9K']
kind: dict
getval: bfd fabricpath interval (?P<tx>\\d+) min_rx (?P<min_rx>\\d+) multiplier (?P<multiplier>\\d+)
setval: bfd fabricpath interval {tx} min_rx {min_rx} multiplier {multiplier}
default: *def_interval
fabricpath_slow_timer:
_exclude: ['N35', 'N3K', 'N9K']
kind: int
getval: bfd fabricpath slow-timer (\\d+)$
setval: bfd fabricpath slow-timer {0}
default: 2000
fabricpath_vlan:
_exclude: ['N35', 'N3K', 'N9K']
kind: int
getval: bfd fabricpath vlan (\\d+)$
setval: bfd fabricpath vlan {0}
default: 1
"""
def reorder_cmds(cmds):
    '''Return *cmds* with the echo-related commands moved to the end.

    There is a bug in some image versions where bfd echo-interface and
    bfd echo-rx-interval need to be applied last for them to nvgen properly.
    '''
    echo_intf_cmds = [cmd for cmd in cmds
                      if cmd.startswith('bfd echo-interface')]
    echo_rx_cmds = [cmd for cmd in cmds
                    if cmd.startswith('bfd echo-rx-interval')]
    remainder = [cmd for cmd in cmds
                 if not cmd.startswith(('bfd echo-interface',
                                        'bfd echo-rx-interval'))]
    return remainder + echo_intf_cmds + echo_rx_cmds
def main():
    """Module entry point: apply BFD global configuration via NxosCmdRef."""
    # Every option is optional and differs only in its value type, so
    # build the spec from per-type name lists rather than one literal
    # dict entry per option.
    str_options = ('echo_interface',)
    dict_options = (
        'interval',
        'ipv4_interval',
        'ipv6_interval',
        'fabricpath_interval',
    )
    int_options = (
        'echo_rx_interval',
        'slow_timer',
        'startup_timer',
        'ipv4_echo_rx_interval',
        'ipv4_slow_timer',
        'ipv6_echo_rx_interval',
        'ipv6_slow_timer',
        'fabricpath_slow_timer',
        'fabricpath_vlan',
    )
    argument_spec = {}
    for names, opt_type in ((str_options, 'str'),
                            (dict_options, 'dict'),
                            (int_options, 'int')):
        for name in names:
            argument_spec[name] = dict(required=False, type=opt_type)
    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    warnings = list()

    # Compare existing device state against play values and build the
    # minimal command list, reordered for the echo-command nvgen bug.
    cmd_ref = NxosCmdRef(module, BFD_CMD_REF)
    cmd_ref.get_existing()
    cmd_ref.get_playvals()
    cmds = reorder_cmds(cmd_ref.get_proposed())

    result = {'changed': False, 'commands': cmds, 'warnings': warnings,
              'check_mode': module.check_mode}
    if cmds:
        result['changed'] = True
        if not module.check_mode:
            load_config(module, cmds)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Eaglemania/ASS | pyglet/text/formats/structured.py | 39 | 8965 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Base class for structured (hierarchical) document formats.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import re
import pyglet
class ImageElement(pyglet.text.document.InlineElement):
    """Inline document element that renders an image within laid-out text."""

    def __init__(self, image, width=None, height=None):
        self.image = image.get_texture()
        # Fall back to the image's own dimensions when an explicit size is
        # not given.  (The previous ``a and b or c`` idiom returned None
        # for a zero-width/height image instead of the image dimension.)
        if width is None:
            width = image.width
        if height is None:
            height = image.height
        self.width = width
        self.height = height
        self.vertex_lists = {}

        # Scale the anchor proportionally to the requested height, then
        # split the element's extent into ascent above / descent below
        # the baseline.
        anchor_y = self.height // image.height * image.anchor_y
        ascent = max(0, self.height - anchor_y)
        descent = min(0, -anchor_y)
        super(ImageElement, self).__init__(ascent, descent, self.width)

    def place(self, layout, x, y):
        """Create the textured quad for this image in *layout*'s batch."""
        group = pyglet.graphics.TextureGroup(self.image.texture,
                                             layout.top_group)
        x1 = x
        y1 = y + self.descent
        x2 = x + self.width
        y2 = y + self.height + self.descent
        vertex_list = layout.batch.add(4, pyglet.gl.GL_QUADS, group,
            ('v2i', (x1, y1, x2, y1, x2, y2, x1, y2)),
            ('c3B', (255, 255, 255) * 4),
            ('t3f', self.image.tex_coords))
        self.vertex_lists[layout] = vertex_list

    def remove(self, layout):
        """Delete and forget the vertex list created for *layout*."""
        self.vertex_lists[layout].delete()
        del self.vertex_lists[layout]
def _int_to_roman(input):
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81611
if not 0 < input < 4000:
raise ValueError, "Argument must be between 1 and 3999"
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
class ListBuilder(object):
    """Maintains indentation/tab-stop style for a document list."""

    def begin(self, decoder, style):
        '''Begin a list.

        :Parameters:
            `decoder` : `StructuredTextDecoder`
                Decoder.
            `style` : dict
                Style dictionary that applies over the entire list.

        '''
        # Indent the whole list 50px past the current margin and add a
        # matching tab stop so item marks line up with the item text.
        base_margin = decoder.current_style.get('margin_left') or 0
        existing_stops = decoder.current_style.get('tab_stops')
        if existing_stops:
            tab_stops = list(existing_stops)
        else:
            tab_stops = []
        tab_stops.append(base_margin + 50)

        style['margin_left'] = base_margin + 50
        style['indent'] = -30
        style['tab_stops'] = tab_stops

    def item(self, decoder, style, value=None):
        '''Begin a list item.

        :Parameters:
            `decoder` : `StructuredTextDecoder`
                Decoder.
            `style` : dict
                Style dictionary that applies over the list item.
            `value` : str
                Optional value of the list item.  The meaning is list-type
                dependent.

        '''
        mark_text = self.get_mark(value)
        if mark_text:
            decoder.add_text(mark_text)
        decoder.add_text('\t')

    def get_mark(self, value=None):
        '''Get the mark text for the next list item.

        :Parameters:
            `value` : str
                Optional value of the list item.  The meaning is list-type
                dependent.

        :rtype: str
        '''
        # Plain lists have no mark; subclasses override this.
        return ''
class UnorderedListBuilder(ListBuilder):
    """List builder whose every item carries the same literal mark."""

    def __init__(self, mark):
        '''Create an unordered list with constant mark text.

        :Parameters:
            `mark` : str
                Mark to prepend to each list item.

        '''
        self.mark = mark

    def get_mark(self, value):
        # The value is irrelevant for unordered lists; the mark is fixed.
        return self.mark
class OrderedListBuilder(ListBuilder):
    """List builder that numbers items with a configurable scheme."""

    format_re = re.compile('(.*?)([1aAiI])(.*)')

    def __init__(self, start, format):
        '''Create an ordered list with sequentially numbered mark text.

        The format is composed of an optional prefix text, a numbering
        scheme character followed by suffix text. Valid numbering schemes
        are:

        ``1``
            Decimal Arabic
        ``a``
            Lowercase alphanumeric
        ``A``
            Uppercase alphanumeric
        ``i``
            Lowercase Roman
        ``I``
            Uppercase Roman

        Prefix text may typically be ``(`` or ``[`` and suffix text is
        typically ``.``, ``)`` or empty, but either can be any string.

        :Parameters:
            `start` : int
                First list item number.
            `format` : str
                Format style, for example ``"1."``.

        '''
        self.next_value = start

        self.prefix, self.numbering, self.suffix = self.format_re.match(format).groups()
        # NOTE: asserts are stripped under ``python -O``; kept for API
        # parity (callers see AssertionError on an invalid scheme).
        assert self.numbering in '1aAiI'

    def get_mark(self, value):
        if value is None:
            value = self.next_value
        self.next_value = value + 1
        if self.numbering in 'aA':
            # BUG FIX: string indexing raises IndexError (not ValueError,
            # which the original caught), so out-of-range values escaped
            # instead of falling back to '?'.  Use an explicit range check,
            # mirroring the roman-numeral fallback below.
            if 1 <= value <= 26:
                mark = 'abcdefghijklmnopqrstuvwxyz'[value - 1]
            else:
                mark = '?'
            if self.numbering == 'A':
                mark = mark.upper()
            return '%s%s%s' % (self.prefix, mark, self.suffix)
        elif self.numbering in 'iI':
            try:
                mark = _int_to_roman(value)
            except ValueError:
                mark = '?'
            if self.numbering == 'i':
                mark = mark.lower()
            return '%s%s%s' % (self.prefix, mark, self.suffix)
        else:
            return '%s%d%s' % (self.prefix, value, self.suffix)
class StructuredTextDecoder(pyglet.text.DocumentDecoder):
    """Base class for decoders of hierarchical (structured) text formats.

    Subclasses override ``decode_structured`` and build the document by
    calling ``add_text``/``add_element`` while maintaining a stack of
    styles via ``push_style``/``pop_style``.
    """

    def decode(self, text, location=None):
        # Reset all per-document state so a decoder instance can be reused.
        self.len_text = 0
        self.current_style = {}
        self.next_style = {}
        self.stack = []
        self.list_stack = []
        self.document = pyglet.text.document.FormattedDocument()
        if location is None:
            location = pyglet.resource.FileLocation('')
        self.decode_structured(text, location)
        return self.document

    def decode_structured(self, text, location):
        # Subclass responsibility.
        raise NotImplementedError('abstract')

    def push_style(self, key, styles):
        # Record the values about to be shadowed so pop_style can restore
        # them later, then apply the new styles.
        shadowed = {}
        for name in styles:
            shadowed[name] = self.current_style.get(name)
        self.stack.append((key, shadowed))
        self.current_style.update(styles)
        self.next_style.update(styles)

    def pop_style(self, key):
        # Tolerate unbalanced markup: popping a key that was never pushed
        # is silently ignored.
        found = False
        for pushed_key, _ in self.stack:
            if pushed_key == key:
                found = True
                break
        if not found:
            return

        # Unwind innermost entries until the matching push is undone,
        # restoring every shadowed style along the way.
        while True:
            pushed_key, shadowed = self.stack.pop()
            self.next_style.update(shadowed)
            self.current_style.update(shadowed)
            if pushed_key == key:
                break

    def add_text(self, text):
        self.document.insert_text(self.len_text, text, self.next_style)
        self.next_style.clear()
        self.len_text += len(text)

    def add_element(self, element):
        # An inline element occupies exactly one position in the text.
        self.document.insert_element(self.len_text, element, self.next_style)
        self.next_style.clear()
        self.len_text += 1
| gpl-2.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/raw/GL/ARB/draw_buffers_blend.py | 1 | 1932 | '''OpenGL extension ARB.draw_buffers_blend
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_draw_buffers_blend'
_DEPRECATED = False
# ctypes wrappers for the four per-draw-buffer blend entry points that
# GL_ARB_draw_buffers_blend adds.  Each takes a draw-buffer index (`buf`)
# rather than acting on the global blend state.  This module is emitted
# by the get_gl_extensions generator (see header) -- regenerate rather
# than hand-edit.
glBlendEquationiARB = platform.createExtensionFunction(
'glBlendEquationiARB',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLenum,),
doc='glBlendEquationiARB(GLuint(buf), GLenum(mode)) -> None',
argNames=('buf','mode',),
deprecated=_DEPRECATED,
)
glBlendEquationSeparateiARB = platform.createExtensionFunction(
'glBlendEquationSeparateiARB',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLenum,constants.GLenum,),
doc='glBlendEquationSeparateiARB(GLuint(buf), GLenum(modeRGB), GLenum(modeAlpha)) -> None',
argNames=('buf','modeRGB','modeAlpha',),
deprecated=_DEPRECATED,
)
glBlendFunciARB = platform.createExtensionFunction(
'glBlendFunciARB',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLenum,constants.GLenum,),
doc='glBlendFunciARB(GLuint(buf), GLenum(src), GLenum(dst)) -> None',
argNames=('buf','src','dst',),
deprecated=_DEPRECATED,
)
glBlendFuncSeparateiARB = platform.createExtensionFunction(
'glBlendFuncSeparateiARB',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLenum,constants.GLenum,constants.GLenum,constants.GLenum,),
doc='glBlendFuncSeparateiARB(GLuint(buf), GLenum(srcRGB), GLenum(dstRGB), GLenum(srcAlpha), GLenum(dstAlpha)) -> None',
argNames=('buf','srcRGB','dstRGB','srcAlpha','dstAlpha',),
deprecated=_DEPRECATED,
)
def glInitDrawBuffersBlendARB():
    '''Return boolean indicating whether this extension is available'''
    # NOTE(review): extension queries generally require a current GL
    # context -- call only after context creation; confirm for this wrapper.
    return extensions.hasGLExtension( EXTENSION_NAME )
| mit |
colab/colab | colab/utils/tests/test_conf.py | 1 | 4536 | import sys
import os
from django.test import TestCase, override_settings, Client
from django.conf import settings
from ..conf import (DatabaseUndefined, validate_database,
InaccessibleSettings, _load_py_file, load_py_settings,
load_colab_apps, load_widgets_settings)
from mock import patch
test_files_dir = "./colab/utils/tests"
class TestConf(TestCase):
    """Tests for the settings/plugin loading helpers in colab.utils.conf."""

    @override_settings(DEBUG=False, DATABASES={
        'default': {
            'NAME': settings.DEFAULT_DATABASE,
        },
    })
    def test_database_undefined(self):
        # With DEBUG off, keeping the placeholder default database must fail.
        with self.assertRaises(DatabaseUndefined):
            validate_database(settings.DATABASES, settings.DEFAULT_DATABASE,
                              settings.DEBUG)

    def test_load_py_file_with_io_error(self):
        self.assertRaises(InaccessibleSettings,
                          _load_py_file, 'settings_test', '/etc/colab/')

    def test_load_py_file_with_syntax_error(self):
        # open() replaces the Python 2-only file() builtin, which no longer
        # exists on Python 3.
        with open('/tmp/settings_with_syntax_error.py', 'w') as temp_settings:
            temp_settings.write('(')

        self.assertRaises(InaccessibleSettings,
                          _load_py_file, 'settings_with_syntax_error', '/tmp')

    def test_load_py_file(self):
        py_settings = _load_py_file('colab_settings', test_files_dir)

        self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
        self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])

        self.assertIn('EMAIL_PORT', py_settings)
        self.assertEqual(py_settings['EMAIL_PORT'], 25)

    @patch('os.getenv', return_value='/path/fake/settings.py')
    def test_load_py_settings_with_inaccessible_settings(self, mock):
        self.assertRaises(InaccessibleSettings, load_py_settings)

    def test_load_py_settings_without_settings_d(self):
        # Temporarily hide COLAB_SETTINGS_DIR so the fallback path is
        # exercised; restore it afterwards even if an assertion fails.
        saved_settings_dir = os.environ.pop('COLAB_SETTINGS_DIR', '')
        try:
            py_settings = load_py_settings('/path/fake/settings.d/test.py')

            self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
            self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])

            self.assertIn('EMAIL_PORT', py_settings)
            self.assertEqual(py_settings['EMAIL_PORT'], 25)
        finally:
            if saved_settings_dir:
                os.environ['COLAB_SETTINGS_DIR'] = saved_settings_dir

    @patch('os.listdir', return_value=[test_files_dir + '/settings.d/test.py',
                                       'non_python_file'])
    @patch('colab.utils.conf._load_py_file',
           side_effect=[{'SOCIAL_NETWORK_ENABLED': True, 'EMAIL_PORT': 25},
                        {'TEST': 'test'}])
    def test_load_py_settings_with_settings_d(self, mock_py, mock_listdir):
        py_settings = load_py_settings(test_files_dir + '/settings.d/')

        self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
        self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])

        self.assertIn('EMAIL_PORT', py_settings)
        self.assertEqual(py_settings['EMAIL_PORT'], 25)

        self.assertIn('TEST', py_settings)
        self.assertEqual(py_settings['TEST'], 'test')

    @patch('os.getenv', return_value='/path/fake/plugins.d/')
    def test_load_colab_apps_without_plugins_d_directory(self, mock):
        colab_apps = load_colab_apps()
        self.assertIn('COLAB_APPS', colab_apps)
        self.assertEqual(colab_apps['COLAB_APPS'], {})

    @patch('os.getenv', return_value=test_files_dir + '/plugins.d/')
    def test_load_colab_apps_with_plugins_d_directory(self, os_getenv):
        sys.path.insert(0, os_getenv.return_value)

        colab_apps = load_colab_apps()
        self.assertIn('gitlab', colab_apps['COLAB_APPS'])
        self.assertIn('noosfero', colab_apps['COLAB_APPS'])

        sys.path.remove(os_getenv.return_value)
        self.assertNotIn(os_getenv.return_value, sys.path)

    @patch('os.getenv', return_value='/path/fake/widgets_settings.py')
    def test_load_widgets_settings_without_settings(self, mock):
        self.assertIsNone(load_widgets_settings())

    @patch('os.getenv', side_effect=[test_files_dir + '/colab_settings.py',
                                     '/path/fake/widgets_settings.py'])
    def test_load_widgets_settings_without_settings_d(self, mock):
        self.assertIsNone(load_widgets_settings())

    def test_blacklist(self):
        client = Client()
        response = client.get('/test_blacklist')
        self.assertEqual(403, response.status_code)
| gpl-2.0 |
Cog-Creators/Red-DiscordBot | redbot/core/checks.py | 4 | 1998 | import warnings
from typing import Awaitable, TYPE_CHECKING, Dict
import discord
from .commands import (
bot_has_permissions,
bot_in_a_guild,
has_permissions,
is_owner,
guildowner,
guildowner_or_permissions,
admin,
admin_or_permissions,
mod,
mod_or_permissions,
)
from .utils.mod import (
is_mod_or_superior as _is_mod_or_superior,
is_admin_or_superior as _is_admin_or_superior,
check_permissions as _check_permissions,
)
if TYPE_CHECKING:
from .bot import Red
from .commands import Context
__all__ = [
"bot_has_permissions",
"bot_in_a_guild",
"has_permissions",
"is_owner",
"guildowner",
"guildowner_or_permissions",
"admin",
"admin_or_permissions",
"mod",
"mod_or_permissions",
"is_mod_or_superior",
"is_admin_or_superior",
"check_permissions",
]
def is_mod_or_superior(ctx: "Context") -> Awaitable[bool]:
    # Deprecated shim kept for backwards compatibility; forwards to the
    # canonical helper in redbot.core.utils.mod.
    message = (
        "`redbot.core.checks.is_mod_or_superior` is deprecated and will be removed in a future "
        "release, please use `redbot.core.utils.mod.is_mod_or_superior` instead."
    )
    warnings.warn(message, category=DeprecationWarning, stacklevel=2)
    return _is_mod_or_superior(ctx.bot, ctx.author)
def is_admin_or_superior(ctx: "Context") -> Awaitable[bool]:
    # Deprecated shim kept for backwards compatibility; forwards to the
    # canonical helper in redbot.core.utils.mod.
    message = (
        "`redbot.core.checks.is_admin_or_superior` is deprecated and will be removed in a future "
        "release, please use `redbot.core.utils.mod.is_admin_or_superior` instead."
    )
    warnings.warn(message, category=DeprecationWarning, stacklevel=2)
    return _is_admin_or_superior(ctx.bot, ctx.author)
def check_permissions(ctx: "Context", perms: Dict[str, bool]) -> Awaitable[bool]:
    # Deprecated shim kept for backwards compatibility; forwards to the
    # canonical helper in redbot.core.utils.mod.
    message = (
        "`redbot.core.checks.check_permissions` is deprecated and will be removed in a future "
        "release, please use `redbot.core.utils.mod.check_permissions`."
    )
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return _check_permissions(ctx, perms)
| gpl-3.0 |
qitianchan/flaskbb | flaskbb/utils/populate.py | 9 | 10091 | # -*- coding: utf-8 -*-
"""
flaskbb.utils.populate
~~~~~~~~~~~~~~~~~~~~
A module that makes creating data more easily
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from flaskbb.management.models import Setting, SettingsGroup
from flaskbb.user.models import User, Group
from flaskbb.forum.models import Post, Topic, Forum, Category
def delete_settings_from_fixture(fixture):
    """Deletes the settings from a fixture from the database.

    Returns the deleted groups and settings.

    :param fixture: The fixture that should be deleted.
    """
    deleted_settings = {}

    for settingsgroup in fixture:
        group = SettingsGroup.query.filter_by(key=settingsgroup[0]).first()
        if group is None:
            # Nothing to delete for this group; mirrors the per-setting
            # existence check below instead of raising AttributeError.
            continue
        deleted_settings[group] = []

        for settings in settingsgroup[1]["settings"]:
            setting = Setting.query.filter_by(key=settings[0]).first()

            if setting:
                deleted_settings[group].append(setting)
                setting.delete()
        group.delete()

    return deleted_settings
def create_settings_from_fixture(fixture):
    """Inserts the settings from a fixture into the database.

    Returns the created groups and settings.

    :param fixture: The fixture which should inserted.
    """
    created_settings = {}

    for group_key, group_data in fixture:
        group = SettingsGroup(
            key=group_key,
            name=group_data["name"],
            description=group_data["description"]
        )
        group.save()
        created_settings[group] = []

        for setting_key, setting_data in group_data["settings"]:
            setting = Setting(
                key=setting_key,
                value=setting_data["value"],
                value_type=setting_data["value_type"],
                name=setting_data["name"],
                description=setting_data["description"],
                extra=setting_data.get("extra", ""),  # Optional field
                settingsgroup=group.key
            )

            if setting:
                setting.save()
                created_settings[group].append(setting)

    return created_settings
def update_settings_from_fixture(fixture, overwrite_group=False,
                                 overwrite_setting=False):
    """Updates the database settings from a fixture.

    Returns the updated groups and settings as a dict mapping each
    ``SettingsGroup`` to the list of ``Setting`` rows touched inside it.

    :param fixture: The fixture which should be inserted/updated.
    :param overwrite_group: Set this to ``True`` if you want to overwrite
                            the group if it already exists.
                            Defaults to ``False``.
    :param overwrite_setting: Set this to ``True`` if you want to overwrite the
                              setting if it already exists.
                              Defaults to ``False``.
    """
    updated_settings = {}
    for settingsgroup in fixture:
        group = SettingsGroup.query.filter_by(key=settingsgroup[0]).first()
        # Upsert the group: refresh it when overwriting is requested,
        # create it when it does not exist, otherwise leave it untouched.
        if (group is not None and overwrite_group) or group is None:
            if group is not None:
                group.name = settingsgroup[1]["name"]
                group.description = settingsgroup[1]["description"]
            else:
                group = SettingsGroup(
                    key=settingsgroup[0],
                    name=settingsgroup[1]["name"],
                    description=settingsgroup[1]["description"]
                )
            group.save()
        updated_settings[group] = []
        for settings in settingsgroup[1]["settings"]:
            setting = Setting.query.filter_by(key=settings[0]).first()
            # Same upsert pattern for the individual settings, controlled by
            # ``overwrite_setting``.
            if (setting is not None and overwrite_setting) or setting is None:
                if setting is not None:
                    setting.value = settings[1]["value"]
                    setting.value_type = settings[1]["value_type"]
                    setting.name = settings[1]["name"]
                    setting.description = settings[1]["description"]
                    setting.extra = settings[1].get("extra", "")
                    setting.settingsgroup = group.key
                else:
                    setting = Setting(
                        key=settings[0],
                        value=settings[1]["value"],
                        value_type=settings[1]["value_type"],
                        name=settings[1]["name"],
                        description=settings[1]["description"],
                        extra=settings[1].get("extra", ""),
                        settingsgroup=group.key
                    )
                setting.save()
                updated_settings[group].append(setting)
    return updated_settings
def create_default_settings():
    """Installs the default settings fixture into the database."""
    from flaskbb.fixtures.settings import fixture as settings_fixture
    create_settings_from_fixture(settings_fixture)
def create_default_groups():
    """Creates the five default groups from the groups fixture.

    Returns the list of created ``Group`` instances.
    """
    from flaskbb.fixtures.groups import fixture
    created_groups = []
    for group_name, attributes in fixture.items():
        group = Group(name=group_name)
        for attr_name, attr_value in attributes.items():
            setattr(group, attr_name, attr_value)
        group.save()
        created_groups.append(group)
    return created_groups
def create_admin_user(username, password, email):
    """Creates and returns the administrator user.

    The new user is placed in the (first) group flagged as admin.

    :param username: The username of the user.
    :param password: The password of the user.
    :param email: The email address of the user.
    """
    admin_group = Group.query.filter_by(admin=True).first()

    admin = User()
    admin.username = username
    admin.password = password
    admin.email = email
    admin.primary_group_id = admin_group.id
    admin.save()
    return admin
def create_welcome_forum():
    """Creates the `welcome forum` with a welcome topic.

    Returns ``True`` on success, ``False`` when no user exists yet.
    """
    if User.query.count() < 1:
        return False

    first_user = User.query.filter_by(id=1).first()

    category = Category(title="My Category", position=1)
    category.save()

    forum = Forum(title="Welcome", description="Your first forum",
                  category_id=category.id)
    forum.save()

    welcome_topic = Topic(title="Welcome!")
    welcome_post = Post(content="Have fun with your new FlaskBB Forum!")
    welcome_topic.save(user=first_user, forum=forum, post=welcome_post)
    return True
def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):
    """Creates the requested number of users, categories, forums, topics and
    posts (plus the default groups and settings).

    Returns the amount of created users, categories, forums, topics and posts
    as a dict.

    :param users: The number of users.
    :param categories: The number of categories.
    :param forums: The number of forums which are created in each category.
    :param topics: The number of topics which are created in each forum.
    :param posts: The number of posts which are created in each topic.
    """
    create_default_groups()
    create_default_settings()
    data_created = {'users': 0, 'categories': 0, 'forums': 0,
                    'topics': 0, 'posts': 0}
    # create the users; each user is put in the group matching its index
    for u in range(1, users + 1):
        username = "test%s" % u
        email = "test%s@example.org" % u
        user = User(username=username, password="test", email=email)
        user.primary_group_id = u
        user.save()
        data_created['users'] += 1
    user1 = User.query.filter_by(id=1).first()
    user2 = User.query.filter_by(id=2).first()
    # lets send them a few private messages
    for i in range(1, 3):
        # TODO
        pass
    # create the categories
    for i in range(1, categories + 1):
        category_title = "Test Category %s" % i
        category = Category(title=category_title,
                            description="Test Description")
        category.save()
        data_created['categories'] += 1
        # create the forums in each category
        for j in range(1, forums + 1):
            if i == 2:
                # offset the forum numbering for the second category so forum
                # titles stay unique across categories
                j += 2
            forum_title = "Test Forum %s %s" % (j, i)
            forum = Forum(title=forum_title, description="Test Description",
                          category_id=i)
            forum.save()
            data_created['forums'] += 1
            for t in range(1, topics + 1):
                # create a topic (authored by user1)
                topic = Topic()
                post = Post()
                topic.title = "Test Title %s" % j
                post.content = "Test Content"
                topic.save(post=post, user=user1, forum=forum)
                data_created['topics'] += 1
                for p in range(1, posts + 1):
                    # create a second post in the topic (authored by user2)
                    post = Post()
                    post.content = "Test Post"
                    post.save(user=user2, topic=topic)
                    data_created['posts'] += 1
    return data_created
def insert_mass_data(topics=100, posts=100):
    """Creates a few topics in the first forum and each topic has
    a few posts. WARNING: This might take very long!

    Returns the count of created topics and posts as a tuple, or ``False``
    when the required users or forum do not exist.

    :param topics: The amount of topics in the forum.
    :param posts: The number of posts in each topic.
    """
    user1 = User.query.filter_by(id=1).first()
    user2 = User.query.filter_by(id=2).first()
    forum = Forum.query.filter_by(id=1).first()

    created_posts = 0
    created_topics = 0

    # Bail out if *any* prerequisite is missing.  The previous test,
    # ``not (user1 or user2 or forum)``, only triggered when all three were
    # absent and crashed later when just one of them was None.
    if not (user1 and user2 and forum):
        return False

    # create the topics
    for i in range(1, topics + 1):
        topic = Topic()
        post = Post()
        topic.title = "Test Title %s" % i
        post.content = "Test Content"
        topic.save(post=post, user=user1, forum=forum)
        created_topics += 1

        # create the posts in each topic
        for j in range(1, posts + 1):
            post = Post()
            post.content = "Test Post"
            post.save(user=user2, topic=topic)
            created_posts += 1

    return created_topics, created_posts
| bsd-3-clause |
voc/voctomix | voctocore/lib/videomix.py | 1 | 16530 | #!/usr/bin/env python3
import logging
from configparser import NoOptionError
from enum import Enum, unique
import gi
gi.require_version('GstController', '1.0')
from gi.repository import Gst
from lib.config import Config
from vocto.transitions import Composites, Transitions, Frame, fade_alpha
from lib.scene import Scene
from lib.overlay import Overlay
from lib.args import Args
from vocto.composite_commands import CompositeCommand
class VideoMix(object):
    """Core video mixing element of voctocore.

    Builds a ``compositor`` based GStreamer pipeline description for all
    configured video sources and background sources, optionally inserts a
    ``gdkpixbufoverlay``, and implements composite switching -- either as a
    hard cut or as an animated transition -- for the control server.
    """

    log = logging.getLogger('VideoMix')

    def __init__(self):
        # read sources from config file
        self.bgSources = Config.getBackgroundSources()
        self.sources = Config.getSources()
        self.log.info('Configuring mixer for %u source(s) and %u background source(s)',
                      len(self.sources), len(self.bgSources))

        # load composites from config
        self.log.info("Reading transitions configuration...")
        self.composites = Config.getComposites()

        # load transitions from configuration
        self.transitions = Config.getTransitions(self.composites)

        self.scene = None
        self.bgScene = None
        self.overlay = None
        Config.getAudioStreams()

        # build GStreamer mixing pipeline descriptor
        self.bin = "" if Args.no_bins else """
            bin.(
                name=VideoMix
            """

        self.bin += """
            compositor
                name=videomixer
            """

        if Config.hasOverlay():
            self.bin += """\
                ! queue
                    max-size-time=3000000000
                    name=queue-overlay
                ! gdkpixbufoverlay
                    name=overlay
                    overlay-width={width}
                    overlay-height={height}
                """.format(
                width=Config.getVideoResolution()[0],
                height=Config.getVideoResolution()[1]
            )

            if Config.getOverlayFile():
                self.bin += """\
                    location={overlay}
                    alpha=1.0
                    """.format(overlay=Config.getOverlayFilePath(Config.getOverlayFile()))
            else:
                self.log.info("No initial overlay source configured.")

        self.bin += """\
            ! identity
                name=sig
            ! {vcaps}
            ! queue
                max-size-time=3000000000
            ! tee
                name=video-mix
            """.format(
            vcaps=Config.getVideoCaps()
        )

        for idx, background in enumerate(self.bgSources):
            self.bin += """
                video-{name}.
                ! queue
                    max-size-time=3000000000
                    name=queue-video-{name}
                ! videomixer.
                """.format(name=background)

        for idx, name in enumerate(self.sources):
            self.bin += """
                video-{name}.
                ! queue
                    max-size-time=3000000000
                    name=queue-cropper-{name}
                ! videobox
                    name=cropper-{name}
                ! queue
                    max-size-time=3000000000
                    name=queue-videomixer-{name}
                ! videomixer.
                """.format(
                name=name,
                idx=idx
            )

        self.bin += "" if Args.no_bins else """)
        """

    def attach(self, pipeline):
        """Binds this mixer to the running *pipeline* and initializes the
        foreground/background scenes and the initial composite."""
        self.log.debug('Binding Handoff-Handler for '
                       'Synchronus mixer manipulation')
        self.pipeline = pipeline
        sig = pipeline.get_by_name('sig')
        sig.connect('handoff', self.on_handoff)

        self.log.debug('Initializing Mixer-State')
        # initialize pipeline bindings for all sources
        self.bgScene = Scene(self.bgSources, pipeline,
                             self.transitions.fps, 0, cropping=False)
        self.scene = Scene(self.sources, pipeline,
                           self.transitions.fps, len(self.bgSources))
        self.compositeMode = None
        self.sourceA = None
        self.sourceB = None
        self.setCompositeEx(Composites.targets(self.composites)[0].name,
                            self.sources[0], self.sources[1])

        if Config.hasOverlay():
            self.overlay = Overlay(
                pipeline, Config.getOverlayFile(), Config.getOverlayBlendTime())

    def __str__(self):
        return 'VideoMix'

    def getPlayTime(self):
        """Returns the current play time of the mixing pipeline in ns."""
        # get play time from mixing pipeline or assume zero
        return self.pipeline.get_pipeline_clock().get_time() - \
            self.pipeline.get_base_time()

    def on_handoff(self, object, buffer):
        """Handoff callback: pushes any pending scene changes into the
        pipeline, synchronized with the buffer flow."""
        playTime = self.getPlayTime()
        if self.bgScene and self.bgScene.dirty:
            # push background scene to gstreamer
            self.log.debug('Applying new background at %d ms',
                           playTime / Gst.MSECOND)
            self.bgScene.push(playTime)
        if self.scene and self.scene.dirty:
            # push scene to gstreamer
            self.log.debug('Applying new mix at %d ms',
                           playTime / Gst.MSECOND)
            self.scene.push(playTime)

    def setCompositeEx(self, newCompositeName=None, newA=None, newB=None,
                       useTransitions=False, dry=False):
        """Switches to composite *newCompositeName* with sources *newA*/*newB*.

        ``None`` parameters are filled in from the current scene. With
        *useTransitions* an animated transition is searched; with *dry* no
        change is applied and the resolved ``(A, B)`` pair (or ``False``)
        is returned instead.
        """
        # expect strings or None as parameters
        assert not newCompositeName or type(newCompositeName) == str
        assert not newA or type(newA) == str
        assert not newB or type(newB) == str

        # get current composite
        if not self.compositeMode:
            curCompositeName = None
            self.log.info("Request composite %s(%s,%s)",
                          newCompositeName, newA, newB)
        else:
            curCompositeName = self.compositeMode
            curA = self.sourceA
            curB = self.sourceB
            self.log.info("Request composite change from %s(%s,%s) to %s(%s,%s)",
                          curCompositeName, curA, curB, newCompositeName, newA, newB)

        # check if there is any None parameter and fill it up with
        # reasonable value from the current scene
        if curCompositeName and not (newCompositeName and newA and newB):
            # use current state if not defined by parameter
            if not newCompositeName:
                newCompositeName = curCompositeName
            if not newA:
                newA = curA if newB != curA else curB
            if not newB:
                newB = curA if newA == curB else curB
            self.log.debug("Completing wildcarded composite to %s(%s,%s)",
                           newCompositeName, newA, newB)

        # post condition: we should have all parameters now
        assert newA != newB
        assert newCompositeName and newA and newB

        # fetch composites
        curComposite = self.composites[curCompositeName] if curCompositeName else None
        newComposite = self.composites[newCompositeName]

        # if new scene is complete
        if newComposite and newA in self.sources and newB in self.sources:
            self.log.debug("New composite shall be %s(%s,%s)",
                           newComposite.name, newA, newB)
            # try to find a matching transition from current to new scene
            transition = None
            targetA, targetB = newA, newB
            if useTransitions:
                if curComposite:
                    old = (curA, curB, newA, newB)
                    # check if we have a three-channel scenario
                    if len(set(old)) == 3:
                        self.log.debug("Current composite includes three different frames: (%s,%s) -> (%s,%s)", *old)
                        # check if current composite hides B
                        if curComposite.single():
                            self.log.debug("Current composite hides channel B so we can secretly change it.")
                            # check for (A,B) -> (A,C)
                            if curA == newA:
                                # change into (A,C) -> (A,C)
                                curB = newB
                            # check for (A,B) -> (C,A)
                            elif curA == newB:
                                # change into (A,C) -> (C,A)
                                curB = newA
                            # check another case where new composite also hides B
                            elif newComposite.single():
                                self.log.debug("New composite also hides channel B so we can secretly change it.")
                                # change (A,B) -> (C,B) into (A,C) -> (C,A)
                                newB = curA
                                curB = newA
                        elif newComposite.single():
                            # check for (A,B) -> (A,C)
                            if curA == newA:
                                newB = curB
                            # check for (A,B) -> (B,C)
                            if curB == newA:
                                newB = curA
                    # check if we have a four-channel scenario
                    if len(set(old)) == 4:
                        self.log.debug("Current composite includes four different frames: (%s,%s) -> (%s,%s)", *old)
                        # check if both composites hide channel B
                        if curComposite.single() and newComposite.single():
                            self.log.debug("Current and new composite hide channel B so we can secretly change it.")
                            # change (A,B) -> (C,D) into (A,C) -> (C,A)
                            curB = newA
                            newB = curA
                    # log if we changed something
                    if old != (curA, curB, newA, newB):
                        self.log.info("Changing requested transition from (%s,%s) -> (%s,%s) to (%s,%s) -> (%s,%s)", *old, curA, curB, newA, newB)
                    swap = False

                    if (curA, curB) == (newA, newB) and curComposite != newComposite:
                        transition, swap = self.transitions.solve(
                            curComposite, newComposite, False)
                    elif (curA, curB) == (newB, newA):
                        transition, swap = self.transitions.solve(
                            curComposite, newComposite, True)
                        if not swap:
                            targetA, targetB = newB, newA
                    # NOTE(fix): the original condition was inverted
                    # (``if transition``) and warned whenever a transition
                    # *was* found.
                    if not transition and not dry:
                        self.log.warning("No transition found")
            if dry:
                return (newA, newB) if transition else False

            # z-orders of A and B
            below = 100
            above = 101
            # found transition?
            if transition:
                # apply found transition
                self.log.debug(
                    "committing transition '%s' to scene", transition.name())
                self.scene.commit(targetA, transition.Az(below, above))
                self.scene.commit(targetB, transition.Bz(above, below))
            else:
                # apply new scene (hard cut)
                self.log.debug(
                    "setting composite '%s' to scene", newComposite.name)
                self.scene.set(targetA, newComposite.Az(below))
                self.scene.set(targetB, newComposite.Bz(above))
            # make all other sources invisible
            for source in self.sources:
                if source not in [targetA, targetB]:
                    self.log.debug("making source %s invisible", source)
                    self.scene.set(source, Frame(True, alpha=0, zorder=-1))

            # get current and new background source by the composites
            curBgSource = Config.getBackgroundSource(curCompositeName)
            newBgSource = Config.getBackgroundSource(newCompositeName)
            if curBgSource != newBgSource:
                # found transition?
                if transition:
                    # apply found transition
                    self.log.debug("committing background fading to scene")
                    # keep showing old background at z-order 0
                    curBgFrame = Frame(True, zorder=0,
                                       rect=[0, 0, *Config.getVideoResolution()])
                    self.bgScene.set(curBgSource, curBgFrame)
                    # fade new background in at z-order 1 so it will cover the old one at end
                    newBgFrame = Frame(True, alpha=0, zorder=1,
                                       rect=[0, 0, *Config.getVideoResolution()])
                    self.bgScene.commit(newBgSource,
                                        fade_alpha(newBgFrame, 255, transition.frames()))
                else:
                    # apply new scene (hard cut)
                    self.log.debug(
                        "setting new background to scene")
                    # just switch to new background
                    bgFrame = Frame(True, zorder=0,
                                    rect=[0, 0, *Config.getVideoResolution()])
                    self.bgScene.set(newBgSource, bgFrame)
                # make all other background sources invisible
                for source in self.bgSources:
                    if source not in [curBgSource, newBgSource]:
                        self.log.debug("making background source %s invisible", source)
                        self.bgScene.set(source, Frame(True, alpha=0, zorder=-1))
        else:
            # report unknown elements of the target scene
            if not newComposite:
                self.log.error("Unknown composite '%s'", newCompositeName)
            if not newA in self.sources:
                self.log.error("Unknown source '%s'", newA)
            if not newB in self.sources:
                self.log.error("Unknown source '%s'", newB)

        # remember scene we've set
        self.compositeMode = newComposite.name
        self.sourceA = newA
        self.sourceB = newB

    def setComposite(self, command, useTransitions=False):
        ''' parse switch to the composite described by string command '''
        # expect string as parameter
        assert type(command) == str
        # parse command
        command = CompositeCommand.from_str(command)
        self.log.debug("Setting new composite by string '%s'", command)
        self.setCompositeEx(command.composite, command.A,
                            command.B, useTransitions)

    def testCut(self, command):
        ''' return the (A,B) pair of a command if it would change the current
        scene, or False if it is identical to the current one '''
        # expect string as parameter
        assert type(command) == str
        # parse command
        command = CompositeCommand.from_str(command)
        if (command.composite != self.compositeMode
                or command.A != self.sourceA
                or command.B != self.sourceB):
            return command.A, command.B
        else:
            return False

    def testTransition(self, command):
        ''' dry-run a transition to the composite described by command '''
        # expect string as parameter
        assert type(command) == str
        # parse command
        command = CompositeCommand.from_str(command)
        self.log.debug("Testing if transition is available to '%s'", command)
        return self.setCompositeEx(command.composite, command.A,
                                   command.B, True, True)

    def getVideoSources(self):
        ''' legacy command '''
        return [self.sourceA, self.sourceB]

    def setVideoSourceA(self, source):
        ''' legacy command '''
        self.setCompositeEx(None, source, None, useTransitions=False)

    def getVideoSourceA(self):
        ''' legacy command '''
        return self.sourceA

    def setVideoSourceB(self, source):
        ''' legacy command '''
        self.setCompositeEx(None, None, source, useTransitions=False)

    def getVideoSourceB(self):
        ''' legacy command '''
        return self.sourceB

    def setCompositeMode(self, mode):
        ''' legacy command '''
        self.setCompositeEx(mode, None, None, useTransitions=False)

    def getCompositeMode(self):
        ''' legacy command '''
        return self.compositeMode

    def getComposite(self):
        ''' legacy command '''
        return str(CompositeCommand(self.compositeMode, self.sourceA, self.sourceB))

    def setOverlay(self, location):
        ''' set up overlay file by location '''
        self.overlay.set(location)

    def showOverlay(self, visible):
        ''' set overlay visibility '''
        self.overlay.show(visible, self.getPlayTime())

    def getOverlay(self):
        ''' get current overlay file location '''
        return self.overlay.get()

    def getOverlayVisible(self):
        ''' get overlay visibility '''
        return self.overlay.visible()
| mit |
aherlihy/mongo-python-driver | bson/json_util.py | 2 | 25013 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for using Python's :mod:`json` module with BSON documents.
This module provides two helper methods `dumps` and `loads` that wrap the native
:mod:`json` methods and provide explicit BSON conversion to and from
JSON. :class:`~bson.json_util.JSONOptions` provides a way to control how JSON is
emitted and parsed, with the default being the legacy PyMongo format.
:mod:`~bson.json_util` can also generate and parse `canonical extended JSON`_ when
:data:`~bson.json_util.CANONICAL_JSON_OPTIONS` is provided.
.. _canonical extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst
Example usage (serialization):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }", {})},
... {'bin': Binary(b"\x01\x02\x03\x04")}])
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]'
Example usage (deserialization):
.. doctest::
>>> from bson.json_util import loads
>>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "00", "$binary": "AQIDBA=="}}]')
[{u'foo': [1, 2]}, {u'bar': {u'hello': u'world'}}, {u'code': Code('function x() { return 1; }', {})}, {u'bin': Binary('...', 0)}]
Example usage (with a :class:`~bson.json_util.JSONOptions` given):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary(b"\x01\x02\x03\x04")}],
... json_options=CANONICAL_JSON_OPTIONS)
'[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]'
Alternatively, you can manually pass the `default` to :func:`json.dumps`.
It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code`
instances (as they are extended strings you can't provide custom defaults),
but it will be faster as there is less recursion.
.. note::
If your application does not need the flexibility offered by
:class:`JSONOptions` and spends a large amount of time in the `json_util`
module, look to
`python-bsonjs <https://pypi.python.org/pypi/python-bsonjs>`_ for a nice
performance improvement. `python-bsonjs` is a fast BSON to MongoDB
Extended JSON converter for Python built on top of
`libbson <https://github.com/mongodb/libbson>`_. `python-bsonjs` works best
with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`.
.. versionchanged:: 2.8
The output format for :class:`~bson.timestamp.Timestamp` has changed from
'{"t": <int>, "i": <int>}' to '{"$timestamp": {"t": <int>, "i": <int>}}'.
This new format will be decoded to an instance of
:class:`~bson.timestamp.Timestamp`. The old format will continue to be
decoded to a python dict as before. Encoding to the old format is no longer
supported as it was never correct and loses type information.
Added support for $numberLong and $undefined - new in MongoDB 2.6 - and
parsing $date in ISO-8601 format.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances.
.. versionchanged:: 2.3
Added dumps and loads helpers to automatically handle conversion to and
from json and supports :class:`~bson.binary.Binary` and
:class:`~bson.code.Code`
"""
import base64
import collections
import datetime
import math
import re
import sys
import uuid
_HAS_OBJECT_PAIRS_HOOK = True
if sys.version_info[:2] == (2, 6):
# In Python 2.6, json does not include object_pairs_hook. Use simplejson
# instead.
try:
import simplejson as json
except ImportError:
import json
_HAS_OBJECT_PAIRS_HOOK = False
else:
import json
from pymongo.errors import ConfigurationError
import bson
from bson import EPOCH_AWARE, RE_TYPE, SON
from bson.binary import (Binary, JAVA_LEGACY, CSHARP_LEGACY, OLD_UUID_SUBTYPE,
UUID_SUBTYPE)
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.decimal128 import Decimal128
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import PY3, iteritems, integer_types, string_type, text_type
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import utc
# Mapping from MongoDB Extended JSON "$options" characters to Python re flags.
_RE_OPT_TABLE = {
    "i": re.I,
    "l": re.L,
    "m": re.M,
    "s": re.S,
    "u": re.U,
    "x": re.X,
}

# Dollar-prefixed keys which may appear in DBRefs.
_DBREF_KEYS = frozenset(['$id', '$ref', '$db'])
class DatetimeRepresentation:
    """Namespace of the supported JSON encodings for
    :class:`datetime.datetime` values (see :class:`JSONOptions`)."""

    LEGACY = 0
    """Legacy MongoDB Extended JSON datetime representation.

    :class:`datetime.datetime` instances will be encoded to JSON in the
    format `{"$date": <dateAsMilliseconds>}`, where `dateAsMilliseconds` is
    a 64-bit signed integer giving the number of milliseconds since the Unix
    epoch UTC. This was the default encoding before PyMongo version 3.4.

    .. versionadded:: 3.4
    """

    NUMBERLONG = 1
    """NumberLong datetime representation.

    :class:`datetime.datetime` instances will be encoded to JSON in the
    format `{"$date": {"$numberLong": "<dateAsMilliseconds>"}}`,
    where `dateAsMilliseconds` is the string representation of a 64-bit signed
    integer giving the number of milliseconds since the Unix epoch UTC.

    .. versionadded:: 3.4
    """

    ISO8601 = 2
    """ISO-8601 datetime representation.

    :class:`datetime.datetime` instances greater than or equal to the Unix
    epoch UTC will be encoded to JSON in the format `{"$date": "<ISO-8601>"}`.
    :class:`datetime.datetime` instances before the Unix epoch UTC will be
    encoded as if the datetime representation is
    :const:`~DatetimeRepresentation.NUMBERLONG`.

    .. versionadded:: 3.4
    """
class JSONOptions(CodecOptions):
    """Encapsulates JSON options for :func:`dumps` and :func:`loads`.

    Raises :exc:`~pymongo.errors.ConfigurationError` on Python 2.6 if
    `simplejson <https://pypi.python.org/pypi/simplejson>`_ is not installed
    and document_class is not the default (:class:`dict`).

    :Parameters:
      - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects
        are encoded to MongoDB Extended JSON's *Strict mode* type
        `NumberLong`, ie ``'{"$numberLong": "<number>" }'``. Otherwise they
        will be encoded as an `int`. Defaults to ``False``.
      - `datetime_representation`: The representation to use when encoding
        instances of :class:`datetime.datetime`. Defaults to
        :const:`~DatetimeRepresentation.LEGACY`.
      - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to
        MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it
        will be encoded as ``'{"$uuid": "<hex>" }'``. Defaults to ``False``.
      - `canonical_extended_json`: If ``True``, use Canonical Extended JSON
        representations for all BSON values. This option implies
        ``strict_number_long=True``,
        ``datetime_representation=DatetimeRepresentation.NUMBERLONG``, and
        ``strict_uuid=True``.
      - `document_class`: BSON documents returned by :func:`loads` will be
        decoded to an instance of this class. Must be a subclass of
        :class:`collections.MutableMapping`. Defaults to :class:`dict`.
      - `uuid_representation`: The BSON representation to use when encoding
        and decoding instances of :class:`uuid.UUID`. Defaults to
        :const:`~bson.binary.PYTHON_LEGACY`.
      - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type
        `Date` will be decoded to timezone aware instances of
        :class:`datetime.datetime`. Otherwise they will be naive. Defaults
        to ``True``.
      - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the
        timezone from which :class:`~datetime.datetime` objects should be
        decoded. Defaults to :const:`~bson.tz_util.utc`.
      - `args`: arguments to :class:`~bson.codec_options.CodecOptions`
      - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions`

    .. seealso:: The documentation for `MongoDB Extended JSON
       <http://www.mongodb.org/display/DOCS/Mongo+Extended+JSON>`_.

    .. versionadded:: 3.4

    .. versionchanged:: 3.5
       Accepts the optional parameter `canonical_extended_json`.
    """

    def __new__(cls, strict_number_long=False,
                datetime_representation=DatetimeRepresentation.LEGACY,
                strict_uuid=False, canonical_extended_json=False,
                *args, **kwargs):
        # JSON decoding defaults to timezone-aware datetimes in UTC; the
        # underlying CodecOptions default is overridden here.
        kwargs["tz_aware"] = kwargs.get("tz_aware", True)
        if kwargs["tz_aware"]:
            kwargs["tzinfo"] = kwargs.get("tzinfo", utc)
        if datetime_representation not in (DatetimeRepresentation.LEGACY,
                                           DatetimeRepresentation.NUMBERLONG,
                                           DatetimeRepresentation.ISO8601):
            raise ConfigurationError(
                "JSONOptions.datetime_representation must be one of LEGACY,"
                "NUMBERLONG, or ISO8601 from DatetimeRepresentation.")
        self = super(JSONOptions, cls).__new__(cls, *args, **kwargs)
        # Python 2.6's json lacks object_pairs_hook, which is required to
        # support a custom document_class (see module-level fallback).
        if not _HAS_OBJECT_PAIRS_HOOK and self.document_class != dict:
            raise ConfigurationError(
                "Support for JSONOptions.document_class on Python 2.6 "
                "requires simplejson "
                "(https://pypi.python.org/pypi/simplejson) to be installed.")
        self.canonical_extended_json = canonical_extended_json
        # Canonical extended JSON forces the strictest settings.
        if self.canonical_extended_json:
            self.strict_number_long = True
            self.datetime_representation = DatetimeRepresentation.NUMBERLONG
            self.strict_uuid = True
        else:
            self.strict_number_long = strict_number_long
            self.datetime_representation = datetime_representation
            self.strict_uuid = strict_uuid
        return self

    def _arguments_repr(self):
        """Extend CodecOptions' repr fragment with the JSON-only options."""
        return ('strict_number_long=%r, '
                'datetime_representation=%r, '
                'strict_uuid=%r, canonical_extended_json=%r, %s' % (
                    self.strict_number_long,
                    self.datetime_representation,
                    self.strict_uuid,
                    self.canonical_extended_json,
                    super(JSONOptions, self)._arguments_repr()))
DEFAULT_JSON_OPTIONS = JSONOptions()
"""The default :class:`JSONOptions` for JSON encoding/decoding.
.. versionadded:: 3.4
"""
STRICT_JSON_OPTIONS = JSONOptions(
strict_number_long=True,
datetime_representation=DatetimeRepresentation.ISO8601,
strict_uuid=True)
""":class:`JSONOptions` for MongoDB Extended JSON's *Strict mode* encoding.
.. versionadded:: 3.4
"""
CANONICAL_JSON_OPTIONS = JSONOptions(canonical_extended_json=True)
""":class:`JSONOptions` for `canonical extended JSON`_.
.. versionadded:: 3.5
"""
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :func:`json.dumps`.

    Recursively converts all BSON types, including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`, before
    delegating to the native encoder.

    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        encoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.

    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.

    .. versionchanged:: 2.7
       Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
       instances.
    """
    opts = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
    converted = _json_convert(obj, opts)
    return json.dumps(converted, *args, **kwargs)
def loads(s, *args, **kwargs):
    """Helper function that wraps :func:`json.loads`.

    Automatically installs the appropriate object hook for BSON type
    conversion.

    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        decoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.

    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.
    """
    opts = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
    if _HAS_OBJECT_PAIRS_HOOK:
        kwargs["object_pairs_hook"] = (
            lambda pairs: object_pairs_hook(pairs, opts))
    elif opts.canonical_extended_json:
        kwargs["object_hook"] = lambda dct: canonical_object_hook(dct, opts)
    else:
        kwargs["object_hook"] = lambda dct: object_hook(dct, opts)
    return json.loads(s, *args, **kwargs)
def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    # Mappings (PY2 exposes iteritems, PY3 exposes items) become SON so
    # that key order is preserved in the output.
    if hasattr(obj, 'iteritems') or hasattr(obj, 'items'):
        return SON((key, _json_convert(value, json_options))
                   for key, value in iteritems(obj))
    # Any other non-string iterable becomes a list.
    if hasattr(obj, '__iter__') and not isinstance(obj, (text_type, bytes)):
        return [_json_convert(item, json_options) for item in obj]
    try:
        return default(obj, json_options)
    except TypeError:
        # Not a BSON type -- leave it to the native json encoder.
        return obj
def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS):
    """Build a document from parsed key/value *pairs* and decode it with the
    hook matching *json_options*."""
    document = json_options.document_class(pairs)
    hook = (canonical_object_hook
            if json_options.canonical_extended_json else object_hook)
    return hook(document, json_options)
def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS):
    """Decode a dict parsed from MongoDB Extended JSON into the matching
    BSON/Python type.  Documents without a recognized dollar-prefixed key
    are returned unchanged.
    """
    if "$oid" in dct:
        return ObjectId(str(dct["$oid"]))
    if "$ref" in dct:
        return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None))
    if "$date" in dct:
        return _get_date(dct, json_options)
    if "$regex" in dct:
        flags = 0
        # PyMongo always adds $options but some other tools may not.
        for opt in dct.get("$options", ""):
            flags |= _RE_OPT_TABLE.get(opt, 0)
        return Regex(dct["$regex"], flags)
    if "$minKey" in dct:
        return MinKey()
    if "$maxKey" in dct:
        return MaxKey()
    if "$binary" in dct:
        return _get_binary(dct, json_options)
    if "$code" in dct:
        return Code(dct["$code"], dct.get("$scope"))
    if "$uuid" in dct:
        return uuid.UUID(dct["$uuid"])
    if "$undefined" in dct:
        # BSON "undefined" has no Python equivalent; decode it as None.
        return None
    if "$numberLong" in dct:
        return Int64(dct["$numberLong"])
    if "$timestamp" in dct:
        tsp = dct["$timestamp"]
        return Timestamp(tsp["t"], tsp["i"])
    if "$numberDecimal" in dct:
        return Decimal128(dct["$numberDecimal"])
    return dct
def _get_binary(doc, json_options):
    """Decode a legacy extended-JSON ``{"$binary": ..., "$type": ...}``
    document into Binary or uuid.UUID, honoring the configured UUID
    representation.
    """
    if isinstance(doc["$type"], int):
        # Normalize an integer subtype to the two-digit hex form.
        doc["$type"] = "%02x" % doc["$type"]
    subtype = int(doc["$type"], 16)
    if subtype >= 0xffffff80:  # Handle mongoexport values
        subtype = int(doc["$type"][6:], 16)
    data = base64.b64decode(doc["$binary"].encode())
    # special handling for UUID
    if subtype == OLD_UUID_SUBTYPE:
        if json_options.uuid_representation == CSHARP_LEGACY:
            # C# stores the UUID in little-endian field order.
            return uuid.UUID(bytes_le=data)
        if json_options.uuid_representation == JAVA_LEGACY:
            # Java legacy format reverses each 8-byte half.
            data = data[7::-1] + data[:7:-1]
        return uuid.UUID(bytes=data)
    if subtype == UUID_SUBTYPE:
        return uuid.UUID(bytes=data)
    return Binary(data, subtype)
def _get_date(doc, json_options):
    """Decode an extended-JSON ``$date`` value to a datetime.

    Accepts an ISO-8601 string (mongoexport 2.6+), a ``$numberLong``
    sub-document (dates before the epoch), or raw milliseconds
    (mongoexport before 2.6).
    """
    dtm = doc["$date"]
    # mongoexport 2.6 and newer
    if isinstance(dtm, string_type):
        # Parse offset
        if dtm[-1] == 'Z':
            dt = dtm[:-1]
            offset = 'Z'
        elif dtm[-3] == ':':
            # (+|-)HH:MM
            dt = dtm[:-6]
            offset = dtm[-6:]
        elif dtm[-5] in ('+', '-'):
            # (+|-)HHMM
            dt = dtm[:-5]
            offset = dtm[-5:]
        elif dtm[-3] in ('+', '-'):
            # (+|-)HH
            dt = dtm[:-3]
            offset = dtm[-3:]
        else:
            # No recognizable offset suffix: treat the whole string as the
            # timestamp and assume UTC.
            dt = dtm
            offset = ''
        aware = datetime.datetime.strptime(
            dt, "%Y-%m-%dT%H:%M:%S.%f").replace(tzinfo=utc)
        if offset and offset != 'Z':
            # Convert the textual offset into signed seconds.
            if len(offset) == 6:
                hours, minutes = offset[1:].split(':')
                secs = (int(hours) * 3600 + int(minutes) * 60)
            elif len(offset) == 5:
                secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60)
            elif len(offset) == 3:
                secs = int(offset[1:3]) * 3600
            if offset[0] == "-":
                secs *= -1
            # Normalize the wall-clock reading to true UTC.
            aware = aware - datetime.timedelta(seconds=secs)
        if json_options.tz_aware:
            if json_options.tzinfo:
                aware = aware.astimezone(json_options.tzinfo)
            return aware
        else:
            return aware.replace(tzinfo=None)
    # mongoexport 2.6 and newer, time before the epoch (SERVER-15275)
    # NOTE(review): collections.Mapping moved to collections.abc in
    # Python 3.3 and was removed from collections in 3.10 -- confirm the
    # target runtime before upgrading.
    elif isinstance(dtm, collections.Mapping):
        millis = int(dtm["$numberLong"])
    # mongoexport before 2.6
    else:
        millis = int(dtm)
    return bson._millis_to_datetime(millis, json_options)
def _get_dbpointer(doc, json_options):
    """Decode a ``$dbPointer`` document, unwrapping it to a DBRef when the
    pointer carries no ``$db`` component.
    """
    dbref = doc['$dbPointer']
    if isinstance(dbref, DBRef):
        # DBPointer must not contain $db in its value.
        if dbref.database is None:
            return dbref
    # Otherwise, this is just a regular document.
    return json_options.document_class(
        [('$dbPointer', json_options.document_class(dbref.as_doc()))])
    # NOTE(review): this final `return doc` is unreachable -- the return
    # above always executes first.  Possibly the wrapping return was meant
    # to sit inside the isinstance branch; confirm intended control flow.
    return doc
# Dispatch table for canonical extended JSON: maps the exact frozenset of
# "$"-prefixed keys found in a decoded document to a
# converter(doc, json_options) callable producing the BSON value.
_CANONICAL_JSON_TABLE = {
    frozenset(['$oid']): lambda d, _: ObjectId(d['$oid']),
    frozenset(['$numberDecimal']): lambda d, _: Decimal128(d['$numberDecimal']),
    frozenset(['$symbol']): lambda d, _: text_type(d['$symbol']),
    frozenset(['$numberInt']): lambda d, _: int(d['$numberInt']),
    frozenset(['$numberDouble']): lambda d, _: float(d['$numberDouble']),
    frozenset(['$numberLong']): lambda d, _: Int64(d['$numberLong']),
    frozenset(['$date']): _get_date,
    frozenset(['$minKey']): lambda dummy0, dummy1: MinKey(),
    frozenset(['$maxKey']): lambda dummy0, dummy1: MaxKey(),
    frozenset(['$undefined']): lambda dummy0, dummy1: None,
    frozenset(['$dbPointer']): _get_dbpointer,
    # DBRef variants: remaining non-"$" keys are passed through as kwargs.
    frozenset(['$ref', '$id']): lambda d, _: DBRef(
        d.pop('$ref'), d.pop('$id'), **d),
    frozenset(['$ref', '$id', '$db']): lambda d, _: DBRef(
        d.pop('$ref'), d.pop('$id'), d.pop('$db'), **d),
    frozenset(['$regex', '$options']): lambda d, _: Regex(
        d['$regex'], d['$options']),
    frozenset(['$binary', '$type']): _get_binary,
    frozenset(['$code']): lambda d, _: Code(d['$code']),
    frozenset(['$code', '$scope']): lambda d, _: Code(
        d['$code'], d['$scope']),
    # A canonical $timestamp packs seconds in the high 32 bits and the
    # increment in the low 32 bits of a single integer.
    frozenset(['$timestamp']): lambda d, _: Timestamp(
        int(d['$timestamp']) >> 32, int(d['$timestamp']) & 0xffffffff)
}
def canonical_object_hook(dct, json_options=CANONICAL_JSON_OPTIONS):
    """Decode one canonical extended-JSON object by dispatching on the
    exact set of "$"-prefixed keys it contains; unrecognized documents are
    returned unchanged.
    """
    keyset = frozenset(key for key in dct if key.startswith('$'))
    converter = _CANONICAL_JSON_TABLE.get(keyset)
    if converter:
        return converter(dct, json_options)
    elif '$ref' in dct and '$id' in dct:
        # DBRef may contain other keys that don't start with $.
        if keyset - _DBREF_KEYS:
            # Other keys start with $, so dct cannot be parsed as a DBRef.
            return dct
        else:
            # Extra non-"$" keys become DBRef kwargs.
            return DBRef(dct.pop('$ref'), dct.pop('$id'),
                         dct.pop('$db', None), **dct)
    return dct
def default(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Encode a BSON/Python value into its extended-JSON representation.

    Raises TypeError for values with no JSON mapping so that the calling
    json encoder can report them.
    """
    # We preserve key order when rendering SON, DBRef, etc. as JSON by
    # returning a SON for those types instead of a dict.
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc(), json_options=json_options)
    if isinstance(obj, datetime.datetime):
        if (json_options.datetime_representation ==
                DatetimeRepresentation.ISO8601):
            if not obj.tzinfo:
                # Naive datetimes are assumed to be UTC.
                obj = obj.replace(tzinfo=utc)
            if obj >= EPOCH_AWARE:
                off = obj.tzinfo.utcoffset(obj)
                if (off.days, off.seconds, off.microseconds) == (0, 0, 0):
                    tz_string = 'Z'
                else:
                    tz_string = obj.strftime('%z')
                return {"$date": "%s.%03d%s" % (
                    obj.strftime("%Y-%m-%dT%H:%M:%S"),
                    int(obj.microsecond / 1000),
                    tz_string)}
        # Pre-epoch ISO8601 dates and the numeric representations fall
        # through to a millisecond count.
        millis = bson._datetime_to_millis(obj)
        if (json_options.datetime_representation ==
                DatetimeRepresentation.LEGACY):
            return {"$date": millis}
        return {"$date": {"$numberLong": str(millis)}}
    if json_options.strict_number_long and isinstance(obj, Int64):
        return {"$numberLong": str(obj)}
    if isinstance(obj, (RE_TYPE, Regex)):
        # Translate Python re flags into the extended-JSON option letters.
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        if isinstance(obj.pattern, text_type):
            pattern = obj.pattern
        else:
            pattern = obj.pattern.decode('utf-8')
        return SON([("$regex", pattern), ("$options", flags)])
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        if json_options.canonical_extended_json:
            # Canonical form packs (time, inc) into one 64-bit integer.
            return {'$timestamp': str((obj.time << 32) + obj.inc)}
        return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])}
    if isinstance(obj, Code):
        if obj.scope is None:
            return SON([('$code', str(obj))])
        return SON([
            ('$code', str(obj)),
            ('$scope', _json_convert(obj.scope, json_options))])
    if isinstance(obj, Binary):
        return SON([
            ('$binary', base64.b64encode(obj).decode()),
            ('$type', "%02x" % obj.subtype)])
    if PY3 and isinstance(obj, bytes):
        # On Python 3, raw bytes are emitted as generic (subtype 0) binary.
        return SON([
            ('$binary', base64.b64encode(obj).decode()),
            ('$type', "00")])
    if isinstance(obj, uuid.UUID):
        if json_options.strict_uuid:
            data = obj.bytes
            subtype = OLD_UUID_SUBTYPE
            if json_options.uuid_representation == CSHARP_LEGACY:
                # C# stores the UUID in little-endian field order.
                data = obj.bytes_le
            elif json_options.uuid_representation == JAVA_LEGACY:
                # Java legacy format reverses each 8-byte half.
                data = data[7::-1] + data[:7:-1]
            elif json_options.uuid_representation == UUID_SUBTYPE:
                # presumably the standard representation constant equals
                # UUID_SUBTYPE -- confirm against the module's constants.
                subtype = UUID_SUBTYPE
            return SON([
                ('$binary', base64.b64encode(data).decode()),
                ('$type', "%02x" % subtype)])
        else:
            return {"$uuid": obj.hex}
    if isinstance(obj, Decimal128):
        return {"$numberDecimal": str(obj)}
    if isinstance(obj, bool):
        # bool is an int subclass; handle it before the integer branch.
        return obj
    if json_options.canonical_extended_json and isinstance(obj, integer_types):
        if -2 ** 31 <= obj < 2 ** 31:
            return {'$numberInt': text_type(obj)}
        return {'$numberLong': text_type(obj)}
    if json_options.canonical_extended_json and isinstance(obj, float):
        if math.isnan(obj):
            representation = 'NaN'
        elif math.isinf(obj):
            representation = 'Infinity' if obj > 0 else '-Infinity'
        else:
            # repr() will return the shortest string guaranteed to produce the
            # original value, when float() is called on it. str produces a
            # shorter string in Python 2.
            representation = text_type(repr(obj))
        return {'$numberDouble': representation}
    raise TypeError("%r is not JSON serializable" % obj)
| apache-2.0 |
kekivelez/DjangoSkeleton | tcshealth/users/serializers.py | 1 | 3528 | from django.utils.encoding import smart_str
from django.contrib.auth.hashers import make_password
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """
    Serializer used for reading/writing User objects.

    Exposes basic profile fields; `created_at` is set by the model and
    therefore read-only for API clients.
    """
    created_at = serializers.DateTimeField(read_only=True)
    class Meta:
        model = User
        fields = ('id', 'email', 'first_name', 'last_name', 'gender',
                  'height', 'created_at')
class RegisterUserSerializer(serializers.ModelSerializer):
    """
    Serializer used to register a new user.

    The plain-text password is hashed during validation; `google_token`
    is accepted optionally for social sign-up.
    """
    password = serializers.CharField(write_only=True)
    google_token = serializers.CharField(write_only=True, required=False)
    class Meta:
        model = User
        fields = ('email', 'password', 'first_name', 'last_name', 'gender',
                  'height', 'birth_date', 'google_token')
    def validate_password(self, value):
        """Hash a non-empty password before it is persisted."""
        if not value:
            return value
        return make_password(smart_str(value))
class ChangePasswordSerializer(serializers.ModelSerializer):
    """
    Serializer that handles password change in the user settings endpoint.

    Requires the current password plus a new password entered twice.
    """
    current_password = serializers.CharField(write_only=True)
    password1 = serializers.CharField(min_length=6, write_only=True)
    password2 = serializers.CharField(min_length=6, write_only=True)
    class Meta:
        model = User
        fields = ('current_password', 'password1', 'password2')
    def validate_current_password(self, value):
        """Reject the request unless *value* matches the user's password."""
        if self.instance and not self.instance.check_password(value):
            raise serializers.ValidationError('Current password is invalid.')
        return value
    def validate_password2(self, value):
        """Ensure both new-password fields agree."""
        if value != self.initial_data['password1']:
            raise serializers.ValidationError(
                "Password doesn't match the confirmation.")
        return smart_str(value)
    def update(self, instance, validated_data):
        """Apply the confirmed new password to *instance*."""
        new_password = validated_data.get('password2')
        instance.change_password(new_password)
        return instance
class ForgotPasswordSerializer(serializers.Serializer):
    """
    Serializer that handles the forgot-password endpoint.

    Validation resolves the e-mail address to an account and stores it on
    ``self.user`` for the subsequent e-mail dispatch.
    """
    email = serializers.EmailField(max_length=254)
    def validate_email(self, value):
        """Look up the account for *value*, failing when none exists."""
        try:
            self.user = User.objects.get(email__iexact=value)
        except User.DoesNotExist:
            raise serializers.ValidationError('No user found.')
        return value
    def send_password_reset_email(self):
        """Dispatch the password-reset e-mail to the matched user."""
        self.user.send_password_reset_email()
class ResetPasswordSerializer(serializers.Serializer):
    """
    Serializer that handles the reset-password endpoint.

    Validates the reset token, resolves it to a user, and applies the new
    password as part of object-level validation.
    """
    token = serializers.CharField(write_only=True)
    new_password = serializers.CharField(write_only=True)
    def validate_new_password(self, value):
        """Coerce a non-empty password to a byte-safe string."""
        return smart_str(value) if value else value
    def validate_token(self, value):
        """Resolve the reset token to a user, failing when it is invalid."""
        self.user = User.objects.get_from_password_reset_token(value)
        if not self.user:
            raise serializers.ValidationError('Invalid password reset token.')
        return value
    def validate(self, attrs):
        """Change the password as a side effect and report success."""
        self.user.change_password(attrs['new_password'])
        return {'password_reset': True}
xiangel/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Hash/test_RIPEMD.py | 116 | 2685 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_RIPEMD.py: Self-test for the RIPEMD-160 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#"""Self-test suite for Crypto.Hash.RIPEMD"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
# expected_result is the RIPEMD-160 digest of the input, hex-encoded.
test_data = [
    # Test vectors downloaded 2008-09-12 from
    # http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
    ('9c1185a5c5e9fc54612808977ee8f548b2258d31', '', "'' (empty string)"),
    ('0bdc9d2d256b3ee9daae347be6f4dc835a467ffe', 'a'),
    ('8eb208f7e05d987a9b044a8e98c6b087f15a0bfc', 'abc'),
    ('5d0689ef49d2fae572b881b123a85ffa21595f36', 'message digest'),
    ('f71c27109c692c1b56bbdceb5b9d2865b3708dbc',
        'abcdefghijklmnopqrstuvwxyz',
        'a-z'),
    ('12a053384a9c0c88e405a06c27dcf49ada62eb2b',
        'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq',
        'abcdbcd...pnopq'),
    ('b0e20b6e3116640286ed3a87a5713079b21f5189',
        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
        'A-Z, a-z, 0-9'),
    ('9b752e45573d4b39f4dbd3323cab82bf63326bfb',
        '1234567890' * 8,
        "'1234567890' * 8"),
    ('52783243c1697bdbe16d37f97f68f08325dc1528',
        'a' * 10**6,
        '"a" * 10**6'),
]
def get_tests(config={}):
    # Build the RIPEMD-160 self-test suite from the module's test vectors.
    # NOTE(review): the mutable default {} is never mutated here, but a
    # None default would be safer if the signature is ever extended.
    # Imports are local so Crypto is only required when tests actually run.
    from Crypto.Hash import RIPEMD
    from common import make_hash_tests
    # RIPEMD-160 yields 20-byte digests; the OID identifies the algorithm
    # in ASN.1 metadata.
    return make_hash_tests(RIPEMD, "RIPEMD", test_data,
                           digest_size=20,
                           oid="\x06\x05\x2b\x24\x03\02\x01")
if __name__ == '__main__':
    import unittest

    def suite():
        # Resolved by unittest.main via defaultTest='suite'.
        return unittest.TestSuite(get_tests())

    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
jampekka/openhilma | infer_hilma_schema.py | 1 | 2219 | #!/usr/bin/env python3
from pathlib import Path
import argh
import xml.etree.ElementTree as ET
import json
import sys
import itertools
def always_applies_for(eliter, predicate):
    """Return the set of tags for which *predicate* holds on every
    occurrence of that tag in *eliter*.
    """
    seen, failed = set(), set()
    for element in eliter:
        seen.add(element.tag)
        if not predicate(element):
            failed.add(element.tag)
    return seen - failed
def is_trivial_tag(el):
    """Return True when *el* carries no attributes and no child elements.

    Equivalent to the original next(iter(...)) probing, but uses the
    element API directly: ``el.attrib`` is the attribute dict and
    ``len(el)`` counts child elements.
    """
    return not el.attrib and len(el) == 0
def is_numeric_tag(el):
    """Return True when *el* is a trivial tag whose text parses as a float."""
    if not is_trivial_tag(el) or el.text is None:
        return False
    try:
        float(el.text)
    except ValueError:
        return False
    return True
def is_datetime_tag(el):
    """Return True when *el* wraps exactly the DAY/MONTH/TIME/YEAR children
    of an attribute-free datetime element.
    """
    if el.attrib:
        return False
    # Sorted equality implies there are exactly four children.
    return sorted(child.tag for child in el) == ['DAY', 'MONTH', 'TIME', 'YEAR']
def is_date_tag(el):
    """Return True when *el* wraps exactly the DAY/MONTH/YEAR children of an
    attribute-free date element.
    """
    if el.attrib:
        return False
    # Sorted equality implies there are exactly three children.
    return sorted(child.tag for child in el) == ['DAY', 'MONTH', 'YEAR']
def is_simple_container_tag(el):
    """Return True when *el* has at most one child element."""
    return len(list(el)) <= 1
def notices(directory):
    """Yield every WRAPPED_NOTICE element found in *directory*'s XML files."""
    for xml_file in directory.glob("*.xml"):
        root = ET.parse(str(xml_file)).getroot()
        yield from root.iterfind('WRAPPED_NOTICE')
def main(input_directory):
    """Infer tag categories from the notice documents in *input_directory*.

    Classifies every XML tag (text, numeric, container, date, datetime)
    according to whether the corresponding predicate holds for all of its
    occurrences, then prints the result as Python assignments suitable for
    pasting into a schema module.
    """
    directory = Path(input_directory)

    def applies_for(predicate):
        # Tags for which *predicate* holds on every occurrence in the corpus.
        all_docs = notices(directory)
        # chain.from_iterable keeps the element stream lazy instead of
        # materializing the argument tuple that chain(*...) would build.
        all_elements = itertools.chain.from_iterable(
            root.iter() for root in all_docs)
        return always_applies_for(all_elements, predicate)

    trivial_tags = applies_for(is_trivial_tag)
    numeric_tags = applies_for(is_numeric_tag)
    container_tags = applies_for(is_simple_container_tag) - trivial_tags
    text_tags = trivial_tags - numeric_tags
    print("TEXT_TAGS = " + repr(text_tags))
    print("NUMERIC_TAGS = " + repr(numeric_tags))
    print("CONTAINER_TAGS = " + repr(container_tags))
    print("DATETIME_TAGS = " + repr(applies_for(is_datetime_tag)))
    print("DATE_TAGS = " + repr(applies_for(is_date_tag)))
    # Removed: a dead `entries = (etree_to_dict(n) for n in ...)` line that
    # referenced an undefined name and was never consumed, plus an unused
    # pprint import.
if __name__ == '__main__':
    # argh turns main() into a CLI taking the input directory as an argument.
    argh.dispatch_command(main)
| agpl-3.0 |
nathania/pysal | pysal/contrib/network/lincs.py | 15 | 13081 | #!/usr/bin/env python
"""
A library for computing local indicators of network-constrained clusters
Author:
Myunghwa Hwang mhwang4@gmail.com
"""
import unittest
import numpy as np
import scipy.stats as stats
import geodanet.network as pynet
import pysal, copy
import time
def unconditional_sim(event, base, s):
    """
    Simulate event counts under an unconditional binomial model: the global
    mean risk (total events / total base) is applied to every observation's
    base population.

    Parameters:
        event: n*1 numpy array with integer values
               observed values for an event variable
        base: n*1 numpy array with integer values
              observed values for a population variable
        s: integer
           the number of simulations

    Returns:
        : n*s numpy array of simulated counts; rows whose base population
          is zero are forced to zero
    """
    mean_risk = event.sum()*1.0/base.sum()
    # Always work on a fresh integer copy: the original implementation
    # incremented zero cells of the caller's array in place when it was
    # already integer-typed.
    base = np.array([int(v) for v in base])
    base_zeros = (base == 0)
    # np.random.binomial needs n >= 1; bump zero cells and blank them below.
    base[base_zeros] += 1
    sims = np.random.binomial(base, mean_risk, (s, len(event))).transpose()
    sims[base_zeros, :] = 0.0
    return sims
def unconditional_sim_poisson(event, base, s):
"""
Parameters:
event: n*1 numpy array with integer values
observed values for an event variable
base: n*1 numpy array with integer values
observed values for a population variable
s: integer
the number of simulations
Returns:
: n*s numpy array
"""
mean_risk = event.sum()*1.0/base.sum()
E = base*mean_risk
return np.random.poisson(E, (s, len(event))).transpose()
def conditional_multinomial(event, base, s):
    """
    Simulate event counts conditional on the observed total: the total
    number of events is distributed multinomially in proportion to each
    observation's base value.

    Parameters:
        event: n*1 numpy array with integer values
               observed values for an event variable
        base: n*1 numpy array with integer values
              observed values for a population variable
        s: integer
           the number of simulations

    Returns:
        : n*s numpy array of multinomial draws
    """
    total_events = int(event.sum())
    shares = base*1.0/base.sum()
    return np.random.multinomial(total_events, shares, s).transpose()
def pseudo_pvalues(obs, sims):
    """
    Get pseudo p-values from a set of observed indices and their simulated ones.

    Parameters:
        obs: n*1 numpy array for observed values
        sims: n*sims numpy array; sims is the number of simulations

    Returns:
        p_sim : n*1 numpy array for pseudo p-values
        E_sim : mean of p_sim
        SE_sim: standard deviation of p_sim
        V_sim: variance of p_sim
        z_sim: standardarized observed values
        p_z_sim: p-value of z_sim based on normal distribution
    """
    sims = np.transpose(sims)
    permutations = sims.shape[0]
    above = sims >= obs
    larger = sum(above)
    # Two-sided correction: where fewer than half the simulated values lie
    # above the observed one, count the opposite tail instead.
    low_extreme = (permutations - larger) < larger
    larger[low_extreme] = permutations - larger[low_extreme]
    # +1 in numerator and denominator counts the observed value itself.
    p_sim = (larger + 1.0)/(permutations + 1.0)
    # NOTE(review): E_sim/SE_sim are scalars computed over the entire
    # simulation matrix, not per-observation and not over p_sim as the
    # docstring states -- confirm which is intended.
    E_sim = sims.mean()
    SE_sim = sims.std()
    V_sim = SE_sim*SE_sim
    z_sim = (obs - E_sim)/SE_sim
    p_z_sim = 1 - stats.norm.cdf(np.abs(z_sim))
    return p_sim, E_sim, SE_sim, V_sim, z_sim, p_z_sim
def node_weights(network, attribute=False):
    """
    Obtains a spatial weights matrix of edges in a network
    if two edges share a node, they are neighbors

    Parameters:
        network: a network with/without attributes
        attribute: boolean
                   if true, attributes of edges are added to a dictionary of edges,
                   which is a return value

    Returns:
        w: a spatial weights instance
        id2link: an associative dictionary that connects a sequential id to a unique
                 edge on the network
                 if attribute is true, each item in the dictionary includes the attributes
    """
    link2id, id2link = {}, {}
    counter = 0
    neighbors, weights = {},{}
    for n1 in network:
        for n2 in network[n1]:
            if (n1,n2) not in link2id or link2id[(n1,n2)] not in neighbors:
                if (n1,n2) not in link2id:
                    # Register the undirected edge under both orientations
                    # so either traversal direction maps to one id.
                    link2id[(n1,n2)] = counter
                    link2id[(n2,n1)] = counter
                    if not attribute:
                        id2link[counter] = (n1, n2)
                    else:
                        id2link[counter] = tuple([(n1,n2)] + list(network[n1][n2][1:]))
                    counter += 1
                # Edges incident to either endpoint (excluding this edge
                # itself) are its node-sharing neighbors.
                neighbors_from_n1 = [(n1, n) for n in network[n1] if n != n2]
                neighbors_from_n2 = [(n2, n) for n in network[n2] if n != n1]
                neighbors_all = neighbors_from_n1 + neighbors_from_n2
                neighbor_ids = []
                for edge in neighbors_all:
                    if edge not in link2id:
                        # First sighting of a neighboring edge: assign it an
                        # id now so the adjacency list can reference it.
                        link2id[edge] = counter
                        link2id[(edge[-1], edge[0])] = counter
                        if not attribute:
                            id2link[counter] = edge
                        else:
                            id2link[counter] = tuple([edge] + list(network[edge[0]][edge[1]][1:]))
                        neighbor_ids.append(counter)
                        counter += 1
                    else:
                        neighbor_ids.append(link2id[edge])
                neighbors[link2id[(n1,n2)]] = neighbor_ids
                # Binary contiguity: every node-sharing neighbor weighs 1.0.
                weights[link2id[(n1,n2)]] = [1.0]*(len(neighbors_from_n1) + len(neighbors_from_n2))
    return pysal.weights.W(neighbors, weights), id2link
def edgepoints_from_network(network, attribute=False):
    """
    Obtains a list of projected points which are midpoints of edges

    Parameters:
        network: a network with/without attributes
        attribute: boolean
                   if true, one of return values includes attributes for each edge

    Returns:
        id2linkpoints: a dictionary that associates a sequential id to a projected, midpoint of each edge
        id2attr: a dictionary that associates a sequential id to the attributes of each edge
        link2id: a dictionary that associates each edge to its id
    """
    link2id, id2linkpoints, id2attr = {}, {}, {}
    counter = 0
    for n1 in network:
        for n2 in network[n1]:
            if (n1,n2) not in link2id or (n2,n1) not in link2id:
                # Register the undirected edge under both orientations.
                link2id[(n1,n2)] = counter
                link2id[(n2,n1)] = counter
                # Edge data is either a bare length or [length, attr1, ...].
                if type(network[n1][n2]) != list:
                    half_dist = network[n1][n2]/2
                else:
                    half_dist = network[n1][n2][0]/2
                # Order the node pair deterministically (smaller coordinate
                # first) so both directions produce the same midpoint record.
                if n1[0] < n2[0] or (n1[0] == n2[0] and n1[1] < n2[1]):
                    id2linkpoints[counter] = (n1,n2,half_dist,half_dist)
                else:
                    id2linkpoints[counter] = (n2,n1,half_dist,half_dist)
                if attribute:
                    id2attr[counter] = network[n1][n2][1:]
                counter += 1
    return id2linkpoints, id2attr, link2id
def dist_weights(network, id2linkpoints, link2id, bandwidth):
    """
    Obtains a distance-based spatial weights matrix using network distance

    Parameters:
        network: an undirected network without additional attributes
        id2linkpoints: a dictionary that includes a list of network-projected, midpoints of edges in the network
        link2id: a dictionary that associates each edge to a unique id
        bandwidth: a threshold distance for creating a spatial weights matrix

    Returns:
        w : a distance-based, binary spatial weights matrix
        id2link: a dictionary that associates a unique id to each edge of the network
    """
    linkpoints = id2linkpoints.values()
    neighbors, id2link = {}, {}
    net_distances = {}
    for linkpoint in id2linkpoints:
        # NOTE(review): the cache dict is keyed by node but probed with the
        # full linkpoint tuple, so this membership test never hits and the
        # shortest-path trees are recomputed on every iteration -- confirm
        # whether the key was meant to be an endpoint.
        if linkpoints[linkpoint] not in net_distances:
            # Single-source shortest paths from both endpoints, bounded by
            # the bandwidth, for the midpoint-to-midpoint queries below.
            net_distances[linkpoints[linkpoint][0]] = pynet.dijkstras(network, linkpoints[linkpoint][0], r=bandwidth)
            net_distances[linkpoints[linkpoint][1]] = pynet.dijkstras(network, linkpoints[linkpoint][1], r=bandwidth)
        ngh = pynet.proj_distances_undirected(network, linkpoints[linkpoint], linkpoints, r=bandwidth, cache=net_distances)
        #ngh = pynet.proj_distances_undirected(network, linkpoints[linkpoint], linkpoints, r=bandwidth)
        if linkpoints[linkpoint] in ngh:
            # An edge is not its own neighbor.
            del ngh[linkpoints[linkpoint]]
        if linkpoint not in neighbors:
            neighbors[linkpoint] = []
        for k in ngh.keys():
            neighbor = link2id[k[:2]]
            # Record the neighbor relation symmetrically in both lists.
            if neighbor not in neighbors[linkpoint]:
                neighbors[linkpoint].append(neighbor)
            if neighbor not in neighbors:
                neighbors[neighbor] = []
            if linkpoint not in neighbors[neighbor]:
                neighbors[neighbor].append(linkpoint)
        id2link[linkpoint] = id2linkpoints[linkpoint][:2]
    weights = copy.copy(neighbors)
    for ngh in weights:
        # Binary weights: 1.0 for every neighbor within the bandwidth.
        weights[ngh] = [1.0]*len(weights[ngh])
    return pysal.weights.W(neighbors, weights), id2link
def lincs(network, event, base, weight, dist=None, lisa_func='moran', sim_method="permutations", sim_num=99):
    """
    Compute a local indicator of spatial association (LISA) for each edge
    in the network.

    Parameters:
        network: a clean network where each edge has up to three attributes:
                 Its length, an event variable, and a base variable
        event: integer
               an index for the event variable
        base: integer
              an index for the base variable
        weight: string
                type of binary spatial weights
                two options are allowed: Node-based, Distance-based
        dist: float
              threshold distance value for the distance-based weight;
              required when weight is not Node-based
        lisa_func: string
                   type of LISA functions
                   three options allowed: moran, g, and g_star
        sim_method: string
                    type of simulation methods
                    four options allowed: permutation, binomial
                    (unconditional), poisson (unconditional), multinomial
                    (conditional); any value other than 'permutation'
                    selects one of the analytical null models
        sim_num: integer
                 the number of simulations

    Returns:
        : a list of (edge geometry, event value, base value, LISA value,
          quadrant or z-value, pseudo p-value) tuples, one per edge
        : a PySAL spatial weights object used for the computation

    Raises:
        ValueError: when a local G statistic is requested together with
                    node-based weights, which is not supported.
    """
    if lisa_func in ['g', 'g_star'] and weight == 'Node-based':
        # Fixed: previously printed a message and executed a bare `raise`
        # with no active exception, which itself errored at runtime.
        raise ValueError('Local G statistics can work only with '
                         'distance-based weights matrix')
    # Record whether the G* variant was requested BEFORE lisa_func is
    # rebound to a class below.  The original compared the rebound class
    # against the string 'g_star', so star could never become True.
    star = lisa_func == 'g_star'
    if lisa_func == 'moran':
        lisa_func = pysal.esda.moran.Moran_Local
    else:
        lisa_func = pysal.esda.getisord.G_Local
    if base:
        def getBase(edges, edge, base):
            # Read the base attribute from the edge record.
            return edges[edge][base]
    else:
        def getBase(edges, edge, base):
            # No base variable supplied: treat every edge as unit base.
            return 1.0
    w, edges, e, b, edges_geom = None, None, None, None, []
    if weight == 'Node-based':
        w, edges = node_weights(network, attribute=True)
        n = len(edges)
        e, b = np.zeros(n), np.zeros(n)
        for edge in edges:
            edges_geom.append(edges[edge][0])
            e[edge] = edges[edge][event]
            b[edge] = getBase(edges, edge, base)
        w.id_order = edges.keys()
    elif dist is not None:
        id2edgepoints, id2attr, edge2id = edgepoints_from_network(network, attribute=True)
        # Strip attributes so the network holds bare distances for the
        # shortest-path computations inside dist_weights.
        for n1 in network:
            for n2 in network[n1]:
                network[n1][n2] = network[n1][n2][0]
        w, edges = dist_weights(network, id2edgepoints, edge2id, dist)
        n = len(id2attr)
        e, b = np.zeros(n), np.zeros(n)
        if base:
            # Attribute lists from edgepoints_from_network exclude the edge
            # length, so variable indices shift down by one.
            base -= 1
        for edge in id2attr:
            edges_geom.append(edges[edge])
            e[edge] = id2attr[edge][event - 1]
            b[edge] = getBase(id2attr, edge, base)
        w.id_order = id2attr.keys()
    Is, p_sim, Zs = None, None, None
    if sim_method == 'permutation':
        # Let PySAL permute the rates internally.
        if lisa_func == pysal.esda.moran.Moran_Local:
            lisa_i = lisa_func(e*1.0/b, w, transformation="r", permutations=sim_num)
            Is = lisa_i.Is
            Zs = lisa_i.q
        else:
            lisa_i = lisa_func(e*1.0/b, w, transform="R", permutations=sim_num, star=star)
            Is = lisa_i.Gs
            Zs = lisa_i.Zs
        p_sim = lisa_i.p_sim
    else:
        # Compute the observed statistic once, then simulate event counts
        # under the chosen null model and derive pseudo p-values.
        sims = None
        if lisa_func == pysal.esda.moran.Moran_Local:
            lisa_i = lisa_func(e*1.0/b, w, transformation="r", permutations=0)
            Is = lisa_i.Is
            Zs = lisa_i.q
        else:
            lisa_i = lisa_func(e*1.0/b, w, transform="R", permutations=0, star=star)
            Is = lisa_i.Gs
            Zs = lisa_i.Zs
        if sim_method == 'binomial':
            sims = unconditional_sim(e, b, sim_num)
        elif sim_method == 'poisson':
            sims = unconditional_sim_poisson(e, b, sim_num)
        else:
            sims = conditional_multinomial(e, b, sim_num)
        # Recompute the statistic for every simulated replicate in place.
        if lisa_func == pysal.esda.moran.Moran_Local:
            for i in range(sim_num):
                sims[:, i] = lisa_func(sims[:, i]*1.0/b, w, transformation="r", permutations=0).Is
        else:
            for i in range(sim_num):
                sims[:, i] = lisa_func(sims[:, i]*1.0/b, w, permutations=0, star=star).Gs
        sim_res = pseudo_pvalues(Is, sims)
        p_sim = sim_res[0]
    w.transform = 'O'
    return zip(edges_geom, e, b, Is, Zs, p_sim), w
| bsd-3-clause |
MotorolaMobilityLLC/external-chromium_org | chrome/common/extensions/docs/server2/chained_compiled_file_system.py | 31 | 3233 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiled_file_system import CompiledFileSystem
from file_system import FileNotFoundError
class ChainedCompiledFileSystem(object):
''' A CompiledFileSystem implementation that fetches data from a chain of
CompiledFileSystems that have different file systems and separate cache
namespaces.
The rules for the compiled file system chain are:
- Versions are fetched from the first compiled file system's underlying
file system.
- Each compiled file system is read in the reverse order (the last one is
read first). If the version matches, return the data. Otherwise, read
from the previous compiled file system until the first one is read.
It is used to chain compiled file systems whose underlying file systems are
slightly different. This makes it possible to reuse cached compiled data in
one of them without recompiling everything that is shared by them.
'''
class Factory(CompiledFileSystem.Factory):
def __init__(self,
factory_and_fs_chain):
self._factory_and_fs_chain = factory_and_fs_chain
def Create(self, populate_function, cls, category=None):
return ChainedCompiledFileSystem(
[(factory.Create(populate_function, cls, category), fs)
for factory, fs in self._factory_and_fs_chain])
def __init__(self, compiled_fs_chain):
assert len(compiled_fs_chain) > 0
self._compiled_fs_chain = compiled_fs_chain
def GetFromFile(self, path, binary=False):
# It's possible that a new file is added in the first compiled file system
# and it doesn't exist in other compiled file systems.
try:
first_compiled_fs, first_file_system = self._compiled_fs_chain[0]
# The first file system contains both files of a newer version and files
# shared with other compiled file systems. We are going to try each
# compiled file system in the reverse order and return the data when
# version matches. Data cached in other compiled file system will be
# reused whenever possible so that we don't need to recompile things that
# are not changed across these file systems.
version = first_file_system.Stat(path).version
for compiled_fs, _ in reversed(self._compiled_fs_chain):
if compiled_fs.StatFile(path) == version:
return compiled_fs.GetFromFile(path, binary)
except FileNotFoundError:
pass
# Try first operation again to generate the correct stack trace
return first_compiled_fs.GetFromFile(path, binary)
  def GetFromFileListing(self, path):
    """Returns the compiled directory listing for |path|.

    |path| is normalized to end with '/'. Works like GetFromFile(): the
    version comes from the first file system, and the chain is scanned in
    reverse for a compiled file system whose listing stat matches it.
    """
    if not path.endswith('/'):
      path += '/'
    try:
      first_compiled_fs, first_file_system = self._compiled_fs_chain[0]
      version = first_file_system.Stat(path).version
      for compiled_fs, _ in reversed(self._compiled_fs_chain):
        if compiled_fs.StatFileListing(path) == version:
          return compiled_fs.GetFromFileListing(path)
    except FileNotFoundError:
      # NOTE(review): project-local FileNotFoundError -- see GetFromFile().
      pass
    # Try first operation again to generate the correct stack trace
    # (this line is also the fallback when no chained version matched above).
    return first_compiled_fs.GetFromFileListing(path)
| bsd-3-clause |
ShySec/scrimmage-scoreboard | web2py/applications/admin/languages/cs.py | 12 | 23108 | # -*- coding: utf-8 -*-
{
'!langcode!': 'cs-cz',
'!langname!': 'čeština',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': 'Kolonka "Upravit" je nepovinný výraz, například "pole1=\'nováhodnota\'". Výsledky databázového JOINu nemůžete mazat ani upravovat.',
'"User Exception" debug mode. An error ticket could be issued!': '"User Exception" debug mode. An error ticket could be issued!',
'%%{Row} in Table': '%%{řádek} v tabulce',
'%%{Row} selected': 'označených %%{řádek}',
'%s %%{row} deleted': '%s smazaných %%{záznam}',
'%s %%{row} updated': '%s upravených %%{záznam}',
'%s selected': '%s označených',
'%Y-%m-%d': '%d.%m.%Y',
'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S',
'(requires internet access)': '(vyžaduje připojení k internetu)',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(například "cs-cs")',
'@markmin\x01(file **gluon/contrib/plural_rules/%s.py** is not found)': '(soubor **gluon/contrib/plural_rules/%s.py** nenalezen)',
'@markmin\x01Searching: **%s** %%{file}': 'Hledání: **%s** %%{soubor}',
'About': 'O programu',
'About application': 'O aplikaci',
'Access Control': 'Řízení přístupu',
'Add breakpoint': 'Přidat bod přerušení',
'Additional code for your application': 'Další kód pro Vaši aplikaci',
'Admin design page': 'Admin design page',
'Admin language': 'jazyk rozhraní',
'Administrative interface': 'pro administrátorské rozhraní klikněte sem',
'Administrative Interface': 'Administrátorské rozhraní',
'administrative interface': 'rozhraní pro správu',
'Administrator Password:': 'Administrátorské heslo:',
'Ajax Recipes': 'Recepty s ajaxem',
'An error occured, please %s the page': 'An error occured, please %s the page',
'and rename it:': 'a přejmenovat na:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin je zakázaná bez zabezpečeného spojení',
'Application': 'Application',
'application "%s" uninstalled': 'application "%s" odinstalována',
'application compiled': 'aplikace zkompilována',
'Application name:': 'Název aplikace:',
'are not used': 'nepoužita',
'are not used yet': 'ještě nepoužita',
'Are you sure you want to delete this object?': 'Opravdu chcete odstranit tento objekt?',
'Are you sure you want to uninstall application "%s"?': 'Opravdu chcete odinstalovat aplikaci "%s"?',
'arguments': 'arguments',
'at char %s': 'at char %s',
'at line %s': 'at line %s',
'ATTENTION:': 'ATTENTION:',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.',
'Available Databases and Tables': 'Dostupné databáze a tabulky',
'back': 'zpět',
'Back to wizard': 'Back to wizard',
'Basics': 'Basics',
'Begin': 'Začít',
'breakpoint': 'bod přerušení',
'Breakpoints': 'Body přerušení',
'breakpoints': 'body přerušení',
'Buy this book': 'Koupit web2py knihu',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Klíče cache',
'cache, errors and sessions cleaned': 'cache, chyby a relace byly pročištěny',
'can be a git repo': 'může to být git repo',
'Cancel': 'Storno',
'Cannot be empty': 'Nemůže být prázdné',
'Change Admin Password': 'Změnit heslo pro správu',
'Change admin password': 'Změnit heslo pro správu aplikací',
'Change password': 'Změna hesla',
'check all': 'vše označit',
'Check for upgrades': 'Zkusit aktualizovat',
'Check to delete': 'Označit ke smazání',
'Check to delete:': 'Označit ke smazání:',
'Checking for upgrades...': 'Zjišťuji, zda jsou k dispozici aktualizace...',
'Clean': 'Pročistit',
'Clear CACHE?': 'Vymazat CACHE?',
'Clear DISK': 'Vymazat DISK',
'Clear RAM': 'Vymazat RAM',
'Click row to expand traceback': 'Pro rozbalení stopy, klikněte na řádek',
'Click row to view a ticket': 'Pro zobrazení chyby (ticketu), klikněte na řádku...',
'Client IP': 'IP adresa klienta',
'code': 'code',
'Code listing': 'Code listing',
'collapse/expand all': 'vše sbalit/rozbalit',
'Community': 'Komunita',
'Compile': 'Zkompilovat',
'compiled application removed': 'zkompilovaná aplikace smazána',
'Components and Plugins': 'Komponenty a zásuvné moduly',
'Condition': 'Podmínka',
'continue': 'continue',
'Controller': 'Kontrolér (Controller)',
'Controllers': 'Kontroléry',
'controllers': 'kontroléry',
'Copyright': 'Copyright',
'Count': 'Počet',
'Create': 'Vytvořit',
'create file with filename:': 'vytvořit soubor s názvem:',
'created by': 'vytvořil',
'Created By': 'Vytvořeno - kým',
'Created On': 'Vytvořeno - kdy',
'crontab': 'crontab',
'Current request': 'Aktuální požadavek',
'Current response': 'Aktuální odpověď',
'Current session': 'Aktuální relace',
'currently running': 'právě běží',
'currently saved or': 'uloženo nebo',
'customize me!': 'upravte mě!',
'data uploaded': 'data nahrána',
'Database': 'Rozhraní databáze',
'Database %s select': 'databáze %s výběr',
'Database administration': 'Database administration',
'database administration': 'správa databáze',
'Date and Time': 'Datum a čas',
'day': 'den',
'db': 'db',
'DB Model': 'Databázový model',
'Debug': 'Ladění',
'defines tables': 'defines tables',
'Delete': 'Smazat',
'delete': 'smazat',
'delete all checked': 'smazat vše označené',
'delete plugin': 'delete plugin',
'Delete this file (you will be asked to confirm deletion)': 'Smazat tento soubor (budete požádán o potvrzení mazání)',
'Delete:': 'Smazat:',
'deleted after first hit': 'smazat po prvním dosažení',
'Demo': 'Demo',
'Deploy': 'Nahrát',
'Deploy on Google App Engine': 'Nahrát na Google App Engine',
'Deploy to OpenShift': 'Nahrát na OpenShift',
'Deployment Recipes': 'Postupy pro deployment',
'Description': 'Popis',
'design': 'návrh',
'Detailed traceback description': 'Podrobný výpis prostředí',
'details': 'podrobnosti',
'direction: ltr': 'směr: ltr',
'Disable': 'Zablokovat',
'DISK': 'DISK',
'Disk Cache Keys': 'Klíče diskové cache',
'Disk Cleared': 'Disk smazán',
'docs': 'dokumentace',
'Documentation': 'Dokumentace',
"Don't know what to do?": 'Nevíte kudy kam?',
'done!': 'hotovo!',
'Download': 'Stáhnout',
'download layouts': 'stáhnout moduly rozvržení stránky',
'download plugins': 'stáhnout zásuvné moduly',
'E-mail': 'E-mail',
'Edit': 'Upravit',
'edit all': 'edit all',
'Edit application': 'Správa aplikace',
'edit controller': 'edit controller',
'Edit current record': 'Upravit aktuální záznam',
'Edit Profile': 'Upravit profil',
'edit views:': 'upravit pohled:',
'Editing file "%s"': 'Úprava souboru "%s"',
'Editing Language file': 'Úprava jazykového souboru',
'Editing Plural Forms File': 'Editing Plural Forms File',
'Email and SMS': 'Email a SMS',
'Enable': 'Odblokovat',
'enter a number between %(min)g and %(max)g': 'zadejte číslo mezi %(min)g a %(max)g',
'enter an integer between %(min)g and %(max)g': 'zadejte celé číslo mezi %(min)g a %(max)g',
'Error': 'Chyba',
'Error logs for "%(app)s"': 'Seznam výskytu chyb pro aplikaci "%(app)s"',
'Error snapshot': 'Snapshot chyby',
'Error ticket': 'Ticket chyby',
'Errors': 'Chyby',
'Exception %(extype)s: %(exvalue)s': 'Exception %(extype)s: %(exvalue)s',
'Exception %s': 'Exception %s',
'Exception instance attributes': 'Prvky instance výjimky',
'Expand Abbreviation': 'Expand Abbreviation',
'export as csv file': 'exportovat do .csv souboru',
'exposes': 'vystavuje',
'exposes:': 'vystavuje funkce:',
'extends': 'rozšiřuje',
'failed to compile file because:': 'soubor se nepodařilo zkompilovat, protože:',
'FAQ': 'Často kladené dotazy',
'File': 'Soubor',
'file': 'soubor',
'file "%(filename)s" created': 'file "%(filename)s" created',
'file saved on %(time)s': 'soubor uložen %(time)s',
'file saved on %s': 'soubor uložen %s',
'Filename': 'Název souboru',
'filter': 'filtr',
'Find Next': 'Najít další',
'Find Previous': 'Najít předchozí',
'First name': 'Křestní jméno',
'Forgot username?': 'Zapomněl jste svoje přihlašovací jméno?',
'forgot username?': 'zapomněl jste svoje přihlašovací jméno?',
'Forms and Validators': 'Formuláře a validátory',
'Frames': 'Frames',
'Free Applications': 'Aplikace zdarma',
'Functions with no doctests will result in [passed] tests.': 'Functions with no doctests will result in [passed] tests.',
'Generate': 'Vytvořit',
'Get from URL:': 'Stáhnout z internetu:',
'Git Pull': 'Git Pull',
'Git Push': 'Git Push',
'Globals##debug': 'Globální proměnné',
'go!': 'OK!',
'Goto': 'Goto',
'graph model': 'graph model',
'Group %(group_id)s created': 'Skupina %(group_id)s vytvořena',
'Group ID': 'ID skupiny',
'Groups': 'Skupiny',
'Hello World': 'Ahoj světe',
'Help': 'Nápověda',
'Hide/Show Translated strings': 'Skrýt/Zobrazit přeložené texty',
'Hits': 'Kolikrát dosaženo',
'Home': 'Domovská stránka',
'honored only if the expression evaluates to true': 'brát v potaz jen když se tato podmínka vyhodnotí kladně',
'How did you get here?': 'Jak jste se sem vlastně dostal?',
'If start the upgrade, be patient, it may take a while to download': 'If start the upgrade, be patient, it may take a while to download',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.',
'import': 'import',
'Import/Export': 'Import/Export',
'includes': 'zahrnuje',
'Index': 'Index',
'insert new': 'vložit nový záznam ',
'insert new %s': 'vložit nový záznam %s',
'inspect attributes': 'inspect attributes',
'Install': 'Instalovat',
'Installed applications': 'Nainstalované aplikace',
'Interaction at %s line %s': 'Interakce v %s, na řádce %s',
'Interactive console': 'Interaktivní příkazová řádka',
'Internal State': 'Vnitřní stav',
'Introduction': 'Úvod',
'Invalid email': 'Neplatný email',
'Invalid password': 'Nesprávné heslo',
'invalid password.': 'neplatné heslo',
'Invalid Query': 'Neplatný dotaz',
'invalid request': 'Neplatný požadavek',
'Is Active': 'Je aktivní',
'It is %s %%{day} today.': 'Dnes je to %s %%{den}.',
'Key': 'Klíč',
'Key bindings': 'Vazby klíčů',
'Key bindings for ZenCoding Plugin': 'Key bindings for ZenCoding Plugin',
'languages': 'jazyky',
'Languages': 'Jazyky',
'Last name': 'Příjmení',
'Last saved on:': 'Naposledy uloženo:',
'Layout': 'Rozvržení stránky (layout)',
'Layout Plugins': 'Moduly rozvržení stránky (Layout Plugins)',
'Layouts': 'Rozvržení stránek',
'License for': 'Licence pro',
'Line number': 'Číslo řádku',
'LineNo': 'Č.řádku',
'Live Chat': 'Online pokec',
'loading...': 'nahrávám...',
'locals': 'locals',
'Locals##debug': 'Lokální proměnné',
'Logged in': 'Přihlášení proběhlo úspěšně',
'Logged out': 'Odhlášení proběhlo úspěšně',
'Login': 'Přihlásit se',
'login': 'přihlásit se',
'Login to the Administrative Interface': 'Přihlásit se do Správce aplikací',
'logout': 'odhlásit se',
'Logout': 'Odhlásit se',
'Lost Password': 'Zapomněl jste heslo',
'Lost password?': 'Zapomněl jste heslo?',
'lost password?': 'zapomněl jste heslo?',
'Manage': 'Manage',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Model rozbalovací nabídky',
'Models': 'Modely',
'models': 'modely',
'Modified By': 'Změněno - kým',
'Modified On': 'Změněno - kdy',
'Modules': 'Moduly',
'modules': 'moduly',
'My Sites': 'Správa aplikací',
'Name': 'Jméno',
'new application "%s" created': 'nová aplikace "%s" vytvořena',
'New Application Wizard': 'Nový průvodce aplikací',
'New application wizard': 'Nový průvodce aplikací',
'New password': 'Nové heslo',
'New Record': 'Nový záznam',
'new record inserted': 'nový záznam byl založen',
'New simple application': 'Vytvořit primitivní aplikaci',
'next': 'next',
'next 100 rows': 'dalších 100 řádků',
'No databases in this application': 'V této aplikaci nejsou žádné databáze',
'No Interaction yet': 'Ještě žádná interakce nenastala',
'No ticket_storage.txt found under /private folder': 'Soubor ticket_storage.txt v adresáři /private nenalezen',
'Object or table name': 'Objekt či tabulka',
'Old password': 'Původní heslo',
'online designer': 'online návrhář',
'Online examples': 'Příklady online',
'Open new app in new window': 'Open new app in new window',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'nebo importovat z .csv souboru',
'Origin': 'Původ',
'Original/Translation': 'Originál/Překlad',
'Other Plugins': 'Ostatní moduly',
'Other Recipes': 'Ostatní zásuvné moduly',
'Overview': 'Přehled',
'Overwrite installed app': 'Přepsat instalovanou aplikaci',
'Pack all': 'Zabalit',
'Pack compiled': 'Zabalit zkompilované',
'pack plugin': 'pack plugin',
'password': 'heslo',
'Password': 'Heslo',
"Password fields don't match": 'Hesla se neshodují',
'Peeking at file': 'Peeking at file',
'Please': 'Prosím',
'Plugin "%s" in application': 'Plugin "%s" in application',
'plugins': 'zásuvné moduly',
'Plugins': 'Zásuvné moduly',
'Plural Form #%s': 'Plural Form #%s',
'Plural-Forms:': 'Množná čísla:',
'Powered by': 'Poháněno',
'Preface': 'Předmluva',
'previous 100 rows': 'předchozích 100 řádků',
'Private files': 'Soukromé soubory',
'private files': 'soukromé soubory',
'profile': 'profil',
'Project Progress': 'Vývoj projektu',
'Python': 'Python',
'Query:': 'Dotaz:',
'Quick Examples': 'Krátké příklady',
'RAM': 'RAM',
'RAM Cache Keys': 'Klíče RAM Cache',
'Ram Cleared': 'RAM smazána',
'Readme': 'Nápověda',
'Recipes': 'Postupy jak na to',
'Record': 'Záznam',
'record does not exist': 'záznam neexistuje',
'Record ID': 'ID záznamu',
'Record id': 'id záznamu',
'refresh': 'obnovte',
'register': 'registrovat',
'Register': 'Zaregistrovat se',
'Registration identifier': 'Registrační identifikátor',
'Registration key': 'Registrační klíč',
'reload': 'reload',
'Reload routes': 'Znovu nahrát cesty',
'Remember me (for 30 days)': 'Zapamatovat na 30 dní',
'Remove compiled': 'Odstranit zkompilované',
'Removed Breakpoint on %s at line %s': 'Bod přerušení smazán - soubor %s na řádce %s',
'Replace': 'Zaměnit',
'Replace All': 'Zaměnit vše',
'request': 'request',
'Reset Password key': 'Reset registračního klíče',
'response': 'response',
'restart': 'restart',
'restore': 'obnovit',
'Retrieve username': 'Získat přihlašovací jméno',
'return': 'return',
'revert': 'vrátit se k původnímu',
'Role': 'Role',
'Rows in Table': 'Záznamy v tabulce',
'Rows selected': 'Záznamů zobrazeno',
'rules are not defined': 'pravidla nejsou definována',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Spustí testy v tomto souboru (ke spuštění všech testů, použijte tlačítko 'test')",
'Running on %s': 'Běží na %s',
'Save': 'Uložit',
'Save file:': 'Save file:',
'Save via Ajax': 'Uložit pomocí Ajaxu',
'Saved file hash:': 'hash uloženého souboru:',
'Semantic': 'Modul semantic',
'Services': 'Služby',
'session': 'session',
'session expired': 'session expired',
'Set Breakpoint on %s at line %s: %s': 'Bod přerušení nastaven v souboru %s na řádce %s: %s',
'shell': 'příkazová řádka',
'Singular Form': 'Singular Form',
'Site': 'Správa aplikací',
'Size of cache:': 'Velikost cache:',
'skip to generate': 'skip to generate',
'Sorry, could not find mercurial installed': 'Bohužel mercurial není nainstalován.',
'Start a new app': 'Vytvořit novou aplikaci',
'Start searching': 'Začít hledání',
'Start wizard': 'Spustit průvodce',
'state': 'stav',
'Static': 'Static',
'static': 'statické soubory',
'Static files': 'Statické soubory',
'Statistics': 'Statistika',
'Step': 'Step',
'step': 'step',
'stop': 'stop',
'Stylesheet': 'CSS styly',
'submit': 'odeslat',
'Submit': 'Odeslat',
'successful': 'úspěšně',
'Support': 'Podpora',
'Sure you want to delete this object?': 'Opravdu chcete smazat tento objekt?',
'Table': 'tabulka',
'Table name': 'Název tabulky',
'Temporary': 'Dočasný',
'test': 'test',
'Testing application': 'Testing application',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Dotaz" je podmínka, například "db.tabulka1.pole1==\'hodnota\'". Podmínka "db.tabulka1.pole1==db.tabulka2.pole2" pak vytvoří SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'Logika aplikace: každá URL je mapována na funkci vystavovanou kontrolérem.',
'The Core': 'Jádro (The Core)',
'The data representation, define database tables and sets': 'Reprezentace dat: definovat tabulky databáze a záznamy',
'The output of the file is a dictionary that was rendered by the view %s': 'Výstup ze souboru je slovník, který se zobrazil v pohledu %s.',
'The presentations layer, views are also known as templates': 'Prezentační vrstva: pohledy či templaty (šablony)',
'The Views': 'Pohledy (The Views)',
'There are no controllers': 'There are no controllers',
'There are no modules': 'There are no modules',
'There are no plugins': 'Žádné moduly nejsou instalovány.',
'There are no private files': 'Žádné soukromé soubory neexistují.',
'There are no static files': 'There are no static files',
'There are no translators, only default language is supported': 'There are no translators, only default language is supported',
'There are no views': 'There are no views',
'These files are not served, they are only available from within your app': 'Tyto soubory jsou klientům nepřístupné. K dispozici jsou pouze v rámci aplikace.',
'These files are served without processing, your images go here': 'Tyto soubory jsou servírovány bez přídavné logiky, sem patří např. obrázky.',
'This App': 'Tato aplikace',
'This is a copy of the scaffolding application': 'Toto je kopie aplikace skelet.',
'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk',
'This is the %(filename)s template': 'This is the %(filename)s template',
'this page to see if a breakpoint was hit and debug interaction is required.': 'tuto stránku, abyste uviděli, zda se dosáhlo bodu přerušení.',
'Ticket': 'Ticket',
'Ticket ID': 'Ticket ID',
'Time in Cache (h:m:s)': 'Čas v Cache (h:m:s)',
'Timestamp': 'Časové razítko',
'to previous version.': 'k předchozí verzi.',
'To create a plugin, name a file/folder plugin_[name]': 'Zásuvný modul vytvoříte tak, že pojmenujete soubor/adresář plugin_[jméno modulu]',
'To emulate a breakpoint programatically, write:': 'K nastavení bodu přerušení v kódu programu, napište:',
'to use the debugger!': ', abyste mohli ladící program používat!',
'toggle breakpoint': 'vyp./zap. bod přerušení',
'Toggle Fullscreen': 'Na celou obrazovku a zpět',
'too short': 'Příliš krátké',
'Traceback': 'Traceback',
'Translation strings for the application': 'Překlad textů pro aplikaci',
'try something like': 'try something like',
'Try the mobile interface': 'Zkuste rozhraní pro mobilní zařízení',
'try view': 'try view',
'Twitter': 'Twitter',
'Type python statement in here and hit Return (Enter) to execute it.': 'Type python statement in here and hit Return (Enter) to execute it.',
'Type some Python code in here and hit Return (Enter) to execute it.': 'Type some Python code in here and hit Return (Enter) to execute it.',
'Unable to check for upgrades': 'Unable to check for upgrades',
'unable to parse csv file': 'csv soubor nedá sa zpracovat',
'uncheck all': 'vše odznačit',
'Uninstall': 'Odinstalovat',
'update': 'aktualizovat',
'update all languages': 'aktualizovat všechny jazyky',
'Update:': 'Upravit:',
'Upgrade': 'Upgrade',
'upgrade now': 'upgrade now',
'upgrade now to %s': 'upgrade now to %s',
'upload': 'nahrát',
'Upload': 'Upload',
'Upload a package:': 'Nahrát balík:',
'Upload and install packed application': 'Nahrát a instalovat zabalenou aplikaci',
'upload file:': 'nahrát soubor:',
'upload plugin file:': 'nahrát soubor modulu:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použijte (...)&(...) pro AND, (...)|(...) pro OR a ~(...) pro NOT pro sestavení složitějších dotazů.',
'User %(id)s Logged-in': 'Uživatel %(id)s přihlášen',
'User %(id)s Logged-out': 'Uživatel %(id)s odhlášen',
'User %(id)s Password changed': 'Uživatel %(id)s změnil heslo',
'User %(id)s Profile updated': 'Uživatel %(id)s upravil profil',
'User %(id)s Registered': 'Uživatel %(id)s se zaregistroval',
'User %(id)s Username retrieved': 'Uživatel %(id)s si nachal zaslat přihlašovací jméno',
'User ID': 'ID uživatele',
'Username': 'Přihlašovací jméno',
'variables': 'variables',
'Verify Password': 'Zopakujte heslo',
'Version': 'Verze',
'Version %s.%s.%s (%s) %s': 'Verze %s.%s.%s (%s) %s',
'Versioning': 'Verzování',
'Videos': 'Videa',
'View': 'Pohled (View)',
'Views': 'Pohledy',
'views': 'pohledy',
'Web Framework': 'Web Framework',
'web2py is up to date': 'Máte aktuální verzi web2py.',
'web2py online debugger': 'Ladící online web2py program',
'web2py Recent Tweets': 'Štěbetání na Twitteru o web2py',
'web2py upgrade': 'web2py upgrade',
'web2py upgraded; please restart it': 'web2py upgraded; please restart it',
'Welcome': 'Vítejte',
'Welcome to web2py': 'Vitejte ve web2py',
'Welcome to web2py!': 'Vítejte ve web2py!',
'Which called the function %s located in the file %s': 'která zavolala funkci %s v souboru (kontroléru) %s.',
'You are successfully running web2py': 'Úspěšně jste spustili web2py.',
'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button': 'Nastavovat a mazat body přerušení je též možno v rámci editování zdrojového souboru přes tlačítko Vyp./Zap. bod přerušení',
'You can modify this application and adapt it to your needs': 'Tuto aplikaci si můžete upravit a přizpůsobit ji svým potřebám.',
'You need to set up and reach a': 'Je třeba nejprve nastavit a dojít až na',
'You visited the url %s': 'Navštívili jste stránku %s,',
'Your application will be blocked until you click an action button (next, step, continue, etc.)': 'Aplikace bude blokována než se klikne na jedno z tlačítek (další, krok, pokračovat, atd.)',
'You can inspect variables using the console bellow': 'Níže pomocí příkazové řádky si můžete prohlédnout proměnné',
}
| gpl-2.0 |
knxd/pKNyX | pyknyx/examples/3_weather/weather/fb/sun.py | 2 | 7957 | # -*- coding: utf-8 -*-
""" Python KNX framework
License
=======
- B{PyKNyX} (U{https://github.com/knxd/pyknyx}) is Copyright:
- © 2016-2017 Matthias Urlichs
- PyKNyX is a fork of pKNyX
- © 2013-2015 Frédéric Mantegazza
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see:
- U{http://www.gnu.org/licenses/gpl.html}
Module purpose
==============
Sun position management
Implements
==========
- B{Sun}
Documentation
=============
Usage
=====
@author: Frédéric Mantegazza
@copyright: (C) 2013-2015 Frédéric Mantegazza
@license: GPL
"""
import sys
import math
import time
from pyknyx.common.utils import dd2dms, dms2dd
class Sun(object):
    """ Sun behaviour class.

    Computes the sun's position (julian day, sidereal time, equatorial and
    azimuthal coordinates) for an observer at a given latitude/longitude,
    taking the local time zone and daylight saving time offsets into account.
    """
    def __init__(self, latitude, longitude, timeZone, savingTime):
        """ Init the Sun object.

        @param latitude: observer latitude, in decimal degrees (north positive)
        @type latitude: float

        @param longitude: observer longitude, in decimal degrees
        @type longitude: float

        @param timeZone: time zone offset from UTC, in hours
        @type timeZone: int

        @param savingTime: daylight saving time offset, in hours
        @type savingTime: int
        """
        super(Sun, self).__init__()
        self._latitude = latitude
        self._longitude = longitude
        self._timeZone = timeZone
        self._savingTime = savingTime

    @property
    def latitude(self):
        # Fixed: the getter was missing its 'return' and always yielded None.
        return self._latitude

    @latitude.setter
    def latitude(self, latitude):
        self._latitude = latitude

    @property
    def longitude(self):
        # Fixed: missing 'return'.
        return self._longitude

    @longitude.setter
    def longitude(self, longitude):
        self._longitude = longitude

    @property
    def timeZone(self):
        # Fixed: missing 'return'.
        return self._timeZone

    # Fixed: this setter was decorated '@latitude.setter', which rebound the
    # 'timeZone' name to a copy of the *latitude* property and silently
    # discarded the timeZone getter defined just above.
    @timeZone.setter
    def timeZone(self, timeZone):
        self._timeZone = timeZone

    @property
    def savingTime(self):
        # Fixed: missing 'return'.
        return self._savingTime

    @savingTime.setter
    def savingTime(self, savingTime):
        self._savingTime = savingTime

    def computeJulianDay(self, year, month, day, hour, minute, second):
        """ Compute the julian day, relative to the J2000.0 epoch.

        @return: julian day offset from JD 2451545.0 (2000-01-01 12:00 UT)
        @rtype: float
        """
        day += hour / 24. + minute / 1440. + second / 86400.
        if month in (1, 2):
            # January/February are counted as months 13/14 of the previous
            # year (standard julian day algorithm).
            year -= 1
            month += 12
        a = int(year / 100.)
        b = 2 - a + int(a / 4.)  # Gregorian calendar correction
        julianDay = int(365.25 * (year + 4716.)) + int(30.6001 * (month + 1)) + day + b - 1524.5
        julianDay -= (self._timeZone + self._savingTime) / 24.  # local time -> UTC
        julianDay -= 2451545.  # offset from the J2000.0 epoch (JD 2451545.0)
        return julianDay

    def siderealTime(self, julianDay):
        """ Compute the sidereal time, in hours.

        @param julianDay: J2000-relative julian day, as returned by
                          computeJulianDay()
        @type julianDay: float
        """
        centuries = julianDay / 36525.
        siderealTime = (24110.54841 + (8640184.812866 * centuries) + (0.093104 * (centuries ** 2)) - (0.0000062 * (centuries ** 3))) / 3600.
        # Reduce to a 24 hour range (sign follows the truncating division).
        siderealTime = ((siderealTime / 24.) - int(siderealTime / 24.)) * 24.
        return siderealTime

    def equatorialCoordinates(self, year, month, day, hour, minute, second):
        """ Compute the sun's equatorial coordinates.

        @return: rightAscension (hours), declination (degrees)
        @rtype: tuple of float
        """
        julianDay = self.computeJulianDay(year, month, day, hour, minute, second)
        g = 357.529 + 0.98560028 * julianDay  # mean anomaly (degrees)
        q = 280.459 + 0.98564736 * julianDay  # mean longitude (degrees)
        l = q + 1.915 * math.sin(math.radians(g)) + 0.020 * math.sin(math.radians(2 * g))  # apparent longitude
        e = 23.439 - 0.00000036 * julianDay  # obliquity of the ecliptic (degrees)
        rightAscension = math.degrees(math.atan(math.cos(math.radians(e)) * math.sin(math.radians(l)) / math.cos(math.radians(l)))) / 15.
        # atan() only spans half a turn; shift into the correct quadrant.
        if math.cos(math.radians(l)) < 0.:
            rightAscension += 12.
        if math.cos(math.radians(l)) > 0. and math.sin(math.radians(l)) < 0.:
            rightAscension += 24.
        declination = math.degrees(math.asin(math.sin(math.radians(e)) * math.sin(math.radians(l))))
        return rightAscension, declination

    def azimuthalCoordinates(self, year, month, day, hour, minute, second):
        """ Compute the sun's azimuthal coordinates.

        @return: elevation (degrees), azimuth (degrees, in [0, 360])
        @rtype: tuple of float
        """
        julianDay = self.computeJulianDay(year, month, day, hour, minute, second)
        siderealTime = self.siderealTime(julianDay)
        angleH = 360. * siderealTime / 23.9344
        angleT = (hour - (self._timeZone + self._savingTime) - 12. + minute / 60. + second / 3600.) * 360. / 23.9344
        angle = angleH + angleT
        rightAscension, declination = self.equatorialCoordinates(year, month, day, hour, minute, second)
        hourAngle = angle - rightAscension * 15. + self._longitude  # was 'angle_horaire'
        sinElevation = math.sin(math.radians(declination)) * math.sin(math.radians(self._latitude)) - math.cos(math.radians(declination)) * math.cos(math.radians(self._latitude)) * math.cos(math.radians(hourAngle))
        # Clamp to [-1, 1]: floating point rounding can push the value just
        # outside the asin()/acos() domain near culminations, raising
        # ValueError in the original code.
        sinElevation = min(1., max(-1., sinElevation))
        elevation = math.degrees(math.asin(sinElevation))
        cosAzimuth = (math.sin(math.radians(declination)) - math.sin(math.radians(self._latitude)) * math.sin(math.radians(elevation))) / (math.cos(math.radians(self._latitude)) * math.cos(math.radians(elevation)))
        cosAzimuth = min(1., max(-1., cosAzimuth))
        azimuth = math.degrees(math.acos(cosAzimuth))
        sinazimuth = (math.cos(math.radians(declination)) * math.sin(math.radians(hourAngle))) / math.cos(math.radians(elevation))
        if (sinazimuth < 0.):
            azimuth = 360. - azimuth
        return elevation, azimuth

    def generateAzimuthal(self, year, month, day, step=3600):
        """ Generate azimuthal coordinates for all day.

        @param step: step generation (s)
        @type step: int

        @return: azimuthal coordinates for all day
        @rtype: list of (elevation, azimuth) tuples
        """
        coords = []
        # range() instead of xrange(): Python 3 compatibility (the rest of the
        # module already uses print() functions).
        for second in range(0, 3600 * 24, step):
            h = int(second / 3600.)
            m = int((second - h * 3600.) / 60.)
            s = int(second - h * 3600. - m * 60.)
            elevation, azimuth = self.azimuthalCoordinates(year, month, day, h, m, s)
            coords.append((elevation, azimuth))
        return coords
def main():
    """ Command line demo: print the sun's position either for the current
    moment or for a date/hour passed as 'year month day hour' arguments.
    Location and time offsets are hard-coded below.
    """
    sun = Sun(latitude=45., longitude=5., timeZone=1, savingTime=1)
    # Get current date/time, from the command line or from the system clock
    if len(sys.argv) == 5:
        tm_year = int(sys.argv[1])
        tm_mon = int(sys.argv[2])
        tm_day = int(sys.argv[3])
        tm_hour = int(sys.argv[4])
        tm_min, tm_sec = 0, 0
    else:
        tm_year, tm_mon, tm_day, tm_hour, tm_min, tm_sec, tm_wday, tm_yday, tm_isdst = time.localtime()
    print(u"Date/hour : %s" % time.ctime())
    # Compute julian day
    julianDay = sun.computeJulianDay(tm_year, tm_mon, tm_day, tm_hour, tm_min, tm_sec)
    print(u"Julian day : %14.6f" % julianDay)
    # Compute Sidereal time
    siderealTime = sun.siderealTime(julianDay)
    print(u"Sidereal time : %.6f" % siderealTime)
    print("")
    # Compute equatorial coordinates (printed in decimal and deg/min/sec form)
    rightAscension, declination = sun.equatorialCoordinates(tm_year, tm_mon, tm_day, tm_hour, tm_min, tm_sec)
    d, m, s = dd2dms(rightAscension)
    print(u"Right ascension : %10.6f° (%3d°%02d'%06.3f\")" % (rightAscension, d, m, s))
    d, m, s = dd2dms(declination)
    print(u"Declination : %10.6f° (%3d°%02d'%06.3f\")" % (declination, d, m, s))
    print("")
    # Compute azimuthal coordinates (printed in decimal and deg/min/sec form)
    elevation, azimuth = sun.azimuthalCoordinates(tm_year, tm_mon, tm_day, tm_hour, tm_min, tm_sec)
    d, m, s = dd2dms(elevation)
    print(u"Elevation : %10.6f° (%3d°%02d'%06.3f\")" % (elevation, d, m, s))
    d, m, s = dd2dms(azimuth)
    print(u"Azimuth : %10.6f° (%3d°%02d'%06.3f\")" % (azimuth, d, m, s))
    # Unit direction vector components derived from azimuth/elevation
    # (debug output)
    from math import cos, sin, radians
    print("")
    print(cos(radians(azimuth - 90)) * cos(radians(elevation)))
    print(sin(radians(azimuth - 90)) * cos(radians(elevation)))
    print(sin(radians(elevation)))
    print("")
if __name__ == "__main__":
    main()
| gpl-3.0 |
RonaldinhoL/googletest | test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform flags used to decide which flags are exercised below.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
# Path to the gtest_env_var_test_ helper binary built alongside this script.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
# Mutable copy of the environment; the tests tweak this copy (passed to the
# subprocess) rather than os.environ itself.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, after printing both values, unless
  expected == actual.
  """
  if expected != actual:
    print 'Expected: %s' % (expected,)
    print ' Actual: %s' % (actual,)
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop() with a default deletes the key if present and is a no-op
    # otherwise, matching the original guarded 'del'.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  # Append the flag to the command line only when one was supplied.
  cmd_line = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(cmd_line, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  variable = 'GTEST_' + flag.upper()
  # With the variable set, the flag must report the overridden value.
  SetEnvVar(variable, test_val)
  AssertEq(test_val, GetFlag(flag))
  # With the variable cleared, the flag must fall back to its default.
  SetEnvVar(variable, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # (flag, value set via GTEST_<FLAG>, expected default) triples,
    # checked in the same order as before.
    flag_specs = [
        ('break_on_failure', '1', '0'),
        ('color', 'yes', 'auto'),
        ('filter', 'FooTest.Bar', '*'),
        ('output', 'xml:tmp/foo.xml', ''),
        ('print_time', '0', '1'),
        ('repeat', '999', '1'),
        ('throw_on_failure', '1', '0'),
        ('death_test_style', 'threadsafe', 'fast'),
        ('catch_exceptions', '0', '1'),
    ]
    if IS_LINUX:
      # These two flags only exist on Linux builds.
      flag_specs += [
          ('death_test_use_fork', '1', '0'),
          ('stack_trace_depth', '0', '100'),
      ]
    for flag, value, default in flag_specs:
      TestFlag(flag, value, default)
if __name__ == '__main__':
  # Delegates to the shared test runner, which executes the TestCase above.
  gtest_test_utils.Main()
| bsd-3-clause |
spinellic/Mission-Planner | Lib/textwrap.py | 53 | 17265 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
# Public API of this module.
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']

# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
# Contents: tab, newline, vertical tab, form feed, carriage return, space.
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
    """
    Object for wrapping/filling text.  The public interface consists of
    the wrap() and fill() methods; the other methods are just there for
    subclasses to override in order to tweak the default behaviour.
    If you want to completely replace the main wrapping algorithm,
    you'll probably have to override _wrap_chunks().

    Several instance attributes control various aspects of wrapping:
      width (default: 70)
        the maximum width of wrapped lines (unless break_long_words
        is false)
      initial_indent (default: "")
        string that will be prepended to the first line of wrapped
        output.  Counts towards the line's width.
      subsequent_indent (default: "")
        string that will be prepended to all lines save the first
        of wrapped output; also counts towards each line's width.
      expand_tabs (default: true)
        Expand tabs in input text to spaces before further processing.
        Each tab will become 1 .. 8 spaces, depending on its position in
        its line.  If false, each tab is treated as a single character.
      replace_whitespace (default: true)
        Replace all whitespace characters in the input text by spaces
        after tab expansion.  Note that if expand_tabs is false and
        replace_whitespace is true, every tab will be converted to a
        single space!
      fix_sentence_endings (default: false)
        Ensure that sentence-ending punctuation is always followed
        by two spaces.  Off by default because the algorithm is
        (unavoidably) imperfect.
      break_long_words (default: true)
        Break words longer than 'width'.  If false, those words will not
        be broken, and some lines might be longer than 'width'.
      break_on_hyphens (default: true)
        Allow breaking hyphenated words.  If true, wrapping will occur
        preferably on whitespaces and right after hyphens part of
        compound words.
      drop_whitespace (default: true)
        Drop leading and trailing whitespace from lines.
    """

    # Python 2 detail: str.translate() takes a 256-char table built by
    # string.maketrans(), while unicode.translate() takes an ord->ord
    # mapping -- hence the two parallel translation tables below.
    whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))

    unicode_whitespace_trans = {}
    uspace = ord(u' ')
    for x in map(ord, _whitespace):
        unicode_whitespace_trans[x] = uspace

    # This funky little regex is just the trick for splitting
    # text up into word-wrappable chunks.  E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
    # (after stripping out empty strings).
    wordsep_re = re.compile(
        r'(\s+|'                                 # any whitespace
        r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|'  # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')  # em-dash

    # This less funky little regex just split on recognized spaces. E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
    wordsep_simple_re = re.compile(r'(\s+)')

    # XXX this is not locale- or charset-aware -- string.lowercase
    # is US-ASCII only (and therefore English-only)
    sentence_end_re = re.compile(r'[%s]'    # lowercase letter
                                 r'[\.\!\?]'  # sentence-ending punct.
                                 r'[\"\']?'   # optional end-of-quote
                                 r'\Z'        # end of chunk
                                 % string.lowercase)

    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 replace_whitespace=True,
                 fix_sentence_endings=False,
                 break_long_words=True,
                 drop_whitespace=True,
                 break_on_hyphens=True):
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.expand_tabs = expand_tabs
        self.replace_whitespace = replace_whitespace
        self.fix_sentence_endings = fix_sentence_endings
        self.break_long_words = break_long_words
        self.drop_whitespace = drop_whitespace
        self.break_on_hyphens = break_on_hyphens

        # recompile the regexes for Unicode mode -- done in this clumsy way for
        # backwards compatibility because it's rather common to monkey-patch
        # the TextWrapper class' wordsep_re attribute.
        self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
        self.wordsep_simple_re_uni = re.compile(
            self.wordsep_simple_re.pattern, re.U)

    # -- Private methods -----------------------------------------------
    # (possibly useful for subclasses to override)

    def _munge_whitespace(self, text):
        """_munge_whitespace(text : string) -> string

        Munge whitespace in text: expand tabs and convert all other
        whitespace characters to spaces.  Eg. " foo\tbar\n\nbaz"
        becomes " foo bar baz".
        """
        if self.expand_tabs:
            text = text.expandtabs()
        if self.replace_whitespace:
            # str and unicode need different translation-table types; see
            # the class-level tables above.
            if isinstance(text, str):
                text = text.translate(self.whitespace_trans)
            elif isinstance(text, unicode):
                text = text.translate(self.unicode_whitespace_trans)
        return text

    def _split(self, text):
        """_split(text : string) -> [string]

        Split the text to wrap into indivisible chunks.  Chunks are
        not quite the same as words; see _wrap_chunks() for full
        details.  As an example, the text
          Look, goof-ball -- use the -b option!
        breaks into the following chunks:
          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
        if break_on_hyphens is True, or in:
          'Look,', ' ', 'goof-ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', option!'
        otherwise.
        """
        # Pick the pattern matching both the text's type (unicode vs str)
        # and the break_on_hyphens setting.
        if isinstance(text, unicode):
            if self.break_on_hyphens:
                pat = self.wordsep_re_uni
            else:
                pat = self.wordsep_simple_re_uni
        else:
            if self.break_on_hyphens:
                pat = self.wordsep_re
            else:
                pat = self.wordsep_simple_re
        chunks = pat.split(text)
        chunks = filter(None, chunks)  # remove empty chunks
        return chunks

    def _fix_sentence_endings(self, chunks):
        """_fix_sentence_endings(chunks : [string])

        Correct for sentence endings buried in 'chunks'.  Eg. when the
        original text contains "... foo.\nBar ...", munge_whitespace()
        and split() will convert that to [..., "foo.", " ", "Bar", ...]
        which has one too few spaces; this method simply changes the one
        space to two.
        """
        i = 0
        patsearch = self.sentence_end_re.search
        while i < len(chunks)-1:
            if chunks[i+1] == " " and patsearch(chunks[i]):
                # Restore the double space after a sentence end.  (The
                # replacement literal had been mangled to a single space,
                # which made this branch a no-op contrary to the contract
                # stated in the docstring above.)
                chunks[i+1] = "  "
                i += 2
            else:
                i += 1

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)

        Handle a chunk of text (most likely a word, not whitespace) that
        is too long to fit in any line.
        """
        # Figure out when indent is larger than the specified width, and make
        # sure at least one character is stripped off on every pass
        if width < 1:
            space_left = 1
        else:
            space_left = width - cur_len

        # If we're allowed to break long words, then do so: put as much
        # of the next chunk onto the current line as will fit.
        if self.break_long_words:
            cur_line.append(reversed_chunks[-1][:space_left])
            reversed_chunks[-1] = reversed_chunks[-1][space_left:]

        # Otherwise, we have to preserve the long word intact.  Only add
        # it to the current line if there's nothing already there --
        # that minimizes how much we violate the width constraint.
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())

        # If we're not allowed to break long words, and there's already
        # text on the current line, do nothing.  Next time through the
        # main loop of _wrap_chunks(), we'll wind up here again, but
        # cur_len will be zero, so the next line will be entirely
        # devoted to the long word that we can't handle right now.

    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less.  (If 'break_long_words' is false,
        some lines may be longer than this.)  Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks.  Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)

        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
        chunks.reverse()

        while chunks:

            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0

            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent

            # Maximum width for this line.
            width = self.width - len(indent)

            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]

            while chunks:
                l = len(chunks[-1])

                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l

                # Nope, this line is full.
                else:
                    break

            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)

            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]

            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))

        return lines

    # -- Public interface ----------------------------------------------

    def wrap(self, text):
        """wrap(text : string) -> [string]

        Reformat the single paragraph in 'text' so it fits in lines of
        no more than 'self.width' columns, and return a list of wrapped
        lines.  Tabs in 'text' are expanded with string.expandtabs(),
        and all other whitespace characters (including newline) are
        converted to space.
        """
        text = self._munge_whitespace(text)
        chunks = self._split(text)
        if self.fix_sentence_endings:
            self._fix_sentence_endings(chunks)
        return self._wrap_chunks(chunks)

    def fill(self, text):
        """fill(text : string) -> string

        Reformat the single paragraph in 'text' to fit in lines of no
        more than 'self.width' columns, and return a new string
        containing the entire wrapped paragraph.
        """
        return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Reformat the single paragraph in 'text' so it fits in lines of no
    more than 'width' columns, and return a list of wrapped lines.  By
    default, tabs in 'text' are expanded with string.expandtabs(), and
    all other whitespace characters (including newline) are converted to
    space.  See TextWrapper class for available keyword args to customize
    wrapping behaviour.
    """
    # Build a throwaway wrapper configured from our arguments and use it.
    return TextWrapper(width=width, **kwargs).wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Reformat the single paragraph in 'text' to fit in lines of no more
    than 'width' columns, and return a new string containing the entire
    wrapped paragraph.  As with wrap(), tabs are expanded and other
    whitespace characters converted to space.  See TextWrapper class for
    available keyword args to customize wrapping behaviour.
    """
    # Build a throwaway wrapper configured from our arguments and use it.
    return TextWrapper(width=width, **kwargs).fill(text)
# -- Loosely related functionality -------------------------------------
# Lines made up entirely of blanks/tabs: they carry no indentation
# information and are normalized to empty by dedent().
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
# Captures the leading blank/tab run of every line that has content.
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\thello" are
    considered to have no common leading whitespace.  (This behaviour is
    new in Python 2.5; older versions of this module incorrectly
    expanded tabs before searching for common leading whitespace.)
    """
    # Look for the longest leading string of spaces and tabs common to
    # all lines.
    margin = None
    # Whitespace-only lines carry no indentation information; blank them
    # first so they cannot shrink the computed margin.
    text = _whitespace_only_re.sub('', text)
    indents = _leading_whitespace_re.findall(text)
    for indent in indents:
        if margin is None:
            margin = indent

        # Current line more deeply indented than previous winner:
        # no change (previous winner is still on top).
        elif indent.startswith(margin):
            pass

        # Current line consistent with and no deeper than previous winner:
        # it's the new winner.
        elif margin.startswith(indent):
            margin = indent

        # Current line and previous winner have no common whitespace:
        # there is no margin.
        else:
            margin = ""
            break

    # (The old 'if 0 and margin:' debugging sanity check was removed:
    # its condition was constant-false, so the block was unreachable.)
    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
if __name__ == "__main__":
    # Tiny manual smoke test (Python 2 print statement); only runs when
    # the module is executed directly.
    #print dedent("\tfoo\n\tbar")
    #print dedent(" \thello there\n \t how are you?")
    print dedent("Hello there.\n This is indented.")
| gpl-3.0 |
alexpotter1/Neutron_msm8974_hammerhead | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Global state shared by the generator helpers below.
tcm_dir = ""               # root of the kernel tree to scan (see tcm_mod_scan_fabric_ops)
fabric_ops = []            # function-pointer lines harvested from target_core_fabric.h
fabric_mod_dir = ""        # output directory for the generated fabric module
fabric_mod_port = ""       # target-side prefix: "lport" (FC) or "tport" (SAS/iSCSI)
fabric_mod_init_port = ""  # initiator-side prefix: "nport" (FC) or "iport" (SAS/iSCSI)
def tcm_mod_err(msg):
    """Print the error message 'msg' and terminate with exit status 1.

    Raises:
        SystemExit: always, with code 1.
    """
    # Parenthesized single-argument print works identically under
    # Python 2 (print statement) and Python 3 (print function).
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric-module output directory if it does not exist.

    Returns:
        1 when the directory already exists, None after creating it.
        Exits via tcm_mod_err() when creation fails.
    """
    # Idiomatic truth test instead of comparing a bool with '== True'.
    if os.path.isdir(fabric_mod_dir_var):
        return 1

    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError:
        # os.mkdir() returns None and signals failure by raising OSError,
        # so the original 'ret = os.mkdir(...); if ret:' check could never
        # fire -- failures escaped as a raw traceback instead of reaching
        # this intended error path.
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h with FC-flavoured nacl/tpg/lport
    structs, and select the "lport"/"nport" naming used by the other
    generators (via the fabric_mod_port / fabric_mod_init_port globals).
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Emit the generated C header: version/namelen defines plus the three
    # per-fabric structs wrapping TCM's se_node_acl/se_portal_group/se_wwn.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    # FC uses lport/nport naming in the later generators.
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h with SAS-flavoured nacl/tpg/tport
    structs, and select the "tport"/"iport" naming used by the other
    generators (via the fabric_mod_port / fabric_mod_init_port globals).
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Emit the generated C header: version/namelen defines plus the three
    # per-fabric structs wrapping TCM's se_node_acl/se_portal_group/se_wwn.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    # SAS uses tport/iport naming in the later generators.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h with iSCSI-flavoured nacl/tpg/tport
    structs (name-based, no binary WWPN fields), and select the
    "tport"/"iport" naming used by the other generators.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Emit the generated C header: version/namelen defines plus the three
    # per-fabric structs wrapping TCM's se_node_acl/se_portal_group/se_wwn.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    # iSCSI uses tport/iport naming in the later generators.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the per-protocol _base.h generator for 'proto_ident'."""
    builders = {
        "FC": tcm_mod_build_FC_include,
        "SAS": tcm_mod_build_SAS_include,
        "iSCSI": tcm_mod_build_iSCSI_include,
    }
    builder = builders.get(proto_ident)
    if builder is None:
        # Same diagnostic and exit status as before for unknown protocols.
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    builder(fabric_mod_dir_val, fabric_mod_name)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c: the TCM configfs glue.

    Emits the make/drop callbacks for node ACLs, TPGs and WWN ports, a
    target_core_fabric_ops table wired to the <fabric_mod_name>_fabric.h
    prototypes, and module init/exit boilerplate.  proto_ident ("FC",
    "SAS" or "iSCSI") only changes whether the WWPN-parsing snippets are
    emitted (FC and SAS carry binary WWPNs; iSCSI does not).
    """
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # -- headers plus the module-local target_fabric_configfs pointer --
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

    # -- node ACL make/drop callbacks --
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += " struct se_portal_group *se_tpg,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        # Only FC/SAS fabrics carry a binary WWPN in the generated code.
        buf += " u64 wwpn = 0;\n"
    buf += " u32 nexus_depth;\n\n"
    buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n"
    buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += " if (!se_nacl_new)\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += " nexus_depth = 1;\n"
    buf += " /*\n"
    buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += " * when converting a NodeACL from demo mode -> explict\n"
    buf += " */\n"
    buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += " name, nexus_depth);\n"
    buf += " if (IS_ERR(se_nacl)) {\n"
    buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += " return se_nacl;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += " */\n"
    buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
        buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return se_nacl;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += " kfree(nacl);\n"
    buf += "}\n\n"

    # -- TPG make/drop callbacks --
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += " &tpg->se_tpg, (void *)tpg,\n"
    buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"

    # -- WWN port make/drop callbacks --
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
        buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"

    # -- version attribute shown under the fabric's configfs wwn dir --
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"

    # -- the target_core_fabric_ops table (names match <mod>_fabric.h) --
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
    buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += " .fabric_post_link = NULL,\n"
    buf += " .fabric_pre_unlink = NULL,\n"
    buf += " .fabric_make_np = NULL,\n"
    buf += " .fabric_drop_np = NULL,\n"
    buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"

    # -- configfs registration/deregistration plus module init/exit --
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += " struct target_fabric_configfs *fabric;\n"
    buf += " int ret;\n\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += " /*\n"
    buf += " * Register the top level struct config_item_type with TCM core\n"
    buf += " */\n"
    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += " if (IS_ERR(fabric)) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += " return PTR_ERR(fabric);\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += " */\n"
    buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += " /*\n"
    buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += " */\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += " /*\n"
    buf += " * Register the fabric for use within TCM\n"
    buf += " */\n"
    buf += " ret = target_fabric_configfs_register(fabric);\n"
    buf += " if (ret < 0) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += " return ret;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup our local pointer to *fabric\n"
    buf += " */\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
    buf += " return;\n\n"
    buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " int ret;\n\n"
    buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += " if (ret < 0)\n"
    buf += " return ret;\n\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " " + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Scan target_core_fabric.h and collect fabric_ops function pointers.

    Reads the kernel header under *tcm_dir* line by line and appends every
    line containing a function-pointer declaration ('(*') to the module-level
    ``fabric_ops`` list (defined elsewhere in this file).  The collected
    lines drive the code generation in tcm_mod_dump_fabric_ops().

    NOTE(review): the process_fo flag logic is quirky — once the
    'struct target_core_fabric_ops {' line is seen (or, in fact, once any
    non-matching first line is seen), every subsequent line to EOF is
    scanned; nothing stops at the closing '};'.  Preserved as-is.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;
    p = open(fabric_ops_api, 'r')
    line = p.readline()
    while line:
        # Skip the struct declaration line itself, then start collecting.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        if process_fo == 0:
            # First line after the struct opener: flip into collect mode and
            # handle this one line inline before falling into the main path.
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())
    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Emit <mod>_fabric.c and <mod>_fabric.h skeletons for the new fabric.

    For every function pointer collected by tcm_mod_scan_fabric_ops() into
    the module-level ``fabric_ops`` list, a stub C function is appended to
    ``buf`` (the .c body) and its prototype to ``bufi`` (the .h body).
    *proto_ident* ("FC", "SAS" or "iSCSI") selects which transport helpers
    the protocol-dependent stubs delegate to.  ``fabric_mod_port`` is a
    module-level global set up by earlier code — presumably "lport"/"tport"
    style; not visible here.

    NOTE(review): the ``if not p`` / ``if ret`` checks are dead code under
    Python 2 (open() raises on failure; file.write() returns None).
    """
    buf = ""
    bufi = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f
    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi
    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)
    # Fixed preamble: kernel / SCSI / target-core includes for the .c file.
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    # Unconditional helpers used by the configfs template callbacks.
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "    return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "    return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
    # One stub per scanned fabric_ops function pointer.
    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo
        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            # Strip the leading "tcm_" prefix for the user-visible name.
            buf += "    return \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue
        if re.search('get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "            struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "    struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "    u8 proto_id;\n\n"
            buf += "    switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            # Protocol-dependent delegation to the transport helper.
            if proto_ident == "FC":
                buf += "    case SCSI_PROTOCOL_FCP:\n"
                buf += "    default:\n"
                buf += "        proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += "        break;\n"
            elif proto_ident == "SAS":
                buf += "    case SCSI_PROTOCOL_SAS:\n"
                buf += "    default:\n"
                buf += "        proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += "        break;\n"
            elif proto_ident == "iSCSI":
                buf += "    case SCSI_PROTOCOL_ISCSI:\n"
                buf += "    default:\n"
                buf += "        proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += "        break;\n"
            buf += "    }\n\n"
            buf += "    return proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "            struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "    struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += "    return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "            struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "    return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
        if re.search('get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "    return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
        if re.search('get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += "    struct se_portal_group *se_tpg,\n"
            buf += "    struct se_node_acl *se_nacl,\n"
            buf += "    struct t10_pr_registration *pr_reg,\n"
            buf += "    int *format_code,\n"
            buf += "    unsigned char *buf)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "            struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "    struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "    int ret = 0;\n\n"
            buf += "    switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "    case SCSI_PROTOCOL_FCP:\n"
                buf += "    default:\n"
                buf += "        ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "                    format_code, buf);\n"
                buf += "        break;\n"
            elif proto_ident == "SAS":
                buf += "    case SCSI_PROTOCOL_SAS:\n"
                buf += "    default:\n"
                buf += "        ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "                    format_code, buf);\n"
                buf += "        break;\n"
            elif proto_ident == "iSCSI":
                buf += "    case SCSI_PROTOCOL_ISCSI:\n"
                buf += "    default:\n"
                buf += "        ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "                    format_code, buf);\n"
                buf += "        break;\n"
            buf += "    }\n\n"
            buf += "    return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += "            struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += "            int *, unsigned char *);\n"
        if re.search('get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += "    struct se_portal_group *se_tpg,\n"
            buf += "    struct se_node_acl *se_nacl,\n"
            buf += "    struct t10_pr_registration *pr_reg,\n"
            buf += "    int *format_code)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "            struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "    struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "    int ret = 0;\n\n"
            buf += "    switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "    case SCSI_PROTOCOL_FCP:\n"
                buf += "    default:\n"
                buf += "        ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "                    format_code);\n"
                buf += "        break;\n"
            elif proto_ident == "SAS":
                buf += "    case SCSI_PROTOCOL_SAS:\n"
                buf += "    default:\n"
                buf += "        ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "                    format_code);\n"
                buf += "        break;\n"
            elif proto_ident == "iSCSI":
                buf += "    case SCSI_PROTOCOL_ISCSI:\n"
                buf += "    default:\n"
                buf += "        ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "                    format_code);\n"
                buf += "        break;\n"
            buf += "    }\n\n"
            buf += "    return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += "            struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += "            int *);\n"
        if re.search('parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += "    struct se_portal_group *se_tpg,\n"
            buf += "    const char *buf,\n"
            buf += "    u32 *out_tid_len,\n"
            buf += "    char **port_nexus_ptr)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "            struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "    struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "    char *tid = NULL;\n\n"
            buf += "    switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            # NOTE(review): unlike the other switch emitters above, these
            # branches emit no "break;" — matches the upstream template.
            if proto_ident == "FC":
                buf += "    case SCSI_PROTOCOL_FCP:\n"
                buf += "    default:\n"
                buf += "        tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "                    port_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += "    case SCSI_PROTOCOL_SAS:\n"
                buf += "    default:\n"
                buf += "        tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "                    port_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += "    case SCSI_PROTOCOL_ISCSI:\n"
                buf += "    default:\n"
                buf += "        tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "                    port_nexus_ptr);\n"
            buf += "    }\n\n"
            buf += "    return tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += "            const char *, u32 *, char **);\n"
        if re.search('alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += "    nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += "    if (!nacl) {\n"
            buf += "        printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += "        return NULL;\n"
            buf += "    }\n\n"
            buf += "    return &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
        if re.search('release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += "    struct se_portal_group *se_tpg,\n"
            buf += "    struct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += "    struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += "            struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += "    kfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += "            struct se_node_acl *);\n"
        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "    return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "    return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
        if re.search('stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += "    return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
        if re.search('fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "    return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
        if re.search('sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += "    return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
        if re.search('get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
        if re.search('queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
        if re.search('get_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
        if re.search('set_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
        if re.search('is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "    return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
    # Flush the generated .c and .h bodies to disk.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Write the new fabric module's kbuild Makefile.

    Creates <fabric_mod_dir_var>/Makefile listing the two generated object
    files and the obj-$(CONFIG_...) rule for the module.

    Fix: the original did ``ret = p.write(buf); if ret: tcm_mod_err(...)``.
    Under Python 2 file.write() returns None so the check was dead code, and
    under Python 3 semantics a *successful* write returns the byte count and
    would have been reported as an error.  Real failures are now caught via
    IOError from open()/write() instead.
    """
    f = fabric_mod_dir_var + "/Makefile"
    # Parenthesised print works identically in Python 2 and 3 for one arg.
    print("Writing file: " + f)
    buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += "                   " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
    try:
        p = open(f, 'w')
    except IOError:
        tcm_mod_err("Unable to open file: " + f)
    try:
        p.write(buf)
    except IOError:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Write the new fabric module's Kconfig entry.

    Creates <fabric_mod_dir_var>/Kconfig with a tristate option named after
    the module, depending on TARGET_CORE and CONFIGFS_FS.

    Fix: same dead/incorrect ``ret = p.write(buf); if ret:`` error check as
    tcm_mod_build_kbuild() — replaced with IOError handling so genuine I/O
    failures are reported and successful writes are not.
    """
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)
    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += "    tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += "    depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += "    default n\n"
    buf += "    ---help---\n"
    buf += "    Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
    try:
        p = open(f, 'w')
    except IOError:
        tcm_mod_err("Unable to open file: " + f)
    try:
        p.write(buf)
    except IOError:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's obj-$(CONFIG_...) line to the target Makefile.

    Opens drivers/target/Makefile under *tcm_dir* in append mode and adds a
    single subdirectory rule for the generated fabric module.
    """
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    entry = "obj-$(CONFIG_%s) += %s/\n" % (fabric_mod_name.upper(),
                                           fabric_mod_name.lower())
    with open(makefile_path, 'a') as makefile:
        makefile.write(entry)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append the new module's Kconfig source line to the target Kconfig.

    Opens drivers/target/Kconfig under *tcm_dir* in append mode and adds a
    ``source "drivers/target/<mod>/Kconfig"`` line for the new module.
    """
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    entry = "source \"drivers/target/%s/Kconfig\"\n" % fabric_mod_name.lower()
    with open(kconfig_path, 'a') as kconfig:
        kconfig.write(entry)
    return
def main(modname, proto_ident):
    """Drive generation of a new TCM fabric module skeleton.

    *modname* names the module (e.g. "tcm_foo"); *proto_ident* must be one
    of "FC", "SAS" or "iSCSI".  Assumes the script is run from within the
    kernel tree so that ../../ is the tree root — TODO confirm.
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident
    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)
    # Refuse to clobber an existing module directory.
    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)
    # Generate headers, fabric stubs, configfs glue and build files in order.
    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
    # NOTE(review): the prompt is missing a space before "to" — cosmetic
    # runtime string, left untouched here.  `input` also shadows a builtin.
    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line entry point: both -m/--modulename and -p/--protoident are
# mandatory; optparse itself has no "required option" support, hence the
# manual check against opts.__dict__ below.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()

mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
hep-gc/cloudscheduler | unit_tests/test_group_list.py | 1 | 1322 | from unit_test_common import execute_csv2_request, initialize_csv2_request, ut_id, sanity_requests
from sys import argv
# lno: GV - error code identifier.
def main(gvar):
    """Exercise the /group/list/ endpoint: sanity checks, a privilege
    failure, a bad-parameter failure, and a successful filtered listing.
    """
    if not gvar:
        gvar = {}
        # An optional command-line argument selects which cases to run.
        selections = argv[1] if len(argv) > 1 else None
        if selections is None:
            initialize_csv2_request(gvar)
        else:
            initialize_csv2_request(gvar, selections=selections)

    target_group = ut_id(gvar, 'gtg4')
    priv_user = ut_id(gvar, 'gtu5')

    # 01 - 05
    sanity_requests(gvar, '/group/list/', target_group, priv_user, ut_id(gvar, 'gtg7'), ut_id(gvar, 'gtu2'))

    # 06 Attempt as an unprivileged user.
    execute_csv2_request(
        gvar, 2, None, 'HTTP response code 403, forbidden.',
        '/group/list/', group=target_group,
        server_user=ut_id(gvar, 'gtu3')
    )

    # 07
    execute_csv2_request(
        gvar, 1, 'GV', 'request contained a bad parameter "invalid-unit-test".',
        '/group/list/', group=target_group, form_data={'invalid-unit-test': 'invalid-unit-test'},
        server_user=priv_user
    )

    # 08
    execute_csv2_request(
        gvar, 0, None, None,
        '/group/list/', group=target_group, expected_list='group_list',
        list_filter={'group_name': target_group}, values={'htcondor_fqdn': gvar['fqdn']},
        server_user=priv_user
    )
if __name__ == "__main__":
    # Allow running this test module directly; passing None makes main()
    # build and initialize its own gvar from the command line.
    main(None)
| apache-2.0 |
shahbazn/neutron | neutron/extensions/l3.py | 25 | 10193 | # Copyright 2012 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexception
from neutron.plugins.common import constants
# L3 Exceptions
class RouterNotFound(nexception.NotFound):
    """Raised when a router lookup by ID finds no matching router."""
    message = _("Router %(router_id)s could not be found")
class RouterInUse(nexception.InUse):
    """Raised when a router cannot be deleted/modified because it is in use.

    Callers may pass an explicit ``reason``; when omitted it defaults to
    "still has ports", preserving the historical message.
    """
    message = _("Router %(router_id)s %(reason)s")

    def __init__(self, **kwargs):
        # setdefault expresses the "fill in if missing" intent directly.
        kwargs.setdefault('reason', "still has ports")
        super(RouterInUse, self).__init__(**kwargs)
class RouterInterfaceNotFound(nexception.NotFound):
    """Raised when a router has no interface with the given port ID."""
    message = _("Router %(router_id)s does not have "
                "an interface with id %(port_id)s")
class RouterInterfaceNotFoundForSubnet(nexception.NotFound):
    """Raised when a router has no interface on the given subnet."""
    message = _("Router %(router_id)s has no interface "
                "on subnet %(subnet_id)s")
class RouterInterfaceInUseByFloatingIP(nexception.InUse):
    """Raised when deleting a router interface still needed by floating IPs."""
    message = _("Router interface for subnet %(subnet_id)s on router "
                "%(router_id)s cannot be deleted, as it is required "
                "by one or more floating IPs.")
class FloatingIPNotFound(nexception.NotFound):
    """Raised when a floating IP lookup by ID finds no match."""
    message = _("Floating IP %(floatingip_id)s could not be found")
class ExternalGatewayForFloatingIPNotFound(nexception.NotFound):
    """Raised when no route exists from a port's subnet to the external net."""
    message = _("External network %(external_network_id)s is not reachable "
                "from subnet %(subnet_id)s. Therefore, cannot associate "
                "Port %(port_id)s with a Floating IP.")
class FloatingIPPortAlreadyAssociated(nexception.InUse):
    """Raised when a fixed IP already has a floating IP on the network."""
    message = _("Cannot associate floating IP %(floating_ip_address)s "
                "(%(fip_id)s) with port %(port_id)s "
                "using fixed IP %(fixed_ip)s, as that fixed IP already "
                "has a floating IP on external network %(net_id)s.")
class RouterExternalGatewayInUseByFloatingIp(nexception.InUse):
    """Raised when updating a gateway that floating IPs still depend on."""
    message = _("Gateway cannot be updated for router %(router_id)s, since a "
                "gateway to external network %(net_id)s is required by one or "
                "more floating IPs.")
# Collection name for router resources and the attribute key carrying a
# router's external gateway data; both are referenced by the attribute map
# and by plugin code.
ROUTERS = 'routers'
EXTERNAL_GW_INFO = 'external_gateway_info'
# API attribute map for the 'routers' and 'floatingips' collections.
# Each attribute dict follows the standard Neutron attribute schema:
# allow_post/allow_put control mutability, 'validate' names a validator,
# 'is_visible' controls API output, 'enforce_policy' ties into policy.json.
RESOURCE_ATTRIBUTE_MAP = {
    ROUTERS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': attr.NAME_MAX_LEN},
                 'is_visible': True, 'default': ''},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True},
        # Optional gateway spec: a network_id plus optional fixed IPs on
        # the external network.
        EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True,
                           'is_visible': True, 'default': None,
                           'enforce_policy': True,
                           'validate': {
                               'type:dict_or_nodata': {
                                   'network_id': {'type:uuid': None,
                                                  'required': True},
                                   'external_fixed_ips': {
                                       'convert_list_to':
                                       attr.convert_kvp_list_to_dict,
                                       'type:fixed_ips': None,
                                       'default': None,
                                       'required': False,
                                   }
                               }
                           }}
    },
    'floatingips': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'floating_ip_address': {'allow_post': True, 'allow_put': False,
                                'validate': {'type:ip_address_or_none': None},
                                'is_visible': True, 'default': None,
                                'enforce_policy': True},
        'subnet_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid_or_none': None},
                      'is_visible': False,  # Use False for input only attr
                      'default': None},
        'floating_network_id': {'allow_post': True, 'allow_put': False,
                                'validate': {'type:uuid': None},
                                'is_visible': True},
        'router_id': {'allow_post': False, 'allow_put': False,
                      'validate': {'type:uuid_or_none': None},
                      'is_visible': True, 'default': None},
        'port_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid_or_none': None},
                    'is_visible': True, 'default': None,
                    'required_by_policy': True},
        'fixed_ip_address': {'allow_post': True, 'allow_put': True,
                             'validate': {'type:ip_address_or_none': None},
                             'is_visible': True, 'default': None},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
    },
}
# Per-tenant quota knobs for L3 resources, registered under the [QUOTAS]
# config section; a negative value disables the limit.
l3_quota_opts = [
    cfg.IntOpt('quota_router',
               default=10,
               help=_('Number of routers allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_floatingip',
               default=50,
               help=_('Number of floating IPs allowed per tenant. '
                      'A negative value means unlimited.')),
]
cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS')
class L3(extensions.ExtensionDescriptor):
    """API extension descriptor for the L3 router extension."""

    @classmethod
    def get_name(cls):
        """Return the human-readable extension name."""
        return "Neutron L3 Router"

    @classmethod
    def get_alias(cls):
        """Return the alias clients use to request this extension."""
        return "router"

    @classmethod
    def get_description(cls):
        """Return a one-paragraph description of the extension."""
        return ("Router abstraction for basic L3 forwarding"
                " between L2 Neutron networks and access to external"
                " networks via a NAT gateway.")

    @classmethod
    def get_updated(cls):
        """Return the extension's last-updated timestamp."""
        return "2012-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build and return the extension's API resources."""
        plurals = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        plurals['external_fixed_ips'] = 'external_fixed_ip'
        attr.PLURALS.update(plurals)
        # Member actions exposed on individual routers.
        router_actions = {'router': {'add_router_interface': 'PUT',
                                     'remove_router_interface': 'PUT'}}
        return resource_helper.build_resource_info(plurals,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.L3_ROUTER_NAT,
                                                   action_map=router_actions,
                                                   register_quota=True)

    def update_attributes_map(self, attributes):
        """Merge this extension's attributes into the core attribute map."""
        super(L3, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        """Return extended resources for API v2.0, nothing otherwise."""
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}
class RouterPluginBase(object):
    """Abstract interface an L3 router service plugin must implement.

    NOTE(review): the class uses @abc.abstractmethod but its metaclass is
    plain ``type`` (no ABCMeta), so the abstract markers are not enforced
    at instantiation time — subclasses missing methods will only fail when
    the method is called.  Left as-is since adding ABCMeta could break
    existing partial implementations.
    """

    @abc.abstractmethod
    def create_router(self, context, router):
        """Create a router from the request body *router*."""
        pass

    @abc.abstractmethod
    def update_router(self, context, id, router):
        """Update router *id* with the fields in *router*."""
        pass

    @abc.abstractmethod
    def get_router(self, context, id, fields=None):
        """Return the router *id*, optionally restricted to *fields*."""
        pass

    @abc.abstractmethod
    def delete_router(self, context, id):
        """Delete router *id*."""
        pass

    @abc.abstractmethod
    def get_routers(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None, page_reverse=False):
        """Return routers matching *filters*, with optional paging/sorting."""
        pass

    @abc.abstractmethod
    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet or port interface to router *router_id*."""
        pass

    @abc.abstractmethod
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach a subnet or port interface from router *router_id*."""
        pass

    @abc.abstractmethod
    def create_floatingip(self, context, floatingip):
        """Create a floating IP from the request body *floatingip*."""
        pass

    @abc.abstractmethod
    def update_floatingip(self, context, id, floatingip):
        """Update floating IP *id* (e.g. (dis)associate a port)."""
        pass

    @abc.abstractmethod
    def get_floatingip(self, context, id, fields=None):
        """Return floating IP *id*, optionally restricted to *fields*."""
        pass

    @abc.abstractmethod
    def delete_floatingip(self, context, id):
        """Delete floating IP *id*."""
        pass

    @abc.abstractmethod
    def get_floatingips(self, context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False):
        """Return floating IPs matching *filters*, with optional paging."""
        pass

    def get_routers_count(self, context, filters=None):
        # Optional: plugins without an efficient count may leave this.
        raise NotImplementedError()

    def get_floatingips_count(self, context, filters=None):
        # Optional: plugins without an efficient count may leave this.
        raise NotImplementedError()
| apache-2.0 |
geosolutions-it/geonode | geonode/upload/tests/test_upload_preprocessing.py | 4 | 4101 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""unit tests for geonode.upload.upload_preprocessing module"""
from geonode.tests.base import GeoNodeBaseTestSupport
try:
import unittest.mock as mock
except ImportError:
import mock
import os.path
from geonode.upload import files
from geonode.upload import upload_preprocessing
from geonode.upload.utils import get_kml_doc
class UploadPreprocessingTestCase(GeoNodeBaseTestSupport):
    """Unit tests for geonode.upload.upload_preprocessing.

    Note: @mock.patch decorators inject mocks bottom-up, so the decorator
    order must match the test method's parameter order exactly.
    """

    MOCK_PREFIX = "geonode.upload.upload_preprocessing"

    @mock.patch(MOCK_PREFIX + ".convert_kml_ground_overlay_to_geotiff", autospec=True)
    def test_preprocess_files_kml_ground_overlay(self, mock_handler):
        """preprocess_files() should hand KML ground overlays to the
        geotiff converter with (kml_path, image_path)."""
        dirname = "phony"
        kml_path = "fake_path.kml"
        image_path = "another_fake_path.png"
        data = [
            files.SpatialFile(
                base_file=kml_path,
                file_type=files.get_type("KML Ground Overlay"),
                auxillary_files=[image_path],
                sld_files=[],
                xml_files=[]
            )
        ]
        spatial_files = files.SpatialFiles(dirname, data)
        upload_preprocessing.preprocess_files(spatial_files)
        mock_handler.assert_called_with(kml_path, image_path)

    def test_extract_bbox_param(self):
        """_extract_bbox_param() should read a LatLonBox child element."""
        fake_north = "70.000"
        kml_bytes = f"""
        <?xml version="1.0" encoding="UTF-8"?>
        <kml xmlns="http://earth.google.com/kml/2.1">
          <Document>
            <GroundOverlay id="groundoverlay">
              <LatLonBox>
                <north>{fake_north}</north>
              </LatLonBox>
            </GroundOverlay>
          </Document>
        </kml>
        """.strip()
        kml_doc, ns = get_kml_doc(kml_bytes)
        result = upload_preprocessing._extract_bbox_param(
            kml_doc, ns, "north")
        self.assertEqual(result, fake_north)

    @mock.patch(MOCK_PREFIX + ".subprocess.check_output", autospec=True)
    @mock.patch(MOCK_PREFIX + ".get_kml_doc", autospec=True)
    @mock.patch(MOCK_PREFIX + "._extract_bbox_param", autospec=True)
    def test_convert_kml_ground_overlay_to_geotiff(self, mock_extract_param,
                                                   mock_get_kml_doc,
                                                   mock_subprocess):
        """convert_kml_ground_overlay_to_geotiff() should call
        gdal_translate with the bbox read from the KML (-a_ullr order:
        west, north, east, south — which is why side_effect is ordered
        west, north, east, south below)."""
        fake_other_file_path = "the_image.png"
        fake_kml_bytes = "nothing"
        mock_get_kml_doc.return_value = ("not_relevant", "for_this_test")
        fake_north = "1"
        fake_south = "2"
        fake_east = "3"
        fake_west = "4"
        mock_extract_param.side_effect = [fake_west, fake_north,
                                          fake_east, fake_south]
        mock_open = mock.mock_open(read_data=fake_kml_bytes)
        with mock.patch(self.MOCK_PREFIX + ".open", mock_open):
            upload_preprocessing.convert_kml_ground_overlay_to_geotiff(
                "fake_kml_path",
                fake_other_file_path
            )
        mock_subprocess.assert_called_with([
            "gdal_translate",
            "-of", "GTiff",
            "-a_srs", "EPSG:4326",
            "-a_ullr", fake_west, fake_north, fake_east, fake_south,
            fake_other_file_path,
            os.path.splitext(fake_other_file_path)[0] + ".tif"
        ])
| gpl-3.0 |
boris-p/ladybug | src/Ladybug_Open STAT File.py | 1 | 1988 | # Open Weather data file
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to open a .stat file, which downloads with the .epw weather file and contains information such as the climate zone and maximum temperatures for designing heating/cooling systems.
This component opens the file from a location on your computer.
-
Provided by Ladybug 0.0.61
Args:
_open: Set Boolean to True to browse for a .stat file on your system.
Returns:
readMe!: ...
statFile: The file path of the selected .stat file.
"""
ghenv.Component.Name = "Ladybug_Open STAT File"
ghenv.Component.NickName = 'Open stat file'
ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "0 | Ladybug"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "4"
except: pass
import rhinoscriptsyntax as rs
# Show the file-open dialog only once the boolean input toggles to True.
if _open == True:
    # Rhino file-dialog filter string: offer .stat files first, then all files.
    filter = "STAT file (*.stat)|*.stat|All Files (*.*)|*.*||"
    # Returns the chosen path as a string; None if the dialog is cancelled.
    statFile = rs.OpenFileName("Open .stat File", filter)
    print 'Done!'
else:
    # Nothing to do until the user flips the _open toggle.
    print 'Please set open to True'
| gpl-3.0 |
lichuan261/wuand | XX-Net/python27/1.0/lib/noarch/sortedcontainers/sortedlistwithkey.py | 10 | 41145 | # -*- coding: utf-8 -*-
#
# Sorted list implementation.
from __future__ import print_function
from sys import hexversion
from .sortedlist import recursive_repr
from bisect import bisect_left, bisect_right, insort
from itertools import chain, repeat, starmap
from collections import MutableSequence
from operator import iadd, add
from functools import wraps
from math import log
if hexversion < 0x03000000:
from itertools import izip as zip
from itertools import imap as map
else:
from functools import reduce
def identity(value):
    """Return *value* unchanged (default key function for SortedListWithKey)."""
    return value
class SortedListWithKey(MutableSequence):
"""
SortedList provides most of the same methods as a list but keeps the items
in sorted order.
"""
def __init__(self, iterable=None, key=identity, load=1000):
    """
    SortedList provides most of the same methods as a list but keeps the
    items in sorted order.

    An optional *iterable* provides an initial series of items to populate
    the SortedList.

    An optional *load* specifies the load-factor of the list. The default
    load factor of '1000' works well for lists from tens to tens of millions
    of elements. Good practice is to use a value that is the cube root of
    the list size. With billions of elements, the best load factor depends
    on your usage. It's best to leave the load factor at the default until
    you start benchmarking.
    """
    # _len: total element count; _maxes: max key of each sublist;
    # _lists: sublists of values; _keys: parallel sublists of sort keys;
    # _index: lazily built positional index tree (see _build_index).
    self._len, self._maxes, self._lists, self._keys, self._index = 0, [], [], [], []
    # _twice / _half bound sublist sizes for splitting (_expand) and
    # merging (_delete).
    self._key, self._load, self._twice, self._half = key, load, load * 2, load >> 1
    # _offset: index of the first leaf in the _index tree.
    self._offset = 0
    if iterable is not None:
        self.update(iterable)
def clear(self):
    """Remove every element, leaving the list empty."""
    self._len = 0
    # Empty each backing store in place so shared references stay valid.
    for store in (self._maxes, self._lists, self._keys, self._index):
        del store[:]
def add(self, val):
    """Add the element *val* to the list, keeping the list sorted by key."""
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys
    key = self._key(val)
    if _maxes:
        # Locate the sublist whose key range should receive *key*.
        pos = bisect_right(_maxes, key)
        if pos == len(_maxes):
            # key is a new overall maximum: append to the last sublist
            # and update its recorded maximum.
            pos -= 1
            _maxes[pos] = key
            _keys[pos].append(key)
            _lists[pos].append(val)
        else:
            # Insert within the covering sublist, keys and values in step.
            idx = bisect_right(_keys[pos], key)
            _keys[pos].insert(idx, key)
            _lists[pos].insert(idx, val)
        # Split the sublist if it grew past twice the load factor.
        self._expand(pos)
    else:
        # First element: create the initial sublist.
        _maxes.append(key)
        _keys.append([key])
        _lists.append([val])
    self._len += 1
def _expand(self, pos):
"""
Splits sublists that are more than double the load level.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc.
"""
_lists, _keys, _index = self._lists, self._keys, self._index
if len(_keys[pos]) > self._twice:
_maxes, _load = self._maxes, self._load
half = _keys[pos][_load:]
half_list = _lists[pos][_load:]
del _keys[pos][_load:]
del _lists[pos][_load:]
_maxes[pos] = _keys[pos][-1]
_maxes.insert(pos + 1, half[-1])
_keys.insert(pos + 1, half)
_lists.insert(pos + 1, half_list)
del _index[:]
else:
if len(_index) > 0:
child = self._offset + pos
while child > 0:
_index[child] += 1
child = (child - 1) >> 1
_index[0] += 1
def update(self, iterable):
"""Update the list by adding all elements from *iterable*."""
_maxes, _lists, _keys = self._maxes, self._lists, self._keys
values = sorted(iterable, key=self._key)
if _maxes:
if len(values) * 4 >= self._len:
values.extend(chain.from_iterable(_lists))
values.sort(key=self._key)
self.clear()
else:
_add = self.add
for val in values:
_add(val)
return
_load, _index = self._load, self._index
_lists.extend(values[pos:(pos + _load)]
for pos in range(0, len(values), _load))
_keys.extend(list(map(self._key, _list)) for _list in _lists)
_maxes.extend(sublist[-1] for sublist in _keys)
self._len = len(values)
del _index[:]
def __contains__(self, val):
    """Return True if and only if *val* is an element in the list."""
    _maxes = self._maxes
    if not _maxes:
        return False
    key = self._key(val)
    pos = bisect_left(_maxes, key)
    if pos == len(_maxes):
        # key is greater than every stored key.
        return False
    _keys = self._keys
    _lists = self._lists
    idx = bisect_left(_keys[pos], key)
    len_keys = len(_keys)
    len_sublist = len(_keys[pos])
    # Several distinct values may share the same sort key, so scan forward
    # through the run of equal keys comparing the stored values themselves.
    while True:
        if _keys[pos][idx] != key:
            # Left the run of equal keys without finding val.
            return False
        if _lists[pos][idx] == val:
            return True
        idx += 1
        if idx == len_sublist:
            # The run may continue into the next sublist.
            pos += 1
            if pos == len_keys:
                return False
            len_sublist = len(_keys[pos])
            idx = 0
def discard(self, val):
"""
Remove the first occurrence of *val*.
If *val* is not a member, does nothing.
"""
_maxes = self._maxes
if not _maxes:
return
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return
_keys = self._keys
_lists = self._lists
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
return
if _lists[pos][idx] == val:
self._delete(pos, idx)
return
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
return
len_sublist = len(_keys[pos])
idx = 0
def remove(self, val):
"""
Remove first occurrence of *val*.
Raises ValueError if *val* is not present.
"""
_maxes = self._maxes
if not _maxes:
raise ValueError('{0} not in list'.format(repr(val)))
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0} not in list'.format(repr(val)))
_keys = self._keys
_lists = self._lists
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0} not in list'.format(repr(val)))
if _lists[pos][idx] == val:
self._delete(pos, idx)
return
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0} not in list'.format(repr(val)))
len_sublist = len(_keys[pos])
idx = 0
def _delete(self, pos, idx):
    """
    Delete the item at the given (pos, idx).

    Combines lists that are less than half the load level.

    Updates the index when the sublist length is more than half the load
    level. This requires decrementing the nodes in a traversal from the leaf
    node to the root. For an example traversal see self._loc.
    """
    _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index
    keys_pos = _keys[pos]
    lists_pos = _lists[pos]
    # Remove key and value in step to keep the parallel sublists aligned.
    del keys_pos[idx]
    del lists_pos[idx]
    self._len -= 1
    len_keys_pos = len(keys_pos)
    if len_keys_pos > self._half:
        # Sublist still comfortably sized: refresh its maximum and patch
        # the index tree in place (decrement counts leaf-to-root).
        _maxes[pos] = keys_pos[-1]
        if len(_index) > 0:
            child = self._offset + pos
            while child > 0:
                _index[child] -= 1
                child = (child - 1) >> 1
            _index[0] -= 1
    elif len(_keys) > 1:
        # Sublist dropped below half load: merge it into its neighbor.
        if not pos:
            # No left neighbor; merge sublist 1 into sublist 0 instead.
            pos += 1
        prev = pos - 1
        _keys[prev].extend(_keys[pos])
        _lists[prev].extend(_lists[pos])
        _maxes[prev] = _keys[prev][-1]
        del _keys[pos]
        del _lists[pos]
        del _maxes[pos]
        # Structure changed: the index tree must be rebuilt lazily.
        del _index[:]
        # Merged sublist may now exceed twice the load; re-split if so.
        self._expand(prev)
    elif len_keys_pos:
        # Only one sublist remains; just refresh its maximum.
        _maxes[pos] = keys_pos[-1]
    else:
        # The single remaining sublist became empty: drop everything.
        del _keys[pos]
        del _lists[pos]
        del _maxes[pos]
        del _index[:]
def _loc(self, pos, idx):
"""Convert an index pair (alpha, beta) into a single index that corresponds to
the position of the value in the sorted list.
Most queries require the index be built. Details of the index are
described in self._build_index.
Indexing requires traversing the tree from a leaf node to the root. The
parent of each node is easily computable at (pos - 1) // 2.
Left-child nodes are always at odd indices and right-child nodes are
always at even indices.
When traversing up from a right-child node, increment the total by the
left-child node.
The final index is the sum from traversal and the index in the sublist.
For example, using the index from self._build_index:
_index = 14 5 9 3 2 4 5
_offset = 3
Tree:
14
5 9
3 2 4 5
Converting index pair (2, 3) into a single index involves iterating like
so:
1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify
the node as a left-child node. At such nodes, we simply traverse to
the parent.
2. At node 9, position 2, we recognize the node as a right-child node
and accumulate the left-child in our total. Total is now 5 and we
traverse to the parent at position 0.
3. Iteration ends at the root.
Computing the index is the sum of the total and beta: 5 + 3 = 8.
"""
if not pos:
return idx
_index = self._index
if not len(_index):
self._build_index()
total = 0
# Increment pos to point in the index to len(self._lists[pos]).
pos += self._offset
# Iterate until reaching the root of the index tree at pos = 0.
while pos:
# Right-child nodes are at odd indices. At such indices
# account the total below the left child node.
if not (pos & 1):
total += _index[pos - 1]
# Advance pos to the parent node.
pos = (pos - 1) >> 1
return total + idx
def _pos(self, idx):
"""Convert an index into a pair (alpha, beta) that can be used to access
the corresponding _lists[alpha][beta] position.
Most queries require the index be built. Details of the index are
described in self._build_index.
Indexing requires traversing the tree to a leaf node. Each node has
two children which are easily computable. Given an index, pos, the
left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.
When the index is less than the left-child, traversal moves to the
left sub-tree. Otherwise, the index is decremented by the left-child
and traversal moves to the right sub-tree.
At a child node, the indexing pair is computed from the relative
position of the child node as compared with the offset and the remaining
index.
For example, using the index from self._build_index:
_index = 14 5 9 3 2 4 5
_offset = 3
Tree:
14
5 9
3 2 4 5
Indexing position 8 involves iterating like so:
1. Starting at the root, position 0, 8 is compared with the left-child
node (5) which it is greater than. When greater the index is
decremented and the position is updated to the right child node.
2. At node 9 with index 3, we again compare the index to the left-child
node with value 4. Because the index is the less than the left-child
node, we simply traverse to the left.
3. At node 4 with index 3, we recognize that we are at a leaf node and
stop iterating.
4. To compute the sublist index, we subtract the offset from the index
of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
simply use the index remaining from iteration. In this case, 3.
The final index pair from our example is (2, 3) which corresponds to
index 8 in the sorted list.
"""
_len, _lists = self._len, self._lists
if idx < 0:
last_len = len(_lists[-1])
if (-idx) <= last_len:
return len(_lists) - 1, last_len + idx
idx += _len
if idx < 0:
raise IndexError('list index out of range')
elif idx >= _len:
raise IndexError('list index out of range')
if idx < len(_lists[0]):
return 0, idx
_index = self._index
if not len(_index):
self._build_index()
pos = 0
len_index = len(_index)
child = (pos << 1) + 1
while child < len_index:
index_child = _index[child]
if idx < index_child:
pos = child
else:
idx -= index_child
pos = child + 1
child = (pos << 1) + 1
return (pos - self._offset, idx)
def _build_index(self):
"""Build an index for indexing the sorted list.
Indexes are represented as binary trees in a dense array notation
similar to a binary heap.
For example, given a _lists representation storing integers:
[0]: 1 2 3
[1]: 4 5
[2]: 6 7 8 9
[3]: 10 11 12 13 14
The first transformation maps the sub-lists by their length. The
first row of the index is the length of the sub-lists.
[0]: 3 2 4 5
Each row after that is the sum of consecutive pairs of the previous row:
[1]: 5 9
[2]: 14
Finally, the index is built by concatenating these lists together:
_index = 14 5 9 3 2 4 5
An offset storing the start of the first row is also stored:
_offset = 3
When built, the index can be used for efficient indexing into the list.
See the comment and notes on self._pos for details.
"""
row0 = list(map(len, self._lists))
if len(row0) == 1:
self._index[:] = row0
self._offset = 0
return
head = iter(row0)
tail = iter(head)
row1 = list(starmap(add, zip(head, tail)))
if len(row0) & 1:
row1.append(row0[-1])
if len(row1) == 1:
self._index[:] = row1 + row0
self._offset = 1
return
size = 2 ** (int(log(len(row1) - 1, 2)) + 1)
row1.extend(repeat(0, size - len(row1)))
tree = [row0, row1]
while len(tree[-1]) > 1:
head = iter(tree[-1])
tail = iter(head)
row = list(starmap(add, zip(head, tail)))
tree.append(row)
reduce(iadd, reversed(tree), self._index)
self._offset = size * 2 - 1
def _slice(self, slc):
start, stop, step = slc.start, slc.stop, slc.step
if step == 0:
raise ValueError('slice step cannot be zero')
# Set defaults for missing values.
if step is None:
step = 1
if step > 0:
if start is None:
start = 0
if stop is None:
stop = len(self)
elif stop < 0:
stop += len(self)
else:
if start is None:
start = len(self) - 1
if stop is None:
stop = -1
elif stop < 0:
stop += len(self)
if start < 0:
start += len(self)
# Fix indices that are too big or too small.
# Slice notation is surprisingly permissive
# where normal indexing would raise IndexError.
if step > 0:
if start < 0:
start = 0
elif start > len(self):
start = len(self)
if stop < 0:
stop = 0
elif stop > len(self):
stop = len(self)
else:
if start < 0:
start = -1
elif start >= len(self):
start = len(self) - 1
if stop < 0:
stop = -1
elif stop > len(self):
stop = len(self)
return start, stop, step
def __delitem__(self, idx):
"""Remove the element at *idx*. Supports slicing."""
if isinstance(idx, slice):
start, stop, step = self._slice(idx)
if ((step == 1) and (start < stop)
and ((stop - start) * 8 >= self._len)):
values = self[:start]
if stop < self._len:
values += self[stop:]
self.clear()
self.update(values)
return
indices = range(start, stop, step)
# Delete items from greatest index to least so
# that the indices remain valid throughout iteration.
if step > 0:
indices = reversed(indices)
_pos, _delete = self._pos, self._delete
for index in indices:
pos, idx = _pos(index)
_delete(pos, idx)
else:
pos, idx = self._pos(idx)
self._delete(pos, idx)
def __getitem__(self, idx):
"""Return the element at *idx*. Supports slicing."""
_lists = self._lists
if isinstance(idx, slice):
start, stop, step = self._slice(idx)
if step == 1 and start < stop:
if start == 0 and stop == self._len:
return self.as_list()
start_pos, start_idx = self._pos(start)
if stop == self._len:
stop_pos = len(_lists) - 1
stop_idx = len(_lists[stop_pos])
else:
stop_pos, stop_idx = self._pos(stop)
if start_pos == stop_pos:
return _lists[start_pos][start_idx:stop_idx]
prefix = _lists[start_pos][start_idx:]
middle = _lists[(start_pos + 1):stop_pos]
result = reduce(iadd, middle, prefix)
result += _lists[stop_pos][:stop_idx]
return result
if step == -1 and start > stop:
result = self[(stop + 1):(start + 1)]
result.reverse()
return result
# Return a list because a negative step could
# reverse the order of the items and this could
# be the desired behavior.
indices = range(start, stop, step)
return list(self[index] for index in indices)
else:
pos, idx = self._pos(idx)
return _lists[pos][idx]
def _check_order(self, idx, key, val):
_keys, _len = self._keys, self._len
pos, loc = self._pos(idx)
if idx < 0:
idx += _len
# Check that the inserted value is not less than the
# previous value.
if idx > 0:
idx_prev = loc - 1
pos_prev = pos
if idx_prev < 0:
pos_prev -= 1
idx_prev = len(_keys[pos_prev]) - 1
if _keys[pos_prev][idx_prev] > key:
msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
raise ValueError(msg)
# Check that the inserted value is not greater than
# the previous value.
if idx < (_len - 1):
idx_next = loc + 1
pos_next = pos
if idx_next == len(_keys[pos_next]):
pos_next += 1
idx_next = 0
if _keys[pos_next][idx_next] < key:
msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
raise ValueError(msg)
def __setitem__(self, index, value):
"""
Replace the item at position *index* with *value*.
Supports slice notation. Raises a :exc:`ValueError` if the sort order
would be violated. When used with a slice and iterable, the
:exc:`ValueError` is raised before the list is mutated if the sort order
would be violated by the operation.
"""
_maxes, _lists, _keys, _pos = self._maxes, self._lists, self._keys, self._pos
_check_order = self._check_order
if isinstance(index, slice):
start, stop, step = self._slice(index)
indices = range(start, stop, step)
if step != 1:
if not hasattr(value, '__len__'):
value = list(value)
indices = list(indices)
if len(value) != len(indices):
raise ValueError(
'attempt to assign sequence of size {0}'
' to extended slice of size {1}'
.format(len(value), len(indices)))
# Keep a log of values that are set so that we can
# roll back changes if ordering is violated.
log = []
_append = log.append
for idx, val in zip(indices, value):
pos, loc = _pos(idx)
key = self._key(val)
_append((idx, _keys[pos][loc], key, _lists[pos][loc], val))
_keys[pos][loc] = key
_lists[pos][loc] = val
if len(_keys[pos]) == (loc + 1):
_maxes[pos] = key
try:
# Validate ordering of new values.
for idx, oldkey, newkey, oldval, newval in log:
_check_order(idx, newkey, newval)
except ValueError:
# Roll back changes from log.
for idx, oldkey, newkey, oldval, newval in log:
pos, loc = _pos(idx)
_keys[pos][loc] = oldkey
_lists[pos][loc] = oldval
if len(_keys[pos]) == (loc + 1):
_maxes[pos] = oldkey
raise
else:
# Test ordering using indexing. If the value given
# doesn't support getitem, convert it to a list.
if not hasattr(value, '__getitem__'):
value = list(value)
# Check that the given values are ordered properly.
keys = list(map(self._key, value))
ordered = all(keys[pos - 1] <= keys[pos]
for pos in range(1, len(keys)))
if not ordered:
raise ValueError('given sequence not in sort order')
# Check ordering in context of sorted list.
if not start or not len(value):
# Nothing to check on the lhs.
pass
else:
pos, loc = _pos(start - 1)
if _keys[pos][loc] > keys[0]:
msg = '{0} not in sort order at index {1}'.format(repr(value[0]), start)
raise ValueError(msg)
if stop == len(self) or not len(value):
# Nothing to check on the rhs.
pass
else:
# "stop" is exclusive so we don't need
# to add one for the index.
pos, loc = _pos(stop)
if _keys[pos][loc] < keys[-1]:
msg = '{0} not in sort order at index {1}'.format(repr(value[-1]), stop)
raise ValueError(msg)
# Delete the existing values.
del self[index]
# Insert the new values.
_insert = self.insert
for idx, val in enumerate(value):
_insert(start + idx, val)
else:
pos, loc = _pos(index)
key = self._key(value)
_check_order(index, key, value)
_keys[pos][loc] = key
_lists[pos][loc] = value
if len(_lists[pos]) == (loc + 1):
_maxes[pos] = key
def __iter__(self):
    """Yield every element in sorted order."""
    for sublist in self._lists:
        for item in sublist:
            yield item

def __reversed__(self):
    """Yield every element in reverse sorted order."""
    for sublist in reversed(self._lists):
        for item in reversed(sublist):
            yield item

def __len__(self):
    """Return the number of elements in the list."""
    return self._len
def bisect_left(self, val):
    """
    Similar to the *bisect* module in the standard library, this returns an
    appropriate index to insert *val*. If *val* is already present, the
    insertion point will be before (to the left of) any existing entries.
    """
    _maxes = self._maxes
    if not _maxes:
        return 0
    key = self._key(val)
    # First narrow to the sublist, then within it; _loc converts the
    # (sublist, offset) pair back to a flat list index.
    pos = bisect_left(_maxes, key)
    if pos == len(_maxes):
        # key exceeds every stored key: insertion point is the end.
        return self._len
    idx = bisect_left(self._keys[pos], key)
    return self._loc(pos, idx)
def bisect_right(self, val):
    """
    Same as *bisect_left*, but if *val* is already present, the insertion
    point will be after (to the right of) any existing entries.
    """
    _maxes = self._maxes
    if not _maxes:
        return 0
    key = self._key(val)
    # Mirror of bisect_left using right-biased bisection at both levels.
    pos = bisect_right(_maxes, key)
    if pos == len(_maxes):
        return self._len
    idx = bisect_right(self._keys[pos], key)
    return self._loc(pos, idx)

# Alias matching the stdlib: bisect.bisect is bisect_right.
bisect = bisect_right
def count(self, val):
    """Return the number of occurrences of *val* in the list."""
    _maxes = self._maxes
    if not _maxes:
        return 0
    key = self._key(val)
    pos = bisect_left(_maxes, key)
    if pos == len(_maxes):
        return 0
    _keys = self._keys
    _lists = self._lists
    idx = bisect_left(_keys[pos], key)
    total = 0
    len_keys = len(_keys)
    len_sublist = len(_keys[pos])
    # Distinct values may share a sort key, so walk the whole run of
    # equal keys and count only exact value matches.
    while True:
        if _keys[pos][idx] != key:
            # Past the run of equal keys: done.
            return total
        if _lists[pos][idx] == val:
            total += 1
        idx += 1
        if idx == len_sublist:
            # Run may continue into the next sublist.
            pos += 1
            if pos == len_keys:
                return total
            len_sublist = len(_keys[pos])
            idx = 0
def copy(self):
    """Return a shallow copy of the sorted list (same key and load factor)."""
    return self.__class__(self, key=self._key, load=self._load)

# Support the copy.copy() protocol.
__copy__ = copy
def append(self, val):
"""
Append the element *val* to the list. Raises a ValueError if the *val*
would violate the sort order.
"""
_maxes, _lists, _keys = self._maxes, self._lists, self._keys
key = self._key(val)
if not _maxes:
_maxes.append(key)
_keys.append([key])
_lists.append([val])
self._len = 1
return
pos = len(_keys) - 1
if key < _keys[pos][-1]:
msg = '{0} not in sort order at index {1}'.format(repr(val), self._len)
raise ValueError(msg)
_maxes[pos] = key
_keys[pos].append(key)
_lists[pos].append(val)
self._len += 1
self._expand(pos)
def extend(self, values):
"""
Extend the list by appending all elements from the *values*. Raises a
ValueError if the sort order would be violated.
"""
_maxes, _keys, _lists, _load = self._maxes, self._keys, self._lists, self._load
if not isinstance(values, list):
values = list(values)
keys = list(map(self._key, values))
if any(keys[pos - 1] > keys[pos]
for pos in range(1, len(keys))):
raise ValueError('given sequence not in sort order')
offset = 0
if _maxes:
if keys[0] < _keys[-1][-1]:
msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
raise ValueError(msg)
if len(_keys[-1]) < self._half:
_lists[-1].extend(values[:_load])
_keys[-1].extend(keys[:_load])
_maxes[-1] = _keys[-1][-1]
offset = _load
len_keys = len(_keys)
for idx in range(offset, len(keys), _load):
_lists.append(values[idx:(idx + _load)])
_keys.append(keys[idx:(idx + _load)])
_maxes.append(_keys[-1][-1])
_index = self._index
if len_keys == len(_keys):
len_index = len(_index)
if len_index > 0:
len_values = len(values)
child = len_index - 1
while child:
_index[child] += len_values
child = (child - 1) >> 1
_index[0] += len_values
else:
del _index[:]
self._len += len(values)
def insert(self, idx, val):
"""
Insert the element *val* into the list at *idx*. Raises a ValueError if
the *val* at *idx* would violate the sort order.
"""
_maxes, _lists, _keys, _len = self._maxes, self._lists, self._keys, self._len
if idx < 0:
idx += _len
if idx < 0:
idx = 0
if idx > _len:
idx = _len
key = self._key(val)
if not _maxes:
# The idx must be zero by the inequalities above.
_maxes.append(key)
_lists.append([val])
_keys.append([key])
self._len = 1
return
if not idx:
if key > _keys[0][0]:
msg = '{0} not in sort order at index {1}'.format(repr(val), 0)
raise ValueError(msg)
else:
_keys[0].insert(0, key)
_lists[0].insert(0, val)
self._expand(0)
self._len += 1
return
if idx == _len:
pos = len(_keys) - 1
if _keys[pos][-1] > key:
msg = '{0} not in sort order at index {1}'.format(repr(val), _len)
raise ValueError(msg)
else:
_keys[pos].append(key)
_lists[pos].append(val)
_maxes[pos] = _keys[pos][-1]
self._expand(pos)
self._len += 1
return
pos, idx = self._pos(idx)
idx_before = idx - 1
if idx_before < 0:
pos_before = pos - 1
idx_before = len(_keys[pos_before]) - 1
else:
pos_before = pos
before = _keys[pos_before][idx_before]
if before <= key <= _keys[pos][idx]:
_lists[pos].insert(idx, val)
_keys[pos].insert(idx, key)
self._expand(pos)
self._len += 1
else:
msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
raise ValueError(msg)
def pop(self, idx=-1):
    """
    Remove and return item at *idx* (default last). Raises IndexError if
    list is empty or index is out of range. Negative indices are supported,
    as for slice indices.
    """
    # Both arms of the check also reject any idx when the list is empty.
    if (idx < 0 and -idx > self._len) or (idx >= self._len):
        raise IndexError('pop index out of range')
    # Rebind idx from a flat index to an offset within sublist pos.
    pos, idx = self._pos(idx)
    val = self._lists[pos][idx]
    self._delete(pos, idx)
    return val
def index(self, val, start=None, stop=None):
"""
Return the smallest *k* such that L[k] == val and i <= k < j`. Raises
ValueError if *val* is not present. *stop* defaults to the end of the
list. *start* defaults to the beginning. Negative indices are supported,
as for slice indices.
"""
_len, _maxes = self._len, self._maxes
if not _maxes:
raise ValueError('{0} is not in list'.format(repr(val)))
if start is None:
start = 0
if start < 0:
start += _len
if start < 0:
start = 0
if stop is None:
stop = _len
if stop < 0:
stop += _len
if stop > _len:
stop = _len
if stop <= start:
raise ValueError('{0} is not in list'.format(repr(val)))
stop -= 1
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0} is not in list'.format(repr(val)))
_keys = self._keys
_lists = self._lists
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0} is not in list'.format(repr(val)))
if _lists[pos][idx] == val:
loc = self._loc(pos, idx)
if start <= loc <= stop:
return loc
elif loc > stop:
break
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0} is not in list'.format(repr(val)))
len_sublist = len(_keys[pos])
idx = 0
raise ValueError('{0} is not in list'.format(repr(val)))
def as_list(self):
    """Very efficiently convert the SortedList to a list."""
    result = []
    # Concatenate the sublists with in-place extension.
    for sublist in self._lists:
        result += sublist
    return result
def __add__(self, that):
    """
    Return a new sorted list containing all the elements in *self* and
    *that*. Elements in *that* do not need to be properly ordered with
    respect to *self*.
    """
    combined = self.as_list()
    combined.extend(that)
    return self.__class__(combined, key=self._key, load=self._load)

def __iadd__(self, that):
    """
    Update *self* to include all values in *that*. Elements in *that* do not
    need to be properly ordered with respect to *self*.
    """
    self.update(that)
    return self

def __mul__(self, that):
    """
    Return a new sorted list containing *that* shallow copies of each item
    in SortedList.
    """
    repeated = self.as_list() * that
    return self.__class__(repeated, key=self._key, load=self._load)

def __imul__(self, that):
    """
    Increase the length of the list by appending *that* shallow copies of
    each item.
    """
    repeated = self.as_list() * that
    self.clear()
    self.update(repeated)
    return self
def __eq__(self, that):
    """Compare two Sequences for equality."""
    # NOTE(review): requires len(that); comparing against an unsized
    # iterable raises TypeError rather than returning NotImplemented.
    return ((self._len == len(that))
            and all(lhs == rhs for lhs, rhs in zip(self, that)))

def __ne__(self, that):
    """Compare two Sequences for inequality."""
    return ((self._len != len(that))
            or any(lhs != rhs for lhs, rhs in zip(self, that)))

def __lt__(self, that):
    """Compare two Sequences for less than."""
    # NOTE(review): this is NOT lexicographic comparison like list.__lt__;
    # it requires len(self) <= len(that) AND every paired element to
    # compare less than. E.g. [1, 3] < [2, 2] is False here but True for
    # plain lists. Confirm callers expect these semantics before changing.
    return ((self._len <= len(that))
            and all(lhs < rhs for lhs, rhs in zip(self, that)))

def __le__(self, that):
    """Compare two Sequences for less than equal."""
    # NOTE(review): same element-wise (non-lexicographic) semantics as
    # __lt__ above.
    return ((self._len <= len(that))
            and all(lhs <= rhs for lhs, rhs in zip(self, that)))

def __gt__(self, that):
    """Compare two Sequences for greater than."""
    # NOTE(review): element-wise, mirrored from __lt__.
    return ((self._len >= len(that))
            and all(lhs > rhs for lhs, rhs in zip(self, that)))

def __ge__(self, that):
    """Compare two Sequences for greater than equal."""
    return ((self._len >= len(that))
            and all(lhs >= rhs for lhs, rhs in zip(self, that)))
# recursive_repr guards against infinite recursion when the list
# (indirectly) contains itself.
@recursive_repr
def __repr__(self):
    """Return string representation of SortedListWithKey."""
    temp = '{0}({1}, key={2}, load={3})'
    return temp.format(
        self.__class__.__name__,
        repr(list(self)),
        repr(self._key),
        repr(self._load)
    )
def _check(self):
try:
# Check load parameters.
assert self._load >= 4
assert self._half == (self._load >> 1)
assert self._twice == (self._load * 2)
# Check empty sorted list case.
if self._maxes == []:
assert self._keys == []
assert self._lists == []
return
assert len(self._maxes) > 0 and len(self._keys) > 0 and len(self._lists) > 0
# Check all sublists are sorted.
assert all(sublist[pos - 1] <= sublist[pos]
for sublist in self._keys
for pos in range(1, len(sublist)))
# Check beginning/end of sublists are sorted.
for pos in range(1, len(self._keys)):
assert self._keys[pos - 1][-1] <= self._keys[pos][0]
# Check length of _maxes and _lists match.
assert len(self._maxes) == len(self._lists) == len(self._keys)
# Check _keys matches _key mapped to _lists.
assert all(len(val_list) == len(key_list)
for val_list, key_list in zip(self._lists, self._keys))
assert all(self._key(val) == key for val, key in
zip((_val for _val_list in self._lists for _val in _val_list),
(_key for _key_list in self._keys for _key in _key_list)))
# Check _maxes is a map of _keys.
assert all(self._maxes[pos] == self._keys[pos][-1]
for pos in range(len(self._maxes)))
# Check load level is less than _twice.
assert all(len(sublist) <= self._twice for sublist in self._lists)
# Check load level is greater than _half for all
# but the last sublist.
assert all(len(self._lists[pos]) >= self._half
for pos in range(0, len(self._lists) - 1))
# Check length.
assert self._len == sum(len(sublist) for sublist in self._lists)
# Check index.
if len(self._index):
assert len(self._index) == self._offset + len(self._lists)
assert self._len == self._index[0]
def test_offset_pos(pos):
from_index = self._index[self._offset + pos]
return from_index == len(self._lists[pos])
assert all(test_offset_pos(pos)
for pos in range(len(self._lists)))
for pos in range(self._offset):
child = (pos << 1) + 1
if self._index[pos] == 0:
assert child >= len(self._index)
elif child + 1 == len(self._index):
assert self._index[pos] == self._index[child]
else:
child_sum = self._index[child] + self._index[child + 1]
assert self._index[pos] == child_sum
except:
import sys
import traceback
traceback.print_exc(file=sys.stdout)
print('len', self._len)
print('load', self._load, self._half, self._twice)
print('offset', self._offset)
print('len_index', len(self._index))
print('index', self._index)
print('len_maxes', len(self._maxes))
print('maxes', self._maxes)
print('len_keys', len(self._keys))
print('keys', self._keys)
print('len_lists', len(self._lists))
print('lists', self._lists)
raise
| gpl-2.0 |
Maspear/odoo | addons/email_template/__openerp__.py | 260 | 3068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name' : 'Email Templates',
'version' : '1.1',
'author' : 'OpenERP SA',
'website' : 'https://www.odoo.com/page/mailing',
'category' : 'Marketing',
'depends' : ['mail'],
'description': """
Email Templating (simplified version of the original Power Email by Openlabs).
==============================================================================
Lets you design complete email templates related to any OpenERP document (Sale
Orders, Invoices and so on), including sender, recipient, subject, body (HTML and
Text). You may also automatically attach files to your templates, or print and
attach a report.
For advanced use, the templates may include dynamic attributes of the document
they are related to. For example, you may use the name of a Partner's country
when writing to them, also providing a safe default in case the attribute is
not defined. Each template contains a built-in assistant to help with the
inclusion of these dynamic values.
If you enable the option, a composition assistant will also appear in the sidebar
of the OpenERP documents to which the template applies (e.g. Invoices).
This serves as a quick way to send a new email based on the template, after
reviewing and adapting the contents, if needed.
This composition assistant will also turn into a mass mailing system when called
for multiple documents at once.
These email templates are also at the heart of the marketing campaign system
(see the ``marketing_campaign`` application), if you need to automate larger
campaigns on any OpenERP document.
**Technical note:** only the templating system of the original Power Email by Openlabs was kept.
""",
'data': [
'wizard/email_template_preview_view.xml',
'email_template_view.xml',
'res_partner_view.xml',
'ir_actions_view.xml',
'wizard/mail_compose_message_view.xml',
'security/ir.model.access.csv'
],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
m039/Void | third-party/void-boost/tools/build/src/tools/symlink.py | 26 | 3959 | # Status: ported.
# Base revision: 64488.
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003 Rene Rivera
# Copyright 2002, 2003, 2004, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Defines the "symlink" special target. 'symlink' targets make symbolic links
# to the sources.
import b2.build.feature as feature
import b2.build.targets as targets
import b2.build.property_set as property_set
import b2.build.virtual_target as virtual_target
import b2.build.targets
from b2.manager import get_manager
import bjam
import os
# Declare the 'symlink-location' feature; its value controls where
# generated links are placed (see SymlinkTarget.construct below).
feature.feature("symlink-location", ["project-relative", "build-relative"], ["incidental"])
class SymlinkTarget(targets.BasicTarget):
    """Meta-target that creates one symlink per source target.

    Target names are padded with source names when fewer targets than
    sources are given (see __init__).
    """
    # Counter used to build a unique placeholder name per instance.
    _count = 0

    def __init__(self, project, targets, sources):
        # Generate a fake name for now. Need unnamed targets eventually.
        fake_name = "symlink#%s" % SymlinkTarget._count
        SymlinkTarget._count = SymlinkTarget._count + 1
        b2.build.targets.BasicTarget.__init__(self, fake_name, project, sources)
        # Remember the targets to map the sources onto. Pad or truncate
        # to fit the sources given.
        assert len(targets) <= len(sources)
        self.targets = targets[:] + sources[len(targets):]
        # The virtual targets corresponding to the given targets.
        self.virtual_targets = []

    def construct(self, name, source_targets, ps):
        # Build one FileTarget backed by a "symlink.ln" action per source.
        i = 0
        for t in source_targets:
            s = self.targets[i]
            a = virtual_target.Action(self.manager(), [t], "symlink.ln", ps)
            vt = virtual_target.FileTarget(os.path.basename(s), t.type(), self.project(), a)
            # Place the symlink in the directory relative to the project
            # location, instead of placing it in the build directory.
            # NOTE(review): the `not ... == "project-relative"` negation looks
            # inverted relative to the comment above -- confirm against the
            # original Jam implementation before changing.
            if not ps.get('symlink-location') == "project-relative":
                vt.set_path(os.path.join(self.project().get('location'), os.path.dirname(s)))
            vt = get_manager().virtual_targets().register(vt)
            self.virtual_targets.append(vt)
            i = i + 1
        return (property_set.empty(), self.virtual_targets)
# Creates a symbolic link from a set of targets to a set of sources.
# The targets and sources map one to one. The symlinks generated are
# limited to be the ones given as the sources. That is, the targets
# are either padded or trimmed to equate to the sources. The padding
# is done with the name of the corresponding source. For example::
#
# symlink : one two ;
#
# Is equal to::
#
# symlink one two : one two ;
#
# Names for symlink are relative to the project location. They cannot
# include ".." path components.
def symlink(targets, sources):
    """Register a main-target alternative symlinking `sources` to `targets`.

    Inline targets are intentionally not supported here, since this rule
    is used for linking existing non-local targets.
    """
    from b2.manager import get_manager
    manager = get_manager()
    project = manager.projects().current()
    link_target = SymlinkTarget(project, targets, sources)
    return manager.targets().main_target_alternative(link_target)
def setup_ln(targets, sources, ps):
    """Set PATH_TO_SOURCE on the link target before the action runs.

    The variable holds the source directory relative to the target
    directory, or the empty string when the two coincide.
    """
    src_dir = bjam.call("get-target-variable", sources[0], "LOCATE")[0]
    dst_dir = bjam.call("get-target-variable", targets[0], "LOCATE")[0]
    relative = os.path.relpath(src_dir, dst_dir)
    bjam.call("set-target-variable", targets, "PATH_TO_SOURCE",
              "" if relative == "." else relative)
# Pick the platform-specific command for creating the link.  Windows has
# no symlink support here, so fall back to deleting and copying.
if os.name == 'nt':
    ln_action = """echo "NT symlinks not supported yet, making copy"
del /f /q "$(<)" 2>nul >nul
copy "$(>)" "$(<)" $(NULL_OUT)"""
else:
    # $(PATH_TO_SOURCE) is filled in per-target by setup_ln above.
    ln_action = "ln -f -s '$(>:D=:R=$(PATH_TO_SOURCE))' '$(<)'"
# Register the action with the engine and expose the 'symlink' rule to Jamfiles.
get_manager().engine().register_action("symlink.ln", ln_action, function=setup_ln)
get_manager().projects().add_rule("symlink", symlink)
| mit |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/numpy/core/_methods.py | 103 | 4308 | """
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
from __future__ import division, absolute_import, print_function
import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
# Bind the bound-method reductions once at import time so each wrapper
# below skips two attribute lookups -- save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False):
    """Maximum reduction; positional args avoid keyword-parsing overhead."""
    return um.maximum.reduce(a, axis, None, out, keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
    """Minimum reduction; positional args avoid keyword-parsing overhead."""
    return um.minimum.reduce(a, axis, None, out, keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
    """Sum reduction; positional args avoid keyword-parsing overhead."""
    return um.add.reduce(a, axis, dtype, out, keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
    """Product reduction; positional args avoid keyword-parsing overhead."""
    return um.multiply.reduce(a, axis, dtype, out, keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
    """Logical-or reduction; positional args avoid keyword-parsing overhead."""
    return um.logical_or.reduce(a, axis, dtype, out, keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
    """Logical-and reduction; positional args avoid keyword-parsing overhead."""
    return um.logical_and.reduce(a, axis, dtype, out, keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """Arithmetic mean over `axis`.

    Integer and boolean inputs are accumulated as float64 unless an
    explicit dtype is given.  An empty slice warns (and then divides
    by zero downstream).
    """
    arr = asanyarray(a)
    rcount = _count_reduce_items(arr, axis)
    # Make this warning show up first
    if rcount == 0:
        warnings.warn("Mean of empty slice.", RuntimeWarning)
    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
        dtype = mu.dtype('f8')
    ret = umr_sum(arr, axis, dtype, out, keepdims)
    if isinstance(ret, mu.ndarray):
        # Divide in place; 'unsafe' casting keeps the accumulator's dtype.
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        # 0-d scalar result: preserve the reduction's scalar type.
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount
    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Variance over `axis` using the two-pass formulation
    (mean first, then mean of squared deviations).

    Integer and boolean inputs are accumulated as float64 unless an
    explicit dtype is given.  Warns when ddof >= slice size.
    """
    arr = asanyarray(a)
    rcount = _count_reduce_items(arr, axis)
    # Make this warning show up on top.
    if ddof >= rcount:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
        dtype = mu.dtype('f8')
    # Compute the mean.
    # Note that if dtype is not of inexact type then arraymean will
    # not be either.
    arrmean = umr_sum(arr, axis, dtype, keepdims=True)
    if isinstance(arrmean, mu.ndarray):
        arrmean = um.true_divide(
                arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
    else:
        arrmean = arrmean.dtype.type(arrmean / rcount)
    # Compute sum of squared deviations from mean
    # Note that x may not be inexact and that we need it to be an array,
    # not a scalar.
    x = asanyarray(arr - arrmean)
    if issubclass(arr.dtype.type, nt.complexfloating):
        # |z|^2 = z * conj(z); keep only the real magnitude.
        x = um.multiply(x, um.conjugate(x), out=x).real
    else:
        x = um.multiply(x, x, out=x)
    ret = umr_sum(x, axis, dtype, out, keepdims)
    # Compute degrees of freedom and make sure it is not negative.
    rcount = max([rcount - ddof, 0])
    # divide by degrees of freedom
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount
    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Standard deviation: square root of _var, computed in place when
    the variance came back as an ndarray."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims)
    if isinstance(variance, mu.ndarray):
        return um.sqrt(variance, out=variance)
    if hasattr(variance, 'dtype'):
        return variance.dtype.type(um.sqrt(variance))
    return um.sqrt(variance)
| gpl-2.0 |
TheTypoMaster/Fujitsu-Siemens-ESPRIMO-Mobile-V5535 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 1891 | 3300 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict whose missing keys transparently create nested
    autodicts, allowing d[a][b][c] = v without intermediate setup."""
    return defaultdict(autodict)
# Per-event tables mapping field names to their flag / symbolic value
# definitions; filled in by the define_* callbacks below.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Record the delimiter printed between a flag field's set bits."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Map one bit value of a flag field to its symbolic name."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Called when a symbolic field is declared; no per-field state is
    needed (values arrive via define_symbolic_value)."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Map one value of a symbolic field to its display string."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flag field's *value* as its delimited symbolic names.

    Returns the empty string when no flag definitions exist for the
    field or no bit matches.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() works on both Python 2 lists and Python 3 dict views;
        # the original keys()/.sort() idiom raises AttributeError on
        # Python 3.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for *value* on this field,
    or the empty string when unknown."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() works on both Python 2 lists and Python 3 dict views;
        # the original keys()/.sort() idiom raises AttributeError on
        # Python 3.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Trace flag bit values and their symbolic names (0x00 means "no flags").
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }

def trace_flag_str(value):
    """Render the trace-flags bitmask *value* as 'A | B | ...'.

    A zero value renders as "NONE"; unknown bits are silently dropped.
    """
    string = ""
    print_delim = 0
    # Iterate in ascending bit order so the output is deterministic even
    # on Python 2, where plain dict iteration order is arbitrary.
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx
    return string
def taskState(state):
    """Map a scheduler task-state code to its one-letter display name,
    or "Unknown" for unrecognized codes."""
    states = {
        0 : "R",
        1 : "S",
        2 : "D",
        64: "DEAD"
    }
    return states.get(state, "Unknown")
class EventHeaders:
    """Bundle of the common_* fields carried by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm, common_callchain):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
        self.callchain = common_callchain

    def ts(self):
        """Event timestamp in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Event timestamp formatted as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
gsehub/edx-platform | lms/djangoapps/verify_student/tests/fake_software_secure.py | 13 | 1684 | """
Fake Software Secure page for use in acceptance tests.
"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic.base import View
from edxmako.shortcuts import render_to_response
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
class SoftwareSecureFakeView(View):
    """
    Fake SoftwareSecure view for testing different photo verification statuses
    and email functionality.
    """

    @method_decorator(login_required)
    def get(self, request):
        """
        Render a fake Software Secure page that will pick the most recent
        attempt for a given user and pass it to the html page.
        """
        context_dict = self.response_post_params(request.user)
        return render_to_response("verify_student/test/fake_softwaresecure_response.html", context_dict)

    @classmethod
    def response_post_params(cls, user):
        """
        Calculate the POST params we want to send back to the client.

        Mirrors the fields Software Secure would POST: receipt_id (None if
        the user has no verification attempts), an authorization code built
        from the configured API access key, and the callback URL.
        """
        access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
        context = {
            'receipt_id': None,
            'authorization_code': 'SIS {}:0000'.format(access_key),
            'results_callback': reverse('verify_student_results_callback')
        }
        try:
            # [0] raises IndexError when the user has no attempts; the bare
            # except deliberately leaves receipt_id as None in that case
            # (NOTE(review): it also hides unrelated DB errors).
            most_recent = SoftwareSecurePhotoVerification.objects.filter(user=user).order_by("-updated_at")[0]
            context["receipt_id"] = most_recent.receipt_id
        except: # pylint: disable=bare-except
            pass
        return context
| agpl-3.0 |
HardlyHaki/crits | crits/samples/handlers.py | 2 | 57539 | import copy
import json
import logging
import os
import pprint
import subprocess
import tempfile, shutil
import time
from bson.objectid import ObjectId
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from hashlib import md5
from mongoengine.base import ValidationError
from crits.backdoors.backdoor import Backdoor
from crits.campaigns.forms import CampaignForm
from crits.core import form_consts
from crits.core.class_mapper import class_from_value, class_from_id
from crits.core.crits_mongoengine import EmbeddedSource, EmbeddedCampaign
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.data_tools import convert_string_to_bool, validate_md5_checksum
from crits.core.exceptions import ZipFileError
from crits.core.forms import DownloadFileForm
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.handsontable_tools import convert_handsontable_to_rows, parse_bulk_upload
from crits.core.mongo_tools import get_file
from crits.core.source_access import SourceAccess
from crits.core.user_tools import is_admin, user_sources, get_user_organization
from crits.core.user_tools import is_user_subscribed, is_user_favorite
from crits.notifications.handlers import remove_user_from_notification
from crits.objects.handlers import object_array_to_dict
from crits.objects.handlers import validate_and_add_new_handler_object
from crits.samples.forms import XORSearchForm, UnrarSampleForm, UploadFileForm
from crits.samples.sample import Sample
from crits.samples.yarahit import YaraHit
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
from crits.stats.handlers import generate_yara_hits
from crits.vocabulary.relationships import RelationshipTypes
# Module-level logger shared by all sample handlers.
logger = logging.getLogger(__name__)
def generate_sample_csv(request):
    """
    Build an HTTP response carrying a CSV export of Sample information.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    return csv_export(request, Sample)
def get_sample_details(sample_md5, analyst, format_=None):
    """
    Generate the data to render the Sample details template.

    :param sample_md5: The MD5 of the Sample to get details for.
    :type sample_md5: str
    :param analyst: The user requesting this information.
    :type analyst: str
    :param format_: The format of the details page ("yaml", "json", "text",
                    or None for the full HTML detail view).
    :type format_: str
    :returns: template (str), arguments (dict)
    """
    template = None
    sources = user_sources(analyst)
    # Filtering on the user's sources enforces access control at query time.
    sample = Sample.objects(md5=sample_md5,
                            source__name__in=sources).first()
    if not sample:
        return ('error.html', {'error': "File not yet available or you do not have access to view it."})
    sample.sanitize_sources(username=analyst)
    if format_:
        # Fields stripped from machine-readable (yaml/json) exports.
        exclude = [
            "source",
            "relationships",
            "schema_version",
            "campaign",
            "analysis",
            "bucket_list",
            "ticket",
            "releasability",
            "unsupported_attrs",
            "status",
            "objects",
            "modified",
            "analyst",
            "_id"
        ]
        if format_ == "yaml":
            data = sample.to_yaml(exclude)
            return "yaml", data
        if format_ == "json":
            data = sample.to_json(exclude)
            return "json", data
    # NOTE(review): this branch is unreachable -- a missing sample already
    # returned above.
    if not sample:
        template = "error.html"
        args = {'error': "No sample found"}
    elif format_ == "text":
        template = "samples_detail_text.html"
        args = {'sample': sample}
    else:
        #create forms
        xor_search_form = XORSearchForm()
        campaign_form = CampaignForm()
        unrar_sample_form = UnrarSampleForm()
        download_form = DownloadFileForm(initial={"obj_type":'Sample',
                                                  "obj_id":sample.id,
                                                  "meta_format": "none"})
        # do we have the binary?
        if isinstance(sample.filedata.grid_id, ObjectId):
            binary_exists = 1
        else:
            binary_exists = 0
        sample.sanitize("%s" % analyst)
        # remove pending notifications for user
        remove_user_from_notification("%s" % analyst, sample.id, 'Sample')
        # subscription
        subscription = {
            'type': 'Sample',
            'id': sample.id,
            'subscribed': is_user_subscribed("%s" % analyst,
                                             'Sample',
                                             sample.id),
        }
        #objects
        objects = sample.sort_objects()
        #relationships
        relationships = sample.sort_relationships("%s" % analyst,
                                                  meta=True)
        # relationship
        relationship = {
            'type': 'Sample',
            'value': sample.id
        }
        #comments
        comments = {'comments': sample.get_comments(),
                    'url_key': sample_md5}
        #screenshots
        screenshots = sample.get_screenshots(analyst)
        # favorites
        favorite = is_user_favorite("%s" % analyst, 'Sample', sample.id)
        # services
        service_list = get_supported_services('Sample')
        # analysis results
        service_results = sample.get_analysis_results()
        # template stays None here; callers render the default detail page.
        args = {'objects': objects,
                'relationships': relationships,
                'comments': comments,
                'relationship': relationship,
                'subscription': subscription,
                'sample': sample, 'sources': sources,
                'campaign_form': campaign_form,
                'download_form': download_form,
                'xor_search_form': xor_search_form,
                'unrar_sample_form': unrar_sample_form,
                'binary_exists': binary_exists,
                'favorite': favorite,
                'screenshots': screenshots,
                'service_list': service_list,
                'service_results': service_results}
    return template, args
def generate_sample_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtlist_by_org', 'jtdelete',
                  or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = Sample
    type_ = "sample"
    # jtable configuration (urls, fields, sorting) lives on the model.
    mapper = obj_type._meta['jtable_opts']
    if option == "jtlist":
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type, details_url, details_url_key,
                                    request, includes=fields)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    if option == "jtlist_by_org":
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        # Force the 'source' filter to the requesting user's organization.
        get_values = request.GET.copy()
        get_values['source'] = get_user_organization("%s" % request.user.username)
        request.GET = get_values
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type,details_url,details_url_key,
                                    request, includes=fields)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if option == "jtdelete":
        response = {"Result": "ERROR"}
        if jtable_ajax_delete(obj_type,request):
            response = {"Result": "OK"}
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Not an AJAX data request: build the jtable definition for the page.
    jtopts = {
        'title': "Samples",
        'default_sort': mapper['default_sort'],
        'listurl': reverse('crits.%ss.views.%ss_listing' %
                           (type_, type_), args=('jtlist',)),
        'deleteurl': reverse('crits.%ss.views.%ss_listing' %
                             (type_, type_), args=('jtdelete',)),
        'searchurl': reverse(mapper['searchurl']),
        'fields': mapper['jtopts_fields'],
        'hidden_fields': mapper['hidden_fields'],
        'linked_fields': mapper['linked_fields'],
        'details_link': mapper['details_link'],
        'no_sort': mapper['no_sort']
    }
    jtable = build_jtable(jtopts,request)
    # Toolbar buttons: status filters plus the "Add Sample" shortcut.
    # The string values are JavaScript snippets evaluated client-side.
    jtable['toolbar'] = [
        {
            'tooltip': "'All Samples'",
            'text': "'All'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'New Samples'",
            'text': "'New'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'In Progress Samples'",
            'text': "'In Progress'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Analyzed Samples'",
            'text': "'Analyzed'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Deprecated Samples'",
            'text': "'Deprecated'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Add Sample'",
            'text': "'Add Sample'",
            'click': "function () {$('#new-sample').click()}",
        },
    ]
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_,
                                   'button' : '%ss_tab' % type_},
                                  RequestContext(request))
    else:
        return render_to_response("%s_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_},
                                  RequestContext(request))
def generate_yarahit_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # ?refresh=yes recomputes the yara hit statistics before listing.
    refresh = request.GET.get("refresh", "no")
    if refresh == "yes":
        generate_yara_hits()
    obj_type = YaraHit
    type_ = "yarahit"
    if option == "jtlist":
        # Sets display url
        details_url = 'crits.samples.views.samples_listing'
        details_url_key = "detectexact"
        response = jtable_ajax_list(obj_type,
                                    details_url,
                                    details_url_key,
                                    request)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Not an AJAX data request: build the jtable definition for the page.
    jtopts = {
        'title': "Yara Hits",
        'default_sort': "result ASC",
        'listurl': reverse('crits.samples.views.%ss_listing' % (type_,),
                           args=('jtlist',)),
        'deleteurl': "",
        'searchurl': reverse('crits.samples.views.%ss_listing' % (type_,)),
        'fields': ["result", "engine", "version", "sample_count","_id"],
        'hidden_fields': ["_id"],
        'linked_fields': []
    }
    jtable = build_jtable(jtopts,request)
    # Single toolbar button triggering a stats refresh then a reload.
    jtable['toolbar'] = [
        {
            'tooltip': "'Refresh Yara Hits'",
            'text': "'Refresh Stats'",
            'click': "function () {$.get('"+reverse('crits.samples.views.%ss_listing' % type_)+"', {'refresh': 'yes'}, function () { $('#yarahits_listing').jtable('reload');});}"
        },
    ]
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%ss_listing' % type_,
                                   'button' : '%ss_button' % type_},
                                  RequestContext(request))
    else:
        return render_to_response("%ss_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%ss_listing' % type_},
                                  RequestContext(request))
def get_filename(md5=None):
    """
    Look up the filename of a sample by its MD5.

    :param md5: The MD5 of the sample to get the filename of.
    :type md5: str
    :returns: None, str
    """
    if md5:
        sample = Sample.objects(md5=md5).first()
        if sample:
            return sample.filename
    return None
def get_md5_hash(oid=None):
    """
    Get the MD5 of a sample by ObjectId.

    :param oid: The ObjectId of the sample to get the MD5 of.
    :type oid: str
    :returns: None, str
    """
    # Guard-clause style to match the sibling get_filename() instead of
    # the original's needless if/else nesting.
    if oid is None:
        return None
    sample = Sample.objects(id=oid).first()
    if not sample:
        return None
    return sample.md5
def delete_sample(sample_md5, username=None):
    """
    Delete a sample from CRITs.  Only admins may delete.

    :param sample_md5: The MD5 of the sample to delete.
    :type sample_md5: str
    :param username: The user deleting this sample.
    :type username: str
    :returns: bool
    """
    if not is_admin(username):
        return False
    sample = Sample.objects(md5=sample_md5).first()
    if not sample:
        return False
    sample.delete(username=username)
    return True
def mail_sample(sample_md5, recips=None):
    """
    Mail a sample's details to a list of recipients.

    :param sample_md5: The MD5 of the sample to send.
    :type sample_md5: str
    :param recips: List of recipients.
    :type recips: list
    :returns: None on success or no-op, str (error args) on failure
    """
    if recips is None:
        return None
    sample = Sample.objects(md5=sample_md5).first()
    if not sample:
        return None
    try:
        send_mail('Details for %s' % sample_md5,
                  '%s' % pprint.pformat(sample.to_json()),
                  settings.CRITS_EMAIL,
                  recips,
                  fail_silently=False)
    except Exception as e:
        logger.error(e)
        return str(e.args)
    return None
def get_source_counts(analyst):
    """
    Retrieve the SourceAccess objects for every source the user may see.

    :param analyst: The user to get sources for.
    :type analyst: str
    :returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
    """
    return SourceAccess.objects(name__in=user_sources(analyst))
def get_yara_hits(version=None):
    """
    Fetch yara hits, optionally restricted to a single version, sorted
    ascending by result.

    :param version: The yara hit version to search for.
    :type version: str
    :returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
    """
    query = YaraHit.objects(version=version) if version else YaraHit.objects()
    return query.order_by('+result')
def handle_unrar_sample(md5, user=None, password=None):
    """
    Unrar an existing sample identified by MD5.

    :param md5: The MD5 of the sample to unrar.
    :type md5: str
    :param user: The user unraring this sample.
    :type user: str
    :param password: Password to use to unrar the sample.
    :type password: str
    :returns: list
    :raises: ZipFileError, Exception
    """
    sample = class_from_value('Sample', md5)
    if not sample:
        return None
    return unrar_file(md5, user, password,
                      sample.filedata.read(),
                      sample.source[0].name,
                      method="Unrar Existing Sample",
                      reference='',
                      campaign=sample.campaign,
                      related_md5=md5)
def handle_unzip_file(md5, user=None, password=None):
    """
    Unzip an existing sample identified by MD5.

    :param md5: The MD5 of the sample to unzip.
    :type md5: str
    :param user: The user unzipping this sample.
    :type user: str
    :param password: Password to use to unzip the sample.
    :type password: str
    :returns: list
    :raises: ZipFileError, Exception
    """
    sample = class_from_value('Sample', md5)
    if not sample:
        return None
    return unzip_file(md5, user, password,
                      sample.filedata.read(),
                      sample.source[0].name,
                      method="Unzip Existing Sample",
                      reference='',
                      campaign=sample.campaign,
                      related_md5=md5)
def unzip_file(filename, user=None, password=None, data=None, source=None,
               method='Zip', reference='', campaign=None, confidence='low',
               related_md5=None, related_id=None, related_type='Sample',
               bucket_list=None, ticket=None, inherited_source=None,
               is_return_only_md5=True, backdoor_name=None,
               backdoor_version=None):
    """
    Unzip a file with 7z and add every extracted file as a new Sample.

    :param filename: The name of the file to unzip.
    :type filename: str
    :param user: The user unzipping the file.
    :type user: str
    :param password: The password to use to unzip the file.
    :type password: str
    :param data: The filedata.
    :type data: str
    :param source: The name of the source that provided the data.
    :type source: str
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param inherited_source: Source(s) to be inherited by the new Sample
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :param backdoor_name: Name of backdoor to relate this object to.
    :type backdoor_name: str
    :param backdoor_version: Version of backdoor to relate this object to.
    :type backdoor_version: str
    :returns: list
    :raises: ZipFileError, Exception
    """
    temproot = settings.TEMP_DIR
    samples = []
    # Pre-initialize so the finally block can test them even when an
    # exception fires before mkdtemp() runs.
    zipdir = ""
    extractdir = ""
    try:
        zip_md5 = md5(data).hexdigest()
        # 7z doesn't decompress archives via stdin, therefore
        # we need to write it out as a file first
        zipdir = tempfile.mkdtemp(dir=temproot)
        zipfile = open(zipdir + "/" + filename, "wb")
        zipfile.write(data)
        zipfile.close()
        # Build argument string to popen()
        args = [settings.ZIP7_PATH]
        args.append("e")
        extractdir = tempfile.mkdtemp(dir=temproot)
        args.append("-o" + extractdir) # Set output directory
        # Apparently 7z doesn't mind being handed a password to an
        # archive that isn't encrypted - but blocks for the opposite
        # case, so we'll always give it something for a password argument
        if password is None:
            args.append("-pNone")
        else:
            args.append("-p" + password)
        args.append("-y") # 'Yes' on all queries - avoid blocking
        args.append(zipdir + "/" + filename)
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        # Give the process 30 seconds to complete, otherwise kill it
        waitSeconds = 30
        while (proc.poll() is None and waitSeconds):
            time.sleep(1)
            waitSeconds -= 1
        if proc.returncode: # 7z spit out an error
            errmsg = "Error while extracting archive\n" + proc.stdout.read()
            raise ZipFileError, errmsg
        elif not waitSeconds: # Process timed out
            proc.terminate()
            raise ZipFileError, "Unzip process failed to terminate"
        else:
            # The archive itself gets COMPRESSED_INTO only when the caller
            # told us it *is* the related sample.
            if related_md5 and related_md5 == zip_md5:
                relationship = RelationshipTypes.COMPRESSED_INTO
            else:
                relationship = RelationshipTypes.RELATED_TO
            # NOTE(review): the loop variable shadows the `filename`
            # parameter from here on.
            for root, dirs, files in os.walk(extractdir):
                for filename in files:
                    filepath = extractdir + "/" + filename
                    filehandle = open(filepath, 'rb')
                    new_sample = handle_file(filename, filehandle.read(),
                                             source, method, reference,
                                             related_md5=related_md5,
                                             related_id=related_id,
                                             related_type=related_type, backdoor='',
                                             user=user, campaign=campaign,
                                             confidence=confidence,
                                             bucket_list=bucket_list,
                                             ticket=ticket,
                                             inherited_source=inherited_source,
                                             relationship=relationship,
                                             is_return_only_md5=is_return_only_md5,
                                             backdoor_name=backdoor_name,
                                             backdoor_version=backdoor_version)
                    if new_sample:
                        samples.append(new_sample)
                    filehandle.close()
    except ZipFileError: # Pass this error up the chain
        raise
    except Exception, ex:
        # Re-wrap any other failure as a ZipFileError with all arg text.
        errmsg = ''
        for err in ex.args:
            errmsg = errmsg + " " + str(err)
        raise ZipFileError, errmsg
    finally:
        # Always clean up both temp directories.
        if os.path.isdir(zipdir):
            shutil.rmtree(zipdir)
        if os.path.isdir(extractdir):
            shutil.rmtree(extractdir)
    return samples
def unrar_file(filename, user=None, password=None, data=None, source=None,
               method="Generic", reference='', campaign=None, confidence='low',
               related_md5=None, related_id=None, related_type='Sample',
               bucket_list=None, ticket=None, inherited_source=None,
               is_return_only_md5=True,
               backdoor_name=None, backdoor_version=None):
    """
    Unrar a file with the `unrar` binary and add every extracted file
    as a new Sample.

    :param filename: The name of the file to unrar.
    :type filename: str
    :param user: The user unraring the file.
    :type user: str
    :param password: The password to use to unrar the file.
    :type password: str
    :param data: The filedata.
    :type data: str
    :param source: The name of the source that provided the data.
    :type source: str
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param inherited_source: Source(s) to be inherited by the new Sample
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :returns: list
    :raises: ZipFileError, Exception
    """
    samples = []
    try:
        # NOTE(review): data is hashed here *before* the `data is None`
        # fallback below, so calling with data=None raises TypeError
        # before get_file() is ever reached -- confirm intent.
        rar_md5 = md5(data).hexdigest()
        # write the data to a file so we can read from it as a rar file
        temproot = settings.TEMP_DIR
        rardir = tempfile.mkdtemp(dir=temproot)
        # append '.rar' to help ensure rarfile doesn't have same
        # name as an extracted file.
        rarname = os.path.join(rardir, filename)+'.rar'
        if data is None: #unraring an existing file
            data = get_file(filename)
        with open(rarname, "wb") as f:
            f.write(data)
        # change to temp directory since unrar allows extraction
        # only to the current directory first save current directory
        # NOTE(review): if anything above raised before old_dir/rardir
        # were assigned, the finally block below hits a NameError.
        old_dir = os.getcwd()
        os.chdir(rardir)
        cmd = [settings.RAR_PATH,'e'] #,'-inul'
        if password:
            cmd.append('-p'+password)
        else:
            cmd.append('-p-')
        cmd.append('-y') #assume yes to all prompts
        cmd.append(rarname)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        # Give the process 30 seconds to complete, otherwise kill it.
        waitSeconds = 30
        while (proc.poll() is None and waitSeconds):
            time.sleep(1)
            waitSeconds -= 1
        if proc.returncode:
            errmsg = "Error while unraring archive\n" + proc.stdout.read()
            raise ZipFileError, errmsg
        elif not waitSeconds:
            proc.terminate()
            raise ZipFileError, "Unrar process failed to terminate"
        else:
            # The archive itself gets COMPRESSED_INTO only when the caller
            # told us it *is* the related sample.
            if related_md5 and related_md5 == rar_md5:
                relationship = RelationshipTypes.COMPRESSED_INTO
            else:
                relationship = RelationshipTypes.RELATED_TO
            # NOTE(review): the loop variable shadows the `filename`
            # parameter; skip the archive file itself.
            for root, dirs, files in os.walk(rardir):
                for filename in files:
                    filepath = os.path.join(rardir, filename)
                    if filepath != rarname:
                        with open(filepath, 'rb') as filehandle:
                            new_sample = handle_file(filename,
                                                     filehandle.read(),
                                                     source, method, reference,
                                                     related_md5=related_md5,
                                                     related_id=related_id,
                                                     related_type=related_type,
                                                     backdoor='', user=user,
                                                     campaign=campaign,
                                                     confidence=confidence,
                                                     bucket_list=bucket_list,
                                                     ticket=ticket,
                                                     inherited_source=inherited_source,
                                                     relationship=relationship,
                                                     is_return_only_md5=is_return_only_md5,
                                                     backdoor_name=backdoor_name,
                                                     backdoor_version=backdoor_version)
                            samples.append(new_sample)
    except ZipFileError:
        raise
    except Exception:
        raise
        #raise ZipFileError, str(e)
    finally:
        #change back to original current directory
        os.chdir(old_dir)
        if os.path.isdir(rardir):
            shutil.rmtree(rardir)
    return samples
def handle_file(filename, data, source, method='Generic', reference='', related_md5=None,
                related_id=None, related_type='Sample', backdoor=None, user='',
                campaign=None, confidence='low', md5_digest=None, bucket_list=None,
                ticket=None, relationship=None, inherited_source=None, is_validate_only=False,
                is_return_only_md5=True, cache={}, backdoor_name=None,
                backdoor_version=None):
    """
    Handle adding a file.

    Creates (or updates) a Sample TLO from raw file data and/or an MD5,
    attaches sources/campaigns/relationships, and optionally runs triage.
    NOTE(review): the mutable default ``cache={}`` is only ever read here
    (never written), so the shared-default pitfall is benign in this function.

    :param filename: The name of the file.
    :type filename: str
    :param data: The filedata.
    :type data: str
    :param source: The name of the source that provided the data.
    :type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param backdoor: The backdoor to assign to this sample (unused here).
    :type backdoor: str
    :param user: The user uploading this sample.
    :type user: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param md5_digest: The MD5 of this sample.
    :type md5_digest: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param relationship: The relationship between this sample and the parent.
    :type relationship: str
    :param inherited_source: Source(s) to be inherited by the new Sample
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_validate_only: Only validate, do not add.
    :type is_validate_only: bool
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param backdoor_name: Name of the backdoor to relate the file to.
    :type backdoor_name: str
    :param backdoor_version: Version of the backdoor to relate the file to.
    :type backdoor_version: str
    :returns: str (the MD5) when ``is_return_only_md5`` is True, otherwise
        dict with keys:
            "success" (boolean),
            "message" (str),
            "object" (the sample),
    """
    retVal = {}
    retVal['success'] = True
    retVal['message'] = ""
    is_sample_new = False

    # Phase 1: derive/validate the MD5. Either an explicit digest or raw
    # data (from which we compute one) must be supplied.
    # get sample from database, or create it if one doesn't exist
    if not md5_digest and not data:
        retVal['message'] += "Either the MD5 digest or data need to be supplied"
        retVal['success'] = False
    elif md5_digest:
        # validate md5
        md5_digest = md5_digest.lower().strip()
        validate_md5_result = validate_md5_checksum(md5_digest)
        retVal['message'] += validate_md5_result.get('message')
        retVal['success'] = validate_md5_result.get('success')
    else:
        md5_digest = md5(data).hexdigest()
        validate_md5_result = validate_md5_checksum(md5_digest)
        retVal['message'] += validate_md5_result.get('message')
        retVal['success'] = validate_md5_result.get('success')

    # Phase 2: resolve the related TLO (if any) up front so a bad reference
    # aborts before we touch the database.
    if related_id or related_md5:
        if related_id:
            related_obj = class_from_id(related_type, related_id)
        else:
            related_obj = class_from_value(related_type, related_md5)
        if not related_obj:
            retVal['message'] += (' Related %s not found. Sample not uploaded.'
                                  % (related_type))
            retVal['success'] = False
    else:
        related_obj = None

    if retVal['success'] == False:
        if is_return_only_md5 == True:
            return None
        else:
            return retVal

    # Phase 3: find an existing Sample (bulk cache first, then DB) or
    # create a fresh one keyed on the MD5.
    cached_results = cache.get(form_consts.Sample.CACHED_RESULTS)
    if cached_results != None:
        sample = cached_results.get(md5_digest)
    else:
        sample = Sample.objects(md5=md5_digest).first()

    if not sample:
        is_sample_new = True
        sample = Sample()
        sample.filename = filename or md5_digest
        sample.md5 = md5_digest
    else:
        # NOTE(review): if ``filename`` is None this can append None to
        # sample.filenames — presumably callers always pass a name; verify.
        if filename not in sample.filenames and filename != sample.filename:
            sample.filenames.append(filename)
        if cached_results != None:
            cached_results[md5_digest] = sample
        # attempt to discover binary in GridFS before assuming we don't
        # have it
        sample.discover_binary()

    # Phase 4: attach the binary / metadata.
    if data:
        # we already have this binary so generate metadata from it
        if sample.filedata.grid_id:
            sample._generate_file_metadata(data)
        # add the binary to gridfs and generate metadata
        else:
            sample.add_file_data(data)
    # if we didn't get data:
    else:
        if sample.filedata:
            # get data from db and add metadata in case it doesn't exist
            data = sample.filedata.read()
            sample._generate_file_metadata(data)
        else:
            if md5_digest:
                # no data and no binary, add limited metadata
                sample.md5 = md5_digest
            else:
                retVal['message'] += ("The MD5 digest and data, or the file "
                                      "data itself, need to be supplied.")
                retVal['success'] = False

    # Phase 5: sourcing. Inherited sources are copied so the parent's
    # embedded documents are not shared between TLOs.
    #add copy of inherited source(s) to Sample
    if isinstance(inherited_source, EmbeddedSource):
        sample.add_source(copy.copy(inherited_source))
    elif isinstance(inherited_source, list) and len(inherited_source) > 0:
        for s in inherited_source:
            if isinstance(s, EmbeddedSource):
                sample.add_source(copy.copy(s))

    # generate new source information and add to sample
    if isinstance(source, basestring) and len(source) > 0:
        s = create_embedded_source(source,
                                   method=method,
                                   reference=reference,
                                   analyst=user)
        # this will handle adding a new source, or an instance automatically
        sample.add_source(s)
    elif isinstance(source, EmbeddedSource):
        sample.add_source(source, method=method, reference=reference)
    elif isinstance(source, list) and len(source) > 0:
        for s in source:
            if isinstance(s, EmbeddedSource):
                sample.add_source(s, method=method, reference=reference)

    if bucket_list:
        sample.add_bucket_list(bucket_list, user)

    if ticket:
        sample.add_ticket(ticket, user)

    # Phase 6: persist + post-processing (skipped in validate-only mode).
    # if no proper source has been provided, don't add the sample
    if len(sample.source) == 0:
        retVal['message'] += "The sample does not have a source."
        retVal['success'] = False
    elif is_validate_only == False:
        # assume it's a list of EmbeddedCampaign, but check if it's a string
        # if it is a string then create a new EmbeddedCampaign
        if campaign != None:
            campaign_array = campaign
            if isinstance(campaign, basestring):
                campaign_array = [EmbeddedCampaign(name=campaign, confidence=confidence, analyst=user)]
            for campaign_item in campaign_array:
                sample.add_campaign(campaign_item)

        # save sample to get an id since the rest of the processing needs it
        sample.save(username=user)

        sources = user_sources(user)
        if backdoor_name:
            # Relate this to the backdoor family if there is one.
            backdoor = Backdoor.objects(name=backdoor_name,
                                        source__name__in=sources).first()
            if backdoor:
                backdoor.add_relationship(sample,
                                          RelationshipTypes.RELATED_TO,
                                          analyst=user)
                backdoor.save()

            # Also relate to the specific instance backdoor.
            if backdoor_version:
                backdoor = Backdoor.objects(name=backdoor_name,
                                            version=backdoor_version,
                                            source__name__in=sources).first()
                if backdoor:
                    backdoor.add_relationship(sample,
                                              RelationshipTypes.RELATED_TO,
                                              analyst=user)
                    backdoor.save()

        # reloading clears the _changed_fields of the sample object. this prevents
        # situations where we save again below and the shard key (md5) is
        # still marked as changed.
        sample.reload()

        # run sample triage:
        if len(AnalysisResult.objects(object_id=str(sample.id))) < 1 and data:
            run_triage(sample, user)

        # update relationship if a related top-level object is supplied
        if related_obj and sample:
            if related_obj.id != sample.id: #don't form relationship to itself
                if not relationship:
                    if related_obj._meta['crits_type'] == 'Email':
                        relationship = RelationshipTypes.CONTAINED_WITHIN
                    else:
                        relationship = RelationshipTypes.RELATED_TO
                sample.add_relationship(related_obj,
                                        relationship,
                                        analyst=user,
                                        get_rels=False)
                sample.save(username=user)

    # Phase 7: build the user-facing status message.
    if is_sample_new == True:
        # New sample, and successfully uploaded
        if is_validate_only == False:
            retVal['message'] += ('Success: Added new sample <a href="%s">%s.</a>'
                                  % (reverse('crits.samples.views.detail',
                                             args=[sample.md5.lower()]),
                                     sample.md5.lower()))
            # Update Cache
            if cached_results != None:
                cached_results[sample.md5] = sample
    else:
        # Duplicate sample, but uploaded anyways
        if is_validate_only == False:
            message = ('Success: Updated sample <a href="%s">%s.</a>'
                       % (reverse('crits.samples.views.detail',
                                  args=[sample.md5.lower()]),
                          sample.md5.lower()))
            retVal['message'] += message
            retVal['status'] = form_consts.Status.DUPLICATE
            retVal['warning'] = message
        # Duplicate sample, but only validation
        else:
            if sample.id != None:
                warning_message = ('Warning: Trying to add file [' +
                                   filename + ']'
                                   ' when MD5 already exists as file [' +
                                   sample.filename + ']'
                                   '<a href="%s">%s.</a>'
                                   % (reverse('crits.samples.views.detail',
                                              args=[sample.md5.lower()]),
                                      sample.md5.lower()))
                retVal['message'] += warning_message
                retVal['status'] = form_consts.Status.DUPLICATE
                retVal['warning'] = warning_message

    if is_return_only_md5 == True:
        return md5_digest
    else:
        retVal['object'] = sample
        return retVal
def handle_uploaded_file(f, source, method='', reference='', file_format=None,
                         password=None, user=None, campaign=None, confidence='low',
                         related_md5=None, related_id=None, related_type='Sample',
                         filename=None, md5=None, bucket_list=None, ticket=None,
                         inherited_source=None, is_validate_only=False,
                         is_return_only_md5=True, cache={}, backdoor_name=None,
                         backdoor_version=None):
    """
    Handle an uploaded file.

    Dispatches zip/rar archives to the corresponding extractor, otherwise
    adds the file (or MD5-only metadata) as a single Sample.

    :param f: The uploaded file.
    :type f: file handle
    :param source: The name of the source that provided the data.
    :type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param file_format: The format the file was uploaded in ("zip", "rar", ...).
    :type file_format: str
    :param password: A password necessary to access the file data.
    :type password: str
    :param user: The user uploading this sample.
    :type user: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param filename: The filename of the sample.
    :type filename: str
    :param md5: The MD5 of the sample (note: shadows hashlib's md5 here).
    :type md5: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param inherited_source: Source(s) to be inherited by the new Sample
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_validate_only: Only validate, do not add.
    :type is_validate_only: bool
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param backdoor_name: Name of backdoor to relate this object to.
    :type backdoor_name: str
    :param backdoor_version: Version of backdoor to relate this object to.
    :type backdoor_version: str
    :returns: list
    """
    samples = list()
    if not source:
        return [{'success': False, 'message': "Missing source information."}]

    # Build a human-readable source method describing how this arrived.
    if method:
        method = " - " + method
    if f:
        method = "File Upload" + method
    elif md5:
        method = "Metadata Upload" + method
    else:
        method = "Upload" + method

    # ``f`` may be a file-like object or already-raw data.
    try:
        data = f.read()
    except AttributeError:
        data = f

    if not filename:
        filename = getattr(f, 'name', None)
    if not filename:
        # BUG FIX: the ``md5`` parameter shadows hashlib's md5() in this
        # module, so the original ``md5(data).hexdigest()`` always raised
        # (a str or None is not callable) and silently fell back to
        # "unknown". Use hashlib directly so the data's digest is used.
        try:
            import hashlib
            filename = hashlib.md5(data).hexdigest()
        except Exception:
            filename = "unknown"

    # Archives are exploded into their member files; everything else is a
    # single sample.
    if file_format == "zip" and f:
        return unzip_file(
            filename,
            user=user,
            password=password,
            data=data,
            source=source,
            method=method,
            reference=reference,
            campaign=campaign,
            confidence=confidence,
            related_md5=related_md5,
            related_id=related_id,
            related_type=related_type,
            bucket_list=bucket_list,
            ticket=ticket,
            inherited_source=inherited_source,
            is_return_only_md5=is_return_only_md5,
            backdoor_name=backdoor_name,
            backdoor_version=backdoor_version)
    elif file_format == "rar" and f:
        return unrar_file(
            filename,
            user=user,
            password=password,
            data=data,
            source=source,
            method=method,
            reference=reference,
            campaign=campaign,
            confidence=confidence,
            related_md5=related_md5,
            related_id=related_id,
            related_type=related_type,
            bucket_list=bucket_list,
            ticket=ticket,
            inherited_source=inherited_source,
            is_return_only_md5=is_return_only_md5,
            backdoor_name=backdoor_name,
            backdoor_version=backdoor_version)
    else:
        new_sample = handle_file(filename, data, source, method, reference,
                                 related_md5=related_md5, related_id=related_id,
                                 related_type=related_type, backdoor='', user=user,
                                 campaign=campaign, confidence=confidence, md5_digest=md5,
                                 bucket_list=bucket_list, ticket=ticket,
                                 inherited_source=inherited_source, is_validate_only=is_validate_only,
                                 is_return_only_md5=is_return_only_md5, cache=cache,
                                 backdoor_name=backdoor_name,
                                 backdoor_version=backdoor_version)
        if new_sample:
            samples.append(new_sample)
    return samples
def add_new_sample_via_bulk(data, rowData, request, errors, is_validate_only=False, cache={}):
    """
    Add a new sample from bulk upload.

    :param data: The data about the sample.
    :type data: dict
    :param rowData: Object data in the row.
    :type rowData: dict
    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param errors: List of existing errors to append to.
    :type errors: list
    :param is_validate_only: Only validate, do not add.
    :type is_validate_only: bool
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: tuple of result, errors, return value
    """
    username = request.user.username
    result = False
    retVal = {}
    retVal['success'] = True
    files = None
    if request.FILES:
        files = request.FILES

    # Pull the per-row upload fields out of the cleaned form data.
    filename = data.get('filename')
    campaign = data.get('campaign')
    confidence = data.get('confidence')
    md5 = data.get('md5')
    fileformat = data.get('file_format')
    password = data.get('password')
    related_md5 = data.get('related_md5')
    source = data.get('source')
    method = data.get('method', '')
    reference = data.get('reference')
    bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
    ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)

    samples = handle_uploaded_file(files, source, method, reference,
                                   file_format=fileformat,
                                   password=password,
                                   user=username,
                                   campaign=campaign,
                                   confidence=confidence,
                                   related_md5=related_md5,
                                   filename=filename,
                                   md5=md5,
                                   bucket_list=bucket_list,
                                   ticket=ticket,
                                   is_validate_only=is_validate_only,
                                   is_return_only_md5=False,
                                   cache=cache)

    # This block tries to add objects to the item
    if not errors or is_validate_only == True:
        result = True
        objectsData = rowData.get(form_consts.Common.OBJECTS_DATA)
        # BUG FIX: parse the JSON once, outside the sample loop. The
        # original re-assigned ``objectsData = json.loads(objectsData)``
        # inside the loop, which blew up on the second sample (loads() on
        # an already-parsed list).
        parsed_objects = json.loads(objectsData) if objectsData else None
        for sample in samples:
            # repack message field into top of structure
            if retVal.get('message'):
                if sample.get('success') == False:
                    retVal['success'] = False
                    result = False
                    errors.append(sample.get('message'))
                else:
                    retVal['message'] += sample.get('message')
            else:
                if sample.get('success') == False:
                    retVal['success'] = False
                    result = False
                    errors.append(sample.get('message'))
                else:
                    retVal['message'] = sample.get('message')
            if sample.get('warning'):
                retVal['warning'] = sample.get('warning')
            if sample.get('status'):
                retVal['status'] = sample.get('status')

            # add new objects if they exist
            if parsed_objects:
                for object_row_counter, objectData in enumerate(parsed_objects, 1):
                    if sample.get('object') != None and is_validate_only == False:
                        objectDict = object_array_to_dict(objectData, "Sample",
                                                          sample.get('object').id)
                    else:
                        if sample.get('object'):
                            if sample.get('object').id:
                                objectDict = object_array_to_dict(objectData, "Sample",
                                                                  sample.get('object').id)
                            else:
                                objectDict = object_array_to_dict(objectData, "Sample", "")
                        else:
                            objectDict = object_array_to_dict(objectData, "Sample", "")

                    (object_result, object_errors, object_retVal) = validate_and_add_new_handler_object(
                        None, objectDict, request, errors, object_row_counter,
                        is_validate_only=is_validate_only, cache=cache)

                    # if there was an error, mark the overall
                    # operation as failed
                    if object_retVal.get('success') == False:
                        retVal['success'] = False
                        result = False
                    if object_retVal.get('message'):
                        errors.append(object_retVal['message'])
                    else:
                        # BUG FIX: ``errors`` is a list; the original
                        # ``errors += "Failed..."`` extended it one
                        # character at a time. Append the whole message.
                        errors.append("Failed to add Sample: %s" % md5)

    return result, errors, retVal
def parse_row_to_bound_sample_form(request, rowData, cache, upload_type="File Upload"):
    """
    Parse a mass upload row into an UploadFileForm.

    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param rowData: The data in the row.
    :type rowData: dict
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param upload_type: The type of upload.
    :type upload_type: str
    :returns: :class:`crits.samples.forms.UploadFileForm`
    """
    consts = form_consts.Sample
    if not upload_type:
        upload_type = rowData.get(consts.UPLOAD_TYPE, "")

    # Fields that only apply to one of the two upload flavors.
    filedata = fileformat = password = filename = md5 = None
    if upload_type == consts.UploadType.FILE_UPLOAD:
        filedata = rowData.get(consts.FILE_DATA, "")
        fileformat = rowData.get(consts.FILE_FORMAT, "")
        password = rowData.get(consts.PASSWORD, "")
    elif upload_type == consts.UploadType.METADATA_UPLOAD:
        filename = rowData.get(consts.FILE_NAME, "")
        md5 = rowData.get(consts.MD5, "")

    data = {
        'upload_type': upload_type,
        'filedata': filedata,
        'filename': filename,
        'md5': md5,
        'file_format': fileformat,
        'campaign': rowData.get(consts.CAMPAIGN, ""),
        'confidence': rowData.get(consts.CAMPAIGN_CONFIDENCE, ""),
        'password': password,
        'email': convert_string_to_bool(rowData.get(consts.EMAIL_RESULTS, "")),
        'related_md5': rowData.get(consts.RELATED_MD5, ""),
        'source': rowData.get(consts.SOURCE, ""),
        'method': rowData.get(consts.SOURCE_METHOD, ""),
        'reference': rowData.get(consts.SOURCE_REFERENCE, ""),
        'bucket_list': rowData.get(consts.BUCKET_LIST, ""),
        'ticket': rowData.get(form_consts.Common.TICKET, ""),
    }

    # Reuse a single form instance across the whole bulk upload; only the
    # bound data changes per row.
    form = cache.get('sample_form')
    if form is None:
        form = UploadFileForm(request.user, data, request.FILES)
        cache['sample_form'] = form
    else:
        form.data = data
    form.full_clean()
    return form
def parse_row_to_bound_md5_sample_form(request, rowData, cache):
    """
    Parse a mass upload row into an UploadFileForm bound as a
    metadata-only (MD5) upload.

    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param rowData: The data in the row.
    :type rowData: dict
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: :class:`crits.samples.forms.UploadFileForm`
    """
    return parse_row_to_bound_sample_form(
        request, rowData, cache,
        upload_type=form_consts.Sample.UploadType.METADATA_UPLOAD)
def process_bulk_add_md5_sample(request, formdict):
    """
    Performs the bulk add of MD5 samples by parsing the request data. Batches
    some data into a cache object for performance by reducing large
    amounts of single database queries.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param formdict: The form representing the bulk uploaded data.
    :type formdict: dict
    :returns: :class:`django.http.HttpResponse`
    """
    cleanedRowsData = convert_handsontable_to_rows(request)

    # Collect every MD5 in the upload so all matching Samples can be
    # fetched with a single query instead of one per row.
    md5_list = [row.get(form_consts.Sample.MD5).lower()
                for row in cleanedRowsData
                if row is not None and row.get(form_consts.Sample.MD5) is not None]
    cached_samples = {s.md5: s for s in Sample.objects(md5__in=md5_list)}

    cache = {form_consts.Sample.CACHED_RESULTS: cached_samples,
             'cleaned_rows_data': cleanedRowsData}
    return parse_bulk_upload(request, parse_row_to_bound_md5_sample_form,
                             add_new_sample_via_bulk, formdict, cache)
def update_sample_filename(id_, filename, analyst):
    """
    Update a Sample filename.

    :param id_: ObjectId of the Sample.
    :type id_: str
    :param filename: The new filename.
    :type filename: str
    :param analyst: The user setting the new filename.
    :type analyst: str
    :returns: dict with key 'success' (boolean) and 'message' (str) if failed.
    """
    # Guard clauses: nothing to do without a filename or a matching sample.
    if not filename:
        return {'success': False, 'message': "No filename to change"}
    sample = Sample.objects(id=id_).first()
    if not sample:
        return {'success': False, 'message': "No sample to change"}
    # Normalize surrounding whitespace before persisting.
    sample.filename = filename.strip()
    try:
        sample.save(username=analyst)
        return {'success': True}
    except ValidationError, e:
        return {'success': False, 'message': e}
def modify_sample_filenames(id_, tags, analyst):
    """
    Modify the filenames for a Sample.

    Replaces the sample's entire alternate-filename list with ``tags``.

    :param id_: ObjectId of the Sample.
    :type id_: str
    :param tags: The new filenames.
    :type tags: list
    :param analyst: The user setting the new filenames.
    :type analyst: str
    :returns: dict with key 'success' (boolean) and 'message' (str) if failed.
    """
    sample = Sample.objects(id=id_).first()
    if sample:
        # set_filenames replaces (not appends to) the existing list.
        sample.set_filenames(tags)
        try:
            sample.save(username=analyst)
            return {'success': True}
        except ValidationError, e:
            return {'success': False, 'message': "Invalid value: %s" % e}
    else:
        return {'success': False}
| mit |
Venturi/oldcms | env/lib/python2.7/site-packages/django/db/models/loading.py | 81 | 1229 | import warnings
from django.apps import apps
from django.utils.deprecation import RemovedInDjango19Warning
warnings.warn(
"The utilities in django.db.models.loading are deprecated "
"in favor of the new application loading system.",
RemovedInDjango19Warning, stacklevel=2)
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
           'load_app', 'app_cache_ready')

# Backwards-compatibility for private APIs during the deprecation period.
# The new app registry raises LookupError where the old cache raised
# UnavailableApp, so the name is simply aliased.
UnavailableApp = LookupError
cache = apps

# These methods were always module level, so are kept that way for backwards
# compatibility. Each name forwards directly to the ``apps`` registry.
get_apps = apps.get_apps
get_app_package = apps.get_app_package
get_app_path = apps.get_app_path
get_app_paths = apps.get_app_paths
get_app = apps.get_app
get_models = apps.get_models
get_model = apps.get_model
register_models = apps.register_models
load_app = apps.load_app
app_cache_ready = apps.app_cache_ready
# This method doesn't return anything interesting in Django 1.6. Maintain it
# just for backwards compatibility until this module is deprecated.
def get_app_errors():
    """Return the registry's app_errors dict, creating it on first access."""
    if not hasattr(apps, 'app_errors'):
        apps.app_errors = {}
    return apps.app_errors
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tests/test_compat.py | 9 | 2357 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (
range, zip, map, filter,
lrange, lzip, lmap, lfilter,
builtins
)
import unittest
import nose
import pandas.util.testing as tm
class TestBuiltinIterators(tm.TestCase):
    """Check that pandas.compat's range/map/filter/zip return lazy
    iterators while the l-prefixed variants (lrange/lmap/...) return
    equivalent materialized lists."""

    def check_result(self, actual, expected, lengths):
        """Compare parallel tuples of (iterator, list) pairs against the
        expected contents and lengths."""
        for (iter_res, list_res), exp, length in zip(actual, expected, lengths):
            # The compat iterator must be lazy, the l-variant a real list.
            self.assertNotIsInstance(iter_res, list)
            tm.assertIsInstance(list_res, list)
            iter_res = list(iter_res)
            self.assertEqual(len(list_res), length)
            self.assertEqual(len(iter_res), length)
            self.assertEqual(iter_res, exp)
            self.assertEqual(list_res, exp)

    def test_range(self):
        actual1 = range(10)
        actual2 = lrange(10)
        # NOTE: the trailing commas deliberately build 1-tuples so the
        # cases can be concatenated with ``+=`` below.
        actual = [actual1, actual2],
        expected = list(builtins.range(10)),
        lengths = 10,

        actual1 = range(1, 10, 2)
        actual2 = lrange(1, 10, 2)
        actual += [actual1, actual2],
        lengths += 5,
        expected += list(builtins.range(1, 10, 2)),
        self.check_result(actual, expected, lengths)

    def test_map(self):
        func = lambda x, y, z: x + y + z
        lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
        actual1 = map(func, *lst)
        actual2 = lmap(func, *lst)
        actual = [actual1, actual2],
        expected = list(builtins.map(func, *lst)),
        lengths = 10,
        self.check_result(actual, expected, lengths)

    def test_filter(self):
        func = lambda x: x
        lst = list(builtins.range(10))
        actual1 = filter(func, lst)
        actual2 = lfilter(func, lst)
        actual = [actual1, actual2],
        # 0 is falsy, so filtering drops one of the ten elements.
        lengths = 9,
        expected = list(builtins.filter(func, lst)),
        self.check_result(actual, expected, lengths)

    def test_zip(self):
        lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
        actual = [zip(*lst), lzip(*lst)],
        expected = list(builtins.zip(*lst)),
        lengths = 10,
        self.check_result(actual, expected, lengths)
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   # '--with-coverage', '--cover-package=pandas.core'],
                   exit=False)
| apache-2.0 |
henaras/sahara | sahara/plugins/vanilla/v1_2_1/scaling.py | 9 | 3515 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
from sahara import context
from sahara.i18n import _
from sahara.plugins import utils
from sahara.plugins.vanilla.v1_2_1 import config_helper
from sahara.plugins.vanilla.v1_2_1 import run_scripts as run
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import poll_utils
from sahara.utils import remote
@cpo.event_wrapper(True, step=_("Decommission %s") % "TaskTrackers")
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    """Decommission TaskTrackers via Hadoop's include/exclude files.

    Writes the doomed instances' FQDNs to ``tt.excl`` on the JobTracker,
    triggers an ``mradmin`` node refresh, then rewrites the include file
    with only the survivors and clears the exclude file.
    """
    with remote.get_remote(jt) as r:
        r.write_file_to('/etc/hadoop/tt.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        # Give the JobTracker a moment to act on the refresh before the
        # include/exclude files are rewritten.
        context.sleep(3)
        r.write_files_to({'/etc/hadoop/tt.incl':
                          utils.generate_fqdn_host_names(survived_inst),
                          '/etc/hadoop/tt.excl': "",
                          })
def is_decommissioned(r, inst_to_be_deleted):
    """Return True once every instance slated for deletion reports the
    'Decommissioned' status in ``hadoop dfsadmin -report`` output."""
    cmd = r.execute_command("sudo su -c 'hadoop dfsadmin -report' hadoop")
    datanodes = parse_dfs_report(cmd[1])
    for instance in inst_to_be_deleted:
        still_active = any(
            node["Name"].startswith(instance.internal_ip)
            and node["Decommission Status"] != "Decommissioned"
            for node in datanodes)
        if still_active:
            return False
    return True
@cpo.event_wrapper(True, step=_("Decommission %s") % "DataNodes")
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    """Decommission DataNodes via HDFS's include/exclude files.

    Unlike the TaskTracker path, HDFS decommissioning is asynchronous, so
    this polls ``dfsadmin -report`` until every doomed node reports
    'Decommissioned' (bounded by the plugin's configured timeout).
    """
    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        # Short grace period before polling for decommission status.
        context.sleep(3)

        poll_utils.plugin_option_poll(
            nn.cluster, is_decommissioned,
            config_helper.DECOMMISSIONING_TIMEOUT,
            _("Decommission %s") % "DataNodes", 3,
            {'r': r, 'inst_to_be_deleted': inst_to_be_deleted})

        r.write_files_to({'/etc/hadoop/dn.incl':
                          utils.generate_fqdn_host_names(survived_inst),
                          '/etc/hadoop/dn.excl': ""})
def parse_dfs_report(cmd_output):
    """Parse ``hadoop dfsadmin -report`` output into per-datanode dicts.

    Only lines after the "Datanodes available" header are considered;
    blank lines separate one datanode's key/value record from the next.
    """
    # Keep only the per-datanode section of the report.
    lines = cmd_output.rstrip().split(os.linesep)
    datanode_lines = []
    header_seen = False
    for line in lines:
        if header_seen:
            datanode_lines.append(line)
        if line.startswith("Datanodes available"):
            header_seen = True

    reports = []
    current = {}
    for line in datanode_lines:
        if line:
            # Split on the first ':'; the value starts two characters
            # later (past the ": " separator) and is stripped anyway.
            idx = line.find(':')
            current[line[:idx].strip()] = line[idx + 2:].strip()
        elif current:
            # Blank line terminates the current datanode record.
            reports.append(current)
            current = {}
    if current:
        reports.append(current)
    return reports
| apache-2.0 |
jpzm/bw | comboboxes.py | 1 | 2554 | # vim: set fileencoding=utf-8 :
# Copyright (C) 2008 Joao Paulo de Souza Medeiros
#
# Author(s): Joao Paulo de Souza Medeiros <ignotus21@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import gtk
import gobject
class BWChangeableComboBoxEntry(gtk.ComboBoxEntry):
    """
    ComboBoxEntry whose rows can be renamed in place: text typed in the
    entry is written back into the most recently selected row.
    """
    def __init__(self):
        """
        Build the combo backed by a single-column string ListStore and
        wire up the change handlers.
        """
        self.__liststore = gtk.ListStore(gobject.TYPE_STRING)
        gtk.ComboBoxEntry.__init__(self, self.__liststore, 0)

        self.connect("changed", self.__changed)
        self.get_child().connect("changed", self.__entry_changed)

        # Remembers the last selected row so edits made while no row is
        # active (get_active() == -1 during typing) still target it.
        self.__last_active = None

    def __changed(self, widget):
        """
        Track the index of the most recently selected row.
        """
        if self.get_active() != -1:
            self.__last_active = self.get_active()

    def bw_get_lenght(self):
        """
        Return the number of rows in the model.  (Method name — including
        the 'lenght' typo — kept as-is for backward compatibility.)
        """
        return len(self.__liststore)

    def __entry_changed(self, widget):
        """
        Mirror the entry text back into the last active row while the
        user is typing.
        """
        if len(self.__liststore) > 0 and\
           self.__last_active != None and\
           self.get_active() == -1:
            iter = self.get_model().get_iter((self.__last_active,))
            self.__liststore.set_value(iter, 0, widget.get_text().strip())

    def bw_get_active(self):
        """
        Return the active row index, falling back to the last active row
        when the entry is mid-edit (active == -1).
        """
        if self.get_active() == -1:
            return self.__last_active
        return self.get_active()
if __name__ == "__main__":
    # Manual smoke test: a window with a 'More' button that appends rows
    # to the combo; editing the entry renames the selected row in place.
    def button_clicked(widget, combo):
        """
        Append another row when the 'More' button is pressed.
        """
        combo.append_text('New')

    window = gtk.Window()
    window.connect("destroy", lambda w: gtk.main_quit())

    box = gtk.HBox()

    combo = BWChangeableComboBoxEntry()
    combo.append_text('New')
    combo.set_active(0)

    button = gtk.Button('More')
    button.connect("clicked", button_clicked, combo)

    box.pack_start(button, False, False)
    box.pack_start(combo, True, True)

    window.add(box)
    window.show_all()

    gtk.main()
| gpl-2.0 |
jrgm/blog | packer/packer.py | 7 | 7732 | #!/usr/bin/python
# The MIT License (MIT)
#
# Copyright (c) 2012-2013 Teemu Ikonen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Tool that accepts multiple images as arguments and tries to fit them to single image
# Image locations are written out as json file.
#
# ImageMagick 6.8 or later must be installed and in the command path. Tool uses commands 'identify' and 'convert'.
#
# $ ./packer.py <image1>, <image2>, ..
#
# Tool outputs atlas image and json and css files that map the image locations on the atlas
#
import subprocess
import argparse
import re
import os, sys
import json
# Command-line interface: one or more input image files plus options that
# control the output file names, inter-image padding and maximum atlas size.
parser = argparse.ArgumentParser(description="Packs images to atlas. Uses ImageMagick to parse and compose the images")
parser.add_argument('files', metavar='FILE', type=str, nargs='+', help="Image file")
parser.add_argument("-o", dest="outfile", type=str, default='out.png', help="Output atlas file")
parser.add_argument("-jo", dest="jsonoutfile", type=str, help="Output atlas json file")
parser.add_argument("-jso", dest="jsoutfile", type=str, help="Output atlas import js file")
parser.add_argument("-co", dest="cssoutfile", type=str, help="Output atlas css file")
parser.add_argument("-p", dest="pad", type=int, default=1, help="Padding")
parser.add_argument("-mw", dest="width", type=int, default=1024, help="Maximum width")
parser.add_argument("-mh", dest="height", type=int, default=1024, help="Maximum height")
args = parser.parse_args()
# Check that ImageMagick is installed
def is_im_installed():
print "Checking ImageMagick"
try:
o = subprocess.check_output(['identify', '--version'], stderr=subprocess.STDOUT).strip()
if not o.find('ImageMagick'):
print "\nUnknown output from identify, is ImageMagick installed?"
return False
else:
print 'Found: ' + o.split('\n')[0]
return True
except Exception as e:
print "ImageMagick identity not found:",e
return False
# Abort immediately when ImageMagick is missing; nothing below can work
# without the 'identify' and 'convert' commands.
if not is_im_installed():
    sys.exit(1)
def bname(file):
    """Return the basename of *file* with every '.' removed and the last
    extension stripped: 'dir/out.png' -> 'out', 'a.tar.gz' -> 'atar'.
    A name without any '.' yields the empty string.
    """
    pieces = os.path.basename(file).split('.')
    del pieces[-1:]
    return ''.join(pieces)
# Derive default output names from the atlas file name when the user did
# not supply them explicitly (e.g. out.png -> out.json, out.css, out.json.js).
if not args.jsonoutfile:
    args.jsonoutfile = os.path.join(os.path.dirname(args.outfile),bname(args.outfile)+ '.json')
if not args.cssoutfile:
    args.cssoutfile = os.path.join(os.path.dirname(args.outfile),bname(args.outfile)+ '.css')
if not args.jsoutfile:
    args.jsoutfile = args.jsonoutfile + '.js'
print "==========================="
print "Resolving file dimensions"
blocks = []
# Use identify command to get file dimensions
for file in args.files:
    try:
        o = subprocess.check_output(['identify', file], stderr=subprocess.STDOUT).strip()
        # identify prints e.g. "name.png PNG 32x48 ..." -- capture the
        # format token and the WxH geometry.
        p = re.compile(r'^[^:]+ ([^\s]+) ([0-9]+)x([0-9]+)')
        m = p.match(o)
        if not m:
            print "\nWARN: unable to identify {0}: {1}".format(file, o)
            continue
        fmt = m.group(1)
        w = int(m.group(2))
        h = int(m.group(3))
        # 'ow'/'oh' keep the original size; 'w'/'h' include padding and are
        # what the packer operates on.
        blocks.append({
            'name': file,
            'ow': w,
            'oh': h,
            'w': w + args.pad, # add padding
            'h': h + args.pad
        })
        print "{0} -> {1}x{2}".format(os.path.basename(file), w, h)
    except subprocess.CalledProcessError as e:
        print "\nWARN: failed to process {0} error: {1}".format(file, e.output)
# Area tree packer
def find_node(node, w, h):
    """Recursively search the packing tree for a free node able to hold a
    w x h rectangle.  Occupied nodes delegate to their 'right' subtree
    first, then 'left'.  Returns the node dict, or None when nothing fits.
    """
    if 'used' in node:
        # Interior node: already occupied, so descend into its split areas.
        return find_node(node['right'], w, h) or find_node(node['left'], w, h)
    if w <= node['w'] and h <= node['h']:
        return node
    return None
# mark node as used and split it to right and bottom areas
def use_node(node, w, h):
    """Mark *node* as occupied by a w x h rectangle and split the leftover
    area into two free children: 'left' is the full-width strip below the
    placed rectangle, 'right' is the remainder to its right at the same
    height.  Mutates and returns the same node dict.
    """
    x, y = node['x'], node['y']
    node['used'] = True
    node['left'] = {'x': x, 'y': y + h, 'w': node['w'], 'h': node['h'] - h}
    node['right'] = {'x': x + w, 'y': y, 'w': node['w'] - w, 'h': h}
    return node
# Fits the blocks to the area and sets p key to the allocated area
def fit(blocks, w, h):
    """Try to place every block inside a w x h area.

    On success each block dict gains a 'p' key holding its allocated tree
    node (whose 'x'/'y' give the position) and True is returned.  Returns
    False as soon as one block cannot be placed.
    """
    root = {'x': 0, 'y': 0, 'w': w, 'h': h}
    for block in blocks:
        spot = find_node(root, block['w'], block['h'])
        if spot is None:
            # The atlas cannot hold this block: report failure to the caller.
            return False
        block['p'] = use_node(spot, block['w'], block['h'])
    return True
print "==========================="
print "fitting {0} images, padding {1}".format(len(blocks), args.pad)
# sort files to suit the simple tree algorithm better
# NOTE: Python-2-only cmp-style sort (tallest first, widest first on ties).
blocks.sort(lambda a,b: b['w'] - a['w'] if a['h'] == b['h'] else b['h'] - a['h'])
# run the packer
if not fit(blocks, args.width, args.height):
    print "ERROR: unable to fit images to {0}x{1} padding {2}".format(args.width, args.height, args.pad)
    sys.exit(1)
w = 0
h = 0
for b in blocks:
    # get size of the output image, decrement padding as borders do not need one
    w = max(w, b['p']['x'] + b['w'] - args.pad)
    h = max(h, b['p']['y'] + b['h'] - args.pad)
    # eliminate the extra padding from output image borders
    # NOTE(review): adding int(pad/2) and immediately subtracting it again
    # leaves sx/sy equal to the original coordinates (positions are never
    # negative), so this block appears to be a no-op -- confirm intent.
    sx = b['p']['x'] + int(args.pad/2)
    sy = b['p']['y'] + int(args.pad/2)
    sx = max(0, sx - int(args.pad/2))
    sy = max(0, sy - int(args.pad/2))
    b['p']['x'] = sx
    b['p']['y'] = sy
if not len(blocks):
    print "\nWARN: nothing to do"
    sys.exit(0)
print "successfully fitted {0} images to {1}x{2} padding {3}".format(len(blocks), w, h, args.pad)
# Maps basename -> {x, y, w, h} on the atlas; serialized as JSON/JS/CSS below.
info = {}
try:
    # compose images in single atlas
    convert = [
        'convert',
        '-define',
        'png:exclude-chunks:date', # do not set date
        '-size',
        '%sx%s' % (w, h),
        'xc:none', # transparent background
    ]
    # One "-geometry +x+y -composite" triple per image pastes it in place.
    for b in blocks:
        convert.append(b['name'])
        convert.append('-geometry')
        convert.append('+%s+%s' % (b['p']['x'], b['p']['y']))
        convert.append('-composite')
        info[os.path.basename(b['name'])] = {
            'x': b['p']['x'],
            'y': b['p']['y'],
            'w': b['ow'],
            'h': b['oh']
        }
    convert.append(args.outfile)
    o = subprocess.check_output(convert, stderr=subprocess.STDOUT).strip()
    print "Wrote: atlas to {0}".format(args.outfile)
    # write Raw JSON
    f = open(args.jsonoutfile, 'w')
    f.write(json.dumps(info, sort_keys=True, indent=4))
    f.close()
    print "Wrote json to {0}".format(args.jsonoutfile)
    # write import JS file
    # The JS variant assigns the mapping to a window-global for browser use.
    bvar = 'window.bg_' + bname(args.outfile)
    f = open(args.jsoutfile, 'w')
    f.write(bvar+' = '+json.dumps(info, sort_keys=True, indent=4))
    f.close()
    print "Wrote js to {0}".format(args.jsoutfile)
    # write CSS
    # One rule per image: ".bg-<atlas>.<image>" with a negative background
    # offset selecting the sprite out of the atlas.
    bclass = '.bg-' + bname(args.outfile)
    rules = []
    rules.extend([{bclass + '.' + bname(file) : {
        'background': 'url('+os.path.basename(args.outfile)+') no-repeat -%dpx -%dpx' % (b['x'], b['y']),
        'width': ('%dpx' % b['w']),
        'height':('%dpx' % b['h']),
    }} for (file,b) in info.items()])
    def rule2str(rule):
        # Render one {selector: {prop: value}} mapping as a CSS rule string.
        l = []
        for (key, style) in rule.items():
            sl = []
            for (opt, val) in style.items():
                sl.append('\t%s: %s;' % (opt,val))
            l.append(key + ' {\n' + '\n'.join(sl) +'\n}\n')
        return "\n".join(l)
    f = open(args.cssoutfile, 'w')
    for r in rules:
        f.write(rule2str(r))
    f.close()
    print "Wrote css to {0}\n".format(args.cssoutfile)
except subprocess.CalledProcessError as e:
    print "failed to process, error: {0}".format(e.output),
| mit |
chetan51/nupic | nupic/regions/ImageSensorFilters/Tracking.py | 15 | 11231 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import random
import numpy
from PIL import Image
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Tracking(BaseFilter):
"""
Create resized versions of the original image, using various methods of
padding and stretching.
"""
def __init__(self,
targetDims,
padding=0,
scales=None,
fillValue=0,
fillFromImageWherePossible=True,
preservationMode=None,
qualityLevel='antialias',
dumpDebugImages=False,
applyAlpha=True):
"""
@param qualityLevel -- specifies the quality of the filter to be used
for resizing image patches; must be one of:
'nearest', 'bilinear', 'bicubic', 'antialias'
(in increasing order of quality)
@param applyAlpha -- if True, we'll "apply" the alpha channel to the image
before flattening it (from 'LA' to 'L'). This allows you to pass in
images that are non-rectangular. If False, the alpha channel will
just be used to figure out the bounding box of the object (unless
a tracking box is passed in, in which case alpha is ignored).
Note: this is only useful if fillFromImageWherePossible=False
"""
BaseFilter.__init__(self)
# Apply default for scales
if scales is None:
scales = [1.0]
if type(scales) not in (list, tuple):
raise ValueError("'scales' must be a list or tuple")
if type(scales) is tuple:
scales = list(scales)
self._applyAlpha = applyAlpha
self._targetDims = targetDims
self._padding = padding
self._scales = scales
self._fillValue = fillValue
self._fillFromImageWherePossible = fillFromImageWherePossible
self._preservationMode = preservationMode
self._resizingFilter = eval("Image.%s" % qualityLevel.upper())
self._dumpDebugImages = dumpDebugImages
if fillValue is None:
self._histWeights = numpy.array(range(256), dtype='float')
def process(self, image):
"""
Performs the following operations:
1. Locates the original bounding box of the image as defined by the
image's alpha mask. It is assumed that the alpha mask will consist
of a single rectangular, in which case the resulting bbox will
be exactly equivalent to the mask representation. However, if for
some reason the positive regions of the alpha mask is not a single
rectangle, things will still work.
2. Fit the bounding box to the target dimensions, scaling as needed,
and filling in padding regions if needed (if the aspect ratio of
the bounding box does not match that of the target dimensions
which, in general, will be True.) If padding is needed, we fill
from the original image pixels around the bounding box if
fillFromImageWherePossible is True and we're not outside the original
image bounds, otherwise, we use 'fillValue'.
3. Apply each scale in 'scales' to the resulting cropped image, and
pad each side by 'padding' (pulling from the real image pixels
when possible, and filling with 'fillValue' where not.)
4. Return the list of cropped images.
"""
BaseFilter.process(self, image)
assert image.mode == "LA"
# Pull bbox of the alpha mask
if 'tracking' in image.info:
bbox = image.info['tracking']
if type(bbox) == type(""):
bbox = eval(bbox)
else:
bbox = image.split()[1].getbbox()
# If alpha channel is completely empty, we will end up
# with a bbox of 'None'. Nothing much we can do
if bbox is None:
bbox = (0, 0, image.size[0], image.size[1])
print 'WARNING: empty alpha channel'
# Check for malformed box
elif bbox[0] >= bbox[2] or bbox[1] >= bbox[3]:
bbox = (0, 0, image.size[0], image.size[1])
print 'WARNING: malformed box'
# Ascertain the original raw size of the tracking box
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
if self._fillValue is None:
[gray, alpha] = image.split()
hist = numpy.array(gray.histogram(alpha), dtype='float')
mean = (hist * self._histWeights).sum() / hist.sum()
if mean < 127.5:
fillValue = 255
else:
fillValue = 0
elif isinstance(self._fillValue, int):
fillValue = self._fillValue
else:
fillValue = self._fillValue[random.randint(0, len(self._fillValue)-1)]
# If we're not going to fill from the rest of the image, we should
# apply the alpha from the original image directly. If the original alpha
# was a square bounding box, this won't hurt (since we're not filling from
# the image). If it was a tight mask, this will prevent it from reverting
# back to a square mask.
if (self._applyAlpha) and (not self._fillFromImageWherePossible):
grayImage, alphaImage = image.split()
image = Image.new('L', size=image.size, color=fillValue)
image.paste(grayImage, alphaImage)
newImages = []
for scaleIdx, scale in enumerate(self._scales):
# Target dimensions depend on the scale at which we're operating
targetDims = (self._targetDims[0] * scale,
self._targetDims[1] * scale)
scaleFactorX = float(targetDims[0]) / float(width)
scaleFactorY = float(targetDims[1]) / float(height)
# Determine the scaling factors needed to map the
# bounding box to the target dimensions (prior to
# padding be accounted for)
if self._preservationMode is None:
pass
elif self._preservationMode == "aspect":
scaleFactor = min(scaleFactorX, scaleFactorY)
scaleFactorX = scaleFactor
scaleFactorY = scaleFactor
else:
assert self._preservationMode == "size"
scaleFactorX = scale
scaleFactorY = scale
# Now, holding the scaling factor constant, compute the
# size of the src box in the original image that will
# produce the correctly padded target size
targetWidth = int(round(targetDims[0])) + 2*self._padding
targetHeight = int(round(targetDims[1])) + 2*self._padding
srcWidth = float(targetWidth) / scaleFactorX
srcHeight = float(targetHeight) / scaleFactorY
# Compute the exact coordinates of the source box
if self._fillFromImageWherePossible:
origCenterX = float(bbox[0] + bbox[2]) * 0.5
origCenterY = float(bbox[1] + bbox[3]) * 0.5
halfWidth = srcWidth * 0.5
halfHeight = srcHeight * 0.5
srcBox = (int(round(origCenterX - halfWidth)),
int(round(origCenterY - halfHeight)),
int(round(origCenterX + halfWidth)),
int(round(origCenterY + halfHeight)))
# take into account clipping off the image boundary
clipBox = (max(srcBox[0], 0),
max(srcBox[1], 0),
min(srcBox[2], image.size[0]),
min(srcBox[3], image.size[1]))
#clipOffset = (clipBox[0] - srcBox[0],
# clipBox[1] - srcBox[1])
else:
# extend the bbox to include padding pixels on all sides
paddedBBox = (int(bbox[0] - self._padding/scaleFactorX),
int(bbox[1] - self._padding/scaleFactorY),
int(bbox[2] + self._padding/scaleFactorX),
int(bbox[3] + self._padding/scaleFactorY))
# take into account clipping off the image boundary
clipBox = (max(paddedBBox[0], 0),
max(paddedBBox[1], 0),
min(paddedBBox[2], image.size[0]),
min(paddedBBox[3], image.size[1]))
# The srcBox is the correct aspect ratio, and either taller or wider than the
# bbox, but not both.
srcBox = (0, 0, srcWidth, srcHeight)
clipBoxWidth = clipBox[2] - clipBox[0]
clipBoxHeight = clipBox[3] - clipBox[1]
#clipOffset = (int((srcWidth - clipBoxWidth)/2),
# int((srcHeight - clipBoxHeight)/2))
# Copy the source rect
croppedImage = image.crop(clipBox)
croppedImage.load()
# New development
croppedImage.putalpha(Image.new(mode='L', size=croppedImage.size, color=255))
# Scale the cropped image. At last one dimension of this cropped image
# should be the target size.
xFactor = float(targetWidth) / croppedImage.size[0]
yFactor = float(targetHeight) / croppedImage.size[1]
scaleFactor = min(xFactor, yFactor)
if scaleFactor >= 1:
resizingFilter = Image.BICUBIC
else:
resizingFilter = Image.ANTIALIAS
scaledImage = croppedImage.resize((int(round(scaleFactor * croppedImage.size[0])),
int(round(scaleFactor * croppedImage.size[1]))),
resizingFilter)
clipOffset = (int((targetWidth - scaledImage.size[0]) / 2),
int((targetHeight - scaledImage.size[1]) / 2))
# Paste into a new image
newImage = Image.new(mode='LA', size=(targetWidth, targetHeight), color=fillValue)
newImage.paste(scaledImage, clipOffset)
# Resize the cropped image to the (padded) target size
# Convert and save the scaled image as the output
assert newImage.mode == 'LA'
newImages += [newImage]
# Dump debugging images to disk
if self._dumpDebugImages:
self._handleDebug(newImage, scaleIdx)
return [newImages]
def _handleDebug(self, image, scaleIdx, debugDir="tracking.d"):
"""
Dump tracking boxes to disk for offline analysis
"""
if not hasattr(self, "_debugIndex"):
self._debugIndex = 0
if not os.path.isdir(debugDir):
os.mkdir(debugDir)
debugPath = os.path.join(debugDir, "tracking.%06d.%02d.png" % \
(self._debugIndex, scaleIdx))
image.save(debugPath)
self._debugIndex += 1
def getOutputCount(self):
"""
Return the number of images returned by each call to process().
If the filter creates multiple simultaneous outputs, return a tuple:
(outputCount, simultaneousOutputCount).
"""
return 1, len(self._scales)
| gpl-3.0 |
ortylp/scipy | scipy/linalg/decomp_lu.py | 117 | 5796 | """LU decomposition functions."""
from __future__ import division, print_function, absolute_import
from warnings import warn
from numpy import asarray, asarray_chkfinite
# Local imports
from .misc import _datacopied
from .lapack import get_lapack_funcs
from .flinalg import get_flinalg_funcs
__all__ = ['lu', 'lu_solve', 'lu_factor']
def lu_factor(a, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is ``A = P L U`` where P is a permutation matrix,
    L is lower triangular with unit diagonal elements, and U is upper
    triangular.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to decompose
    overwrite_a : bool, optional
        Whether to overwrite data in A (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    lu : (N, N) ndarray
        Matrix containing U in its upper triangle, and L in its lower triangle.
        The unit diagonal elements of L are not stored.
    piv : (N,) ndarray
        Pivot indices representing the permutation matrix P:
        row i of matrix was interchanged with row piv[i].

    See also
    --------
    lu_solve : solve an equation system using the LU factorization of a matrix

    Notes
    -----
    This is a wrapper to the ``*GETRF`` routines from LAPACK.
    """
    a1 = asarray_chkfinite(a) if check_finite else asarray(a)
    if a1.ndim != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    # If asarray made a copy, it is safe to let LAPACK work in place.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    getrf, = get_lapack_funcs(('getrf',), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        # Exact zero pivot: the factorization completed but U is singular.
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             RuntimeWarning)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve an equation system, a x = b, given the LU factorization of a

    Parameters
    ----------
    (lu, piv)
        Factorization of the coefficient matrix a, as given by lu_factor
    b : array
        Right-hand side
    trans : {0, 1, 2}, optional
        Type of system to solve:

        ===== =========
        trans system
        ===== =========
        0     a x   = b
        1     a^T x = b
        2     a^H x = b
        ===== =========
    overwrite_b : bool, optional
        Whether to overwrite data in b (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : array
        Solution to the system

    See also
    --------
    lu_factor : LU factorize a matrix
    """
    (lu, piv) = lu_and_piv
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    # If asarray made a copy, it is safe to let LAPACK work in place.
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # info < 0 flags an invalid argument.  The routine called here is
    # ?getrs, so name it in the message (it previously claimed 'gesv|posv',
    # which this function never calls).
    raise ValueError('illegal value in %d-th argument of '
                     'internal getrs (lu_solve)' % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is ``A = P L U`` where P is a permutation matrix,
    L is lower triangular with unit diagonal elements, and U is upper
    triangular.

    Parameters
    ----------
    a : (M, N) array_like
        Array to decompose
    permute_l : bool, optional
        Perform the multiplication P*L (Default: do not permute)
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    **(If permute_l == False)**

    p : (M, M) ndarray
        Permutation matrix
    l : (M, K) ndarray
        Lower triangular or trapezoidal matrix with unit diagonal.
        K = min(M, N)
    u : (K, N) ndarray
        Upper triangular or trapezoidal matrix

    **(If permute_l == True)**

    pl : (M, K) ndarray
        Permuted L matrix.
        K = min(M, N)
    u : (K, N) ndarray
        Upper triangular or trapezoidal matrix

    Notes
    -----
    This is a LU factorization routine written for Scipy.
    """
    a1 = asarray_chkfinite(a) if check_finite else asarray(a)
    if a1.ndim != 2:
        raise ValueError('expected matrix')
    # If asarray made a copy, it is safe to let the routine work in place.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    flu, = get_flinalg_funcs(('lu',), (a1,))
    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal lu.getrf' % -info)
    return (l, u) if permute_l else (p, l, u)
| bsd-3-clause |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/compiler/syntax.py | 369 | 1444 | """Check for errors in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
    """Walk *tree* with a SyntaxErrorChecker and return the error count.

    When *multi* is not None the checker prints a message per error
    instead of raising SyntaxError on the first one.
    """
    checker = SyntaxErrorChecker(multi)
    walk(tree, checker)
    return checker.errors
class SyntaxErrorChecker:
    """A visitor to find syntax errors in the AST."""
    def __init__(self, multi=None):
        """Create new visitor object.

        If optional argument multi is not None, then print messages
        for each error rather than raising a SyntaxError for the
        first.
        """
        self.multi = multi
        self.errors = 0
    def error(self, node, msg):
        """Record one error; print it (multi mode) or raise SyntaxError."""
        self.errors = self.errors + 1
        if self.multi is not None:
            print "%s:%s: %s" % (node.filename, node.lineno, msg)
        else:
            raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
    def visitAssign(self, node):
        # the transformer module handles many of these
        # (kept as a no-op hook; the historical check is preserved below)
        pass
##        for target in node.nodes:
##            if isinstance(target, ast.AssList):
##                if target.lineno is None:
##                    target.lineno = node.lineno
##                self.error(target, "can't assign to list comprehension")
| apache-2.0 |
animekita/selvbetjening | selvbetjening/core/mailcenter/models.py | 1 | 6200 | import logging
import re
import markdown
from django.conf import settings
from django.db import models
from django.template import Template, Context, loader
import sys
from selvbetjening.core.mail import send_mail
logger = logging.getLogger('selvbetjening.email')
class EmailSpecification(models.Model):
    """A stored e-mail template.

    The subject and body are Django template strings, rendered either in a
    'user' context or an 'attendee' context, and the body may be written in
    Markdown or HTML.  Both a plain-text and an HTML variant are produced
    and queued through selvbetjening.core.mail.send_mail.
    """
    BODY_FORMAT_CHOICES = (
        ('html', 'HTML'),
        ('markdown', 'Markdown')
    )
    CONTEXT_CHOICES = (
        ('user', 'User'),
        ('attendee', 'Attendee')
    )
    # template
    subject = models.CharField(max_length=128)
    body = models.TextField()
    body_format = models.CharField(max_length=32, choices=BODY_FORMAT_CHOICES, default='markdown')
    # context
    template_context = models.CharField(max_length=32, choices=CONTEXT_CHOICES, default='user')
    # meta
    date_created = models.DateField(editable=False, auto_now_add=True)
    def send_email_user(self, user, internal_sender_id):
        """Render in user context and queue the e-mail to *user*.

        Raises ValueError when this template requires an attendee context.
        Rendering failures are logged and swallowed (no mail is sent).
        """
        if self.template_context == 'attendee':
            raise ValueError
        ok, email, err = self.render_user(user)
        if not ok:
            # Warn an admin and log the error silently
            logger.exception('Failure rendering e-mail (template pk: %s) -- Addressed to %s', self.pk, user.email, exc_info=err, extra={
                'related_user': user})
            return
        instance = self._send_mail(user.email, email, internal_sender_id)
        logger.info('E-mail queued (%s) -- Addressed to %s', email['subject'], user.email,
                    extra={
                        'related_user': user,
                        'related_email': instance
                    })
    def send_email_attendee(self, attendee, internal_sender_id):
        """Render in attendee context and queue the e-mail to the attendee's
        user.  Rendering failures are logged and swallowed.
        """
        ok, email, err = self.render_attendee(attendee)
        if not ok:
            # Warn an admin and log the error silently
            logger.exception('Failure rendering e-mail (template pk: %s) -- Addressed to %s', self.pk, attendee.user.email, exc_info=err, extra={
                'related_user': attendee.user,
                'related_attendee': attendee})
            return
        instance = self._send_mail(attendee.user.email, email, internal_sender_id)
        logger.info('E-mail queued (%s) -- Addressed to %s', email['subject'], attendee.user.email,
                    extra={
                        'related_user': attendee.user,
                        'related_attendee': attendee,
                        'related_email': instance
                    })
    def _send_mail(self, to_address, email, internal_sender_id):
        # Queue the message (plain + HTML) and return the created mail object.
        mails = send_mail(email['subject'],
                          email['body_plain'],
                          settings.DEFAULT_FROM_EMAIL,
                          [to_address],
                          body_html=email['body_html'],
                          internal_sender_id=internal_sender_id)
        return mails[0]
    def render_user(self, user):
        """
        Renders the e-mail template using a user object as source.
        An error is thrown if the template context is Attendee.
        Returns (ok, email_dict, exc_info) -- see _render.
        """
        if self.template_context == 'attendee':
            raise ValueError
        return self._render(self._get_context(user))
    def render_attendee(self, attendee):
        """
        Renders the e-mail template using an attendee object as source.
        Returns (ok, email_dict, exc_info) -- see _render.
        """
        return self._render(self._get_context(attendee.user, attendee=attendee))
    def render_dummy(self):
        """Render with fixed placeholder values, e.g. for template previews."""
        context = {
            # user context
            'username': 'johndoe',
            'full_name': 'John Doe',
            'email': 'johndoe@example.org',
            # attendee.event context
            'event_title': 'Dummy Event',
            'invoice_plain': 'INVOICE',
            'invoice_html': 'INVOICE_HTML'
        }
        return self._render(context)
    def _get_context(self, user, attendee=None):
        # Build the template context dict; attendee data (event, invoice,
        # per-option selection flags) is added only when an attendee is given.
        # lazy import, prevent circular import in core.events
        from selvbetjening.core.events.options.dynamic_selections import SCOPE, dynamic_selections
        context = {
            # user context
            'username': user.username,
            'full_name': ('%s %s' % (user.first_name, user.last_name)).strip(),
            'email': user.email
        }
        if attendee is not None:
            invoice = dynamic_selections(SCOPE.VIEW_USER_INVOICE, attendee)
            invoice_html = loader.render_to_string('events/parts/invoice.html', {
                'attendee': attendee,
                'invoice': invoice
            })
            invoice_text = loader.render_to_string('events/parts/invoice_text.html', {
                'attendee': attendee,
                'invoice': invoice
            })
            context.update({
                # attendee.event context
                'event_title': attendee.event.title,
                'attendee': attendee,
                'invoice_plain': invoice_text,
                'invoice_html': invoice_html,
            })
            # Expose one boolean per invoice option so templates can branch
            # on whether the attendee selected it.
            for option, selection in invoice:
                context['selected_%s' % option.pk] = selection is not None
        return context
    def _render(self, context):
        # Returns (True, email_dict, None) on success or
        # (False, None, exc_info) when either body variant fails to render.
        context = Context(context)
        try:
            email = {
                'subject': self.subject,
                'body_plain': self._get_rendered_body_plain(context),
                'body_html': self._get_rendered_body_html(context)
            }
            return True, email, None
        except Exception:
            return False, None, sys.exc_info()
    def _get_rendered_body_plain(self, context):
        # Plain-text variant: markdown source is used as-is; HTML source has
        # its tags stripped with a regex first.
        if self.body_format == 'markdown':
            body = self.body
        else:
            body = re.sub(r'<[^>]*?>', '', self.body)
        context['invoice'] = context.get('invoice_plain', None)
        return Template(body).render(context)
    def _get_rendered_body_html(self, context):
        # HTML variant: markdown source is converted to HTML first.
        if self.body_format == 'markdown':
            body = markdown.markdown(self.body)
        else:
            body = self.body
        context['invoice'] = context.get('invoice_html', None)
        return Template(body).render(context)
    def __unicode__(self):
        return self.subject
| mit |
goddardl/cortex | test/IECore/EXRImageWriter.py | 7 | 8551 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import sys, os
from IECore import *
from math import pow
class TestEXRWriter(unittest.TestCase):
	def __verifyImageRGB( self, imgNew, imgOrig, maxError = 0.002 ):
		# Assert that imgNew is an ImagePrimitive carrying every channel that
		# imgOrig has, and that the shared channels match to within maxError
		# (via ImageDiffOp, which skips channels missing on either side).
		self.assertEqual( type(imgNew), ImagePrimitive )
		if "R" in imgOrig :
			self.assert_( "R" in imgNew )
		if "G" in imgOrig :
			self.assert_( "G" in imgNew )
		if "B" in imgOrig :
			self.assert_( "B" in imgNew )
		if "A" in imgOrig :
			self.assert_( "A" in imgNew )
		if "Y" in imgOrig :
			self.assert_( "Y" in imgNew )
		op = ImageDiffOp()
		res = op(
			imageA = imgNew,
			imageB = imgOrig,
			maxError = maxError,
			skipMissingChannels = True
		)
		# ImageDiffOp returns true when the images differ beyond maxError.
		self.failIf( res.value )
	def __makeFloatImage( self, dataWindow, displayWindow, withAlpha = False, dataType = FloatVectorData ) :
		# Build a synthetic ImagePrimitive: R ramps 0->1 left to right,
		# G ramps 0->1 top to bottom, B is constant 0 and A (optional) is
		# constant 0.5.  Used as a known reference image by the tests.
		img = ImagePrimitive( dataWindow, displayWindow )
		w = dataWindow.max.x - dataWindow.min.x + 1
		h = dataWindow.max.y - dataWindow.min.y + 1
		area = w * h
		R = dataType( area )
		G = dataType( area )
		B = dataType( area )
		if withAlpha:
			A = dataType( area )
		offset = 0
		for y in range( 0, h ) :
			for x in range( 0, w ) :
				R[offset] = float(x) / (w - 1)
				G[offset] = float(y) / (h - 1)
				B[offset] = 0.0
				if withAlpha:
					A[offset] = 0.5
				offset = offset + 1
		img["R"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, R )
		img["G"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, G )
		img["B"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, B )
		if withAlpha:
			img["A"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, A )
		return img
	def testWrite( self ) :
		# Round-trip: write a procedural float image as EXR, read it back and
		# check type and channel data survive.
		displayWindow = Box2i(
			V2i( 0, 0 ),
			V2i( 99, 99 )
		)
		dataWindow = displayWindow
		for dataType in [ FloatVectorData ] :
			self.setUp()
			imgOrig = self.__makeFloatImage( dataWindow, displayWindow, dataType = dataType )
			w = Writer.create( imgOrig, "test/IECore/data/exrFiles/output.exr" )
			self.assertEqual( type(w), EXRImageWriter )
			w.write()
			self.assert_( os.path.exists( "test/IECore/data/exrFiles/output.exr" ) )
			# Now we've written the image, verify the rgb
			r = Reader.create( "test/IECore/data/exrFiles/output.exr" )
			imgNew = r.read()
			self.assertEqual( type(imgNew['R'].data), FloatVectorData )
			self.__verifyImageRGB( imgOrig, imgNew )
			self.tearDown()
	def testColorConversion(self):
		# Read an existing EXR, re-write it and check the round trip is
		# lossless (exact equality, so no colour conversion may occur).
		r = Reader.create( "test/IECore/data/exrFiles/ramp.exr" )
		imgOrig = r.read()
		self.assertEqual( type(imgOrig), ImagePrimitive )
		w = Writer.create( imgOrig, "test/IECore/data/exrFiles/output.exr" )
		self.assertEqual( type(w), EXRImageWriter )
		w.write()
		w = None
		r = Reader.create( "test/IECore/data/exrFiles/output.exr" )
		imgNew = r.read()
		self.assertEqual( type(imgNew), ImagePrimitive )
		self.assertEqual( imgOrig, imgNew )
	def testWriteIncomplete( self ) :
		# Writing an image whose channel data is too small for its dataWindow
		# must raise and must not leave a file behind.
		displayWindow = Box2i(
			V2i( 0, 0 ),
			V2i( 99, 99 )
		)
		dataWindow = displayWindow
		imgOrig = self.__makeFloatImage( dataWindow, displayWindow )
		# We don't have enough data to fill this dataWindow
		imgOrig.dataWindow = Box2i(
			V2i( 0, 0 ),
			V2i( 199, 199 )
		)
		self.failIf( imgOrig.arePrimitiveVariablesValid() )
		w = Writer.create( imgOrig, "test/IECore/data/exrFiles/output.exr" )
		self.assertEqual( type(w), EXRImageWriter )
		self.assertRaises( RuntimeError, w.write )
		self.failIf( os.path.exists( "test/IECore/data/exrFiles/output.exr" ) )
	def testWindowWrite( self ) :
		# A display window larger than (and offset from) the data window must
		# survive a write/read round trip.
		dataWindow = Box2i(
			V2i( 0, 0 ),
			V2i( 99, 99 )
		)
		imgOrig = self.__makeFloatImage( dataWindow, dataWindow )
		imgOrig.displayWindow = Box2i(
			V2i( -20, -20 ),
			V2i( 199, 199 )
		)
		w = Writer.create( imgOrig, "test/IECore/data/exrFiles/output.exr" )
		self.assertEqual( type(w), EXRImageWriter )
		w.write()
		self.assert_( os.path.exists( "test/IECore/data/exrFiles/output.exr" ) )
		r = Reader.create( "test/IECore/data/exrFiles/output.exr" )
		imgNew = r.read()
		self.__verifyImageRGB( imgNew, imgOrig )
def testOversizeDataWindow( self ) :
    """Round-trip an image whose dataWindow exceeds its displayWindow and
    compare the result against a precomputed expected image."""
    r = Reader.create( "test/IECore/data/exrFiles/oversizeDataWindow.exr" )
    img = r.read()
    w = Writer.create( img, "test/IECore/data/exrFiles/output.exr" )
    self.assertEqual( type(w), EXRImageWriter )
    w.write()
    r = Reader.create( "test/IECore/data/exrFiles/output.exr" )
    imgNew = r.read()
    # The reference result lives in a DPX file under expectedResults.
    r = Reader.create( "test/IECore/data/expectedResults/oversizeDataWindow.dpx" )
    imgExpected = r.read()
    self.__verifyImageRGB( imgNew, imgExpected )
def testCompressionParameter( self ):
    """Exercise the EXR writer's 'compression' parameter."""
    r = Reader.create( "test/IECore/data/exrFiles/oversizeDataWindow.exr" )
    img = r.read()
    w = Writer.create( img, "test/IECore/data/exrFiles/output.exr" )
    w['compression'].setValue( w['compression'].getPresets()['zip'] )
    w.write()
    # NOTE(review): this second writer only checks that the 'zip' preset can
    # be set on a default-constructed EXRImageWriter; nothing is written
    # with it — confirm whether a write/verify step was intended here.
    w = EXRImageWriter()
    w['compression'].setValue( w['compression'].getPresets()['zip'] )
def testBlindDataToHeader( self ) :
    """Verify that supported blindData entries are written into the EXR
    header and read back, while unsupported types are silently dropped."""
    displayWindow = Box2i(
        V2i( 0, 0 ),
        V2i( 9, 9 )
    )
    dataWindow = displayWindow
    # Nested CompoundData-style structure covering every supported
    # header value type.
    headerValues = {
        "one": IntData( 1 ),
        "two": FloatData( 2 ),
        "three": DoubleData( 3 ),
        "four" : {
            "five": V2fData( V2f(5) ),
            "six": V2iData( V2i(6) ),
            "seven": V3fData( V3f(7) ),
            "eight": V3iData( V3i(8) ),
            "nine": {
                "ten": Box2iData( Box2i( V2i(0), V2i(10) ) ),
                "eleven": Box2fData( Box2f( V2f(0), V2f(11) ) ),
                "twelve": M33fData( M33f(12) ),
                "thirteen": M44fData( M44f(13) ),
            },
            "fourteen": StringData( "fourteen" ),
        }
    }

    imgOrig = self.__makeFloatImage( dataWindow, dataWindow )
    imgOrig.blindData().update( headerValues.copy() )
    # now add some unsupported types
    imgOrig.blindData()['notSupported1'] = FloatVectorData( [ 1,2,3] )
    imgOrig.blindData()['four']['notSupported2'] = DoubleVectorData( [1,2,3] )

    w = Writer.create( imgOrig, "test/IECore/data/exrFiles/output.exr" )
    self.assertEqual( type(w), EXRImageWriter )
    w.write()
    self.assert_( os.path.exists( "test/IECore/data/exrFiles/output.exr" ) )

    r = Reader.create( "test/IECore/data/exrFiles/output.exr" )
    imgNew = r.read()
    imgBlindData = imgNew.blindData()
    # eliminate default header info that comes on EXR..
    del imgBlindData['screenWindowCenter']
    del imgBlindData['displayWindow']
    del imgBlindData['dataWindow']
    del imgBlindData['pixelAspectRatio']
    del imgBlindData['screenWindowWidth']

    # After removing EXR defaults, only the supported values must remain.
    self.assertEqual( imgBlindData, CompoundData( headerValues ) )
def setUp( self ) :
    """Remove any output file left over from a previous test run."""
    target = "test/IECore/data/exrFiles/output.exr"
    if os.path.isfile( target ) :
        os.remove( target )
def tearDown( self ) :
    """Clean up the output file written by the test, if any."""
    target = "test/IECore/data/exrFiles/output.exr"
    if os.path.isfile( target ) :
        os.remove( target )
# Allow the test file to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
abircse06/youtube-dl | test/helper.py | 29 | 8457 | from __future__ import unicode_literals
import errno
import io
import hashlib
import json
import os.path
import re
import types
import sys
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.utils import (
compat_str,
preferredencoding,
write_string,
)
def get_params(override=None):
    """Load the shared test parameters from parameters.json (next to this
    file), optionally overriding individual entries with *override*."""
    PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   "parameters.json")
    with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
        parameters = json.load(pf)
    if override:
        # Shallow merge: override wins over the file contents.
        parameters.update(override)
    return parameters
def try_rm(filename):
    """Remove *filename*, silently ignoring the case where it doesn't exist."""
    try:
        os.remove(filename)
    except OSError as err:
        # A missing file is fine; anything else is a real error.
        if err.errno == errno.ENOENT:
            return
        raise
def report_warning(message):
    '''
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
    use_color = sys.stderr.isatty() and os.name != 'nt'
    header = '\033[0;33mWARNING:\033[0m' if use_color else 'WARNING:'
    output = '%s %s\n' % (header, message)
    # Binary stderr (or Python 2) needs encoded bytes rather than text.
    if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
        output = output.encode(preferredencoding())
    sys.stderr.write(output)
class FakeYDL(YoutubeDL):
    """Test double for YoutubeDL: records requested downloads in
    ``self.result`` and raises on any reported trouble."""

    def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary
        # some test set the "sublang" parameter, which would break the md5 checks.
        params = get_params(override=override)
        super(FakeYDL, self).__init__(params, auto_init=False)
        self.result = []

    def to_screen(self, s, skip_eol=None):
        # Route screen output to stdout so pytest can capture it.
        print(s)

    def trouble(self, s, tb=None):
        # Any error condition fails the test immediately.
        raise Exception(s)

    def download(self, x):
        # Record instead of downloading.
        self.result.append(x)

    def expect_warning(self, regex):
        # Silence an expected warning matching a regex
        old_report_warning = self.report_warning

        def report_warning(self, message):
            if re.match(regex, message):
                return
            old_report_warning(message)
        self.report_warning = types.MethodType(report_warning, self)
def gettestcases(include_onlymatching=False):
    """Yield every test case defined by every registered extractor."""
    for extractor in youtube_dl.extractor.gen_extractors():
        for testcase in extractor.get_testcases(include_onlymatching):
            yield testcase
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
def expect_info_dict(self, got_dict, expected_dict):
    """Assert that *got_dict* (an extractor's info dict) satisfies
    *expected_dict*.

    String expectations may carry one of the prefixes ``re:``,
    ``startswith:``, ``contains:``, ``md5:`` or ``mincount:``; a type
    expectation checks isinstance; anything else is compared for equality.
    Also checks mandatory fields and prints a skeleton for any checkable
    fields missing from the test definition.
    """
    for info_field, expected in expected_dict.items():
        if isinstance(expected, compat_str) and expected.startswith('re:'):
            got = got_dict.get(info_field)
            match_str = expected[len('re:'):]
            match_rex = re.compile(match_str)

            self.assertTrue(
                isinstance(got, compat_str),
                'Expected a %s object, but got %s for field %s' % (
                    compat_str.__name__, type(got).__name__, info_field))
            self.assertTrue(
                match_rex.match(got),
                'field %s (value: %r) should match %r' % (info_field, got, match_str))
        elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
            got = got_dict.get(info_field)
            start_str = expected[len('startswith:'):]
            self.assertTrue(
                isinstance(got, compat_str),
                'Expected a %s object, but got %s for field %s' % (
                    compat_str.__name__, type(got).__name__, info_field))
            self.assertTrue(
                got.startswith(start_str),
                'field %s (value: %r) should start with %r' % (info_field, got, start_str))
        elif isinstance(expected, compat_str) and expected.startswith('contains:'):
            got = got_dict.get(info_field)
            contains_str = expected[len('contains:'):]
            self.assertTrue(
                isinstance(got, compat_str),
                'Expected a %s object, but got %s for field %s' % (
                    compat_str.__name__, type(got).__name__, info_field))
            self.assertTrue(
                contains_str in got,
                'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
        elif isinstance(expected, type):
            got = got_dict.get(info_field)
            self.assertTrue(isinstance(got, expected),
                            'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
        else:
            if isinstance(expected, compat_str) and expected.startswith('md5:'):
                # Compare the digest of the actual value against the expected digest.
                got = 'md5:' + md5(got_dict.get(info_field))
            elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
                got = got_dict.get(info_field)
                self.assertTrue(
                    isinstance(got, list),
                    'Expected field %s to be a list, but it is of type %s' % (
                        info_field, type(got).__name__))
                expected_num = int(expected.partition(':')[2])
                assertGreaterEqual(
                    self, len(got), expected_num,
                    'Expected %d items in field %s, but only got %d' % (
                        expected_num, info_field, len(got)
                    )
                )
                # mincount does its own assertion; skip the equality check below.
                continue
            else:
                got = got_dict.get(info_field)
            self.assertEqual(expected, got,
                             'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))

    # Check for the presence of mandatory fields
    if got_dict.get('_type') not in ('playlist', 'multi_video'):
        for key in ('id', 'url', 'title', 'ext'):
            self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
    # Check for mandatory fields that are automatically set by YoutubeDL
    for key in ['webpage_url', 'extractor', 'extractor_key']:
        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)

    # Are checkable fields missing from the test case definition?
    # Long strings are represented by their md5 digest to keep output short.
    test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
                          for key, value in got_dict.items()
                          if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
    missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
    if missing_keys:
        def _repr(v):
            if isinstance(v, compat_str):
                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
            else:
                return repr(v)
        info_dict_str = ''
        if len(missing_keys) != len(expected_dict):
            info_dict_str += ''.join(
                '    %s: %s,\n' % (_repr(k), _repr(v))
                for k, v in test_info_dict.items() if k not in missing_keys)

            if info_dict_str:
                info_dict_str += '\n'
        info_dict_str += ''.join(
            '    %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
            for k in missing_keys)
        # Print a copy-pastable 'info_dict' skeleton, then fail.
        write_string(
            '\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
        self.assertFalse(
            missing_keys,
            'Missing keys in test definition: %s' % (
                ', '.join(sorted(missing_keys))))
def assertRegexpMatches(self, text, regexp, msg=None):
    """Assert that *regexp* matches at the start of *text*; backport for
    unittest versions lacking assertRegexp."""
    if hasattr(self, 'assertRegexp'):
        return self.assertRegexp(text, regexp, msg)
    m = re.match(regexp, text)
    if m:
        return
    note = 'Regexp didn\'t match: %r not found' % (regexp)
    if len(text) < 1000:
        note += ' in %r' % text
    msg = note if msg is None else note + ', ' + msg
    self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
    """Assert ``got >= expected``; backport for older unittest versions."""
    if got >= expected:
        return
    if msg is None:
        msg = '%r not greater than or equal to %r' % (got, expected)
    self.assertTrue(got >= expected, msg)
def expect_warnings(ydl, warnings_re):
    """Patch *ydl* so that warnings matching any regex in *warnings_re*
    are swallowed; all other warnings pass through unchanged."""
    real_warning = ydl.report_warning

    def _filtered_warning(message):
        if any(re.search(pattern, message) for pattern in warnings_re):
            return
        real_warning(message)

    ydl.report_warning = _filtered_warning
| unlicense |
Daniex/horizon | openstack_dashboard/dashboards/project/routers/tables.py | 11 | 8788 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from neutronclient.common import exceptions as q_ext
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
LOG = logging.getLogger(__name__)
class DeleteRouter(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action deleting a router, detaching its interfaces first."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Router",
            u"Delete Routers",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Router",
            u"Deleted Routers",
            count
        )

    redirect_url = "horizon:project:routers:index"
    policy_rules = (("network", "delete_router"),)

    def delete(self, request, obj_id):
        try:
            # detach all interfaces before attempting to delete the router
            search_opts = {'device_owner': 'network:router_interface',
                           'device_id': obj_id}
            ports = api.neutron.port_list(request, **search_opts)
            for port in ports:
                api.neutron.router_remove_interface(request, obj_id,
                                                    port_id=port.id)
            api.neutron.router_delete(request, obj_id)
        except q_ext.NeutronClientException as e:
            # Neutron errors redirect back to the index with a message.
            msg = _('Unable to delete router "%s"') % e
            LOG.info(msg)
            messages.error(request, msg)
            redirect = reverse(self.redirect_url)
            raise exceptions.Http302(redirect, message=msg)
        except Exception:
            # Fall back to the router's display name for the error message.
            obj = self.table.get_object_by_id(obj_id)
            name = self.table.get_object_display(obj)
            msg = _('Unable to delete router "%s"') % name
            LOG.info(msg)
            exceptions.handle(request, msg)

    def allowed(self, request, router=None):
        # Deletion is always offered; policy rules still apply.
        return True
class CreateRouter(tables.LinkAction):
    """Link action opening the router creation modal; disabled when the
    router quota is exhausted."""

    name = "create"
    verbose_name = _("Create Router")
    url = "horizon:project:routers:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_router"),)

    def allowed(self, request, datum=None):
        usages = quotas.tenant_quota_usages(request)
        # when Settings.OPENSTACK_NEUTRON_NETWORK['enable_quotas'] = False
        # usages['routers'] is empty
        if usages.get('routers', {}).get('available', 1) <= 0:
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ["disabled"]
            self.verbose_name = _("Create Router (Quota exceeded)")
        else:
            self.verbose_name = _("Create Router")
            self.classes = [c for c in self.classes if c != "disabled"]
        # Always rendered; quota exhaustion only disables the button.
        return True
class EditRouter(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the router edit modal."""

    name = "update"
    verbose_name = _("Edit Router")
    url = "horizon:project:routers:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_router"),)
class SetGateway(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action to set a router's external gateway; only shown for
    routers that do not already have one."""

    name = "setgateway"
    verbose_name = _("Set Gateway")
    url = "horizon:project:routers:setgateway"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("network", "update_router"),)

    def allowed(self, request, datum=None):
        # Hide the action when a gateway is already configured.
        if datum.external_gateway_info:
            return False
        return True
class ClearGateway(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action removing a router's external gateway; only shown for
    routers that currently have one."""

    help_text = _("You may reset the gateway later by using the"
                  " set gateway action, but the gateway IP may change.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Clear Gateway",
            u"Clear Gateways",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Cleared Gateway",
            u"Cleared Gateways",
            count
        )

    name = "cleargateway"
    classes = ('btn-danger', 'btn-cleargateway')
    redirect_url = "horizon:project:routers:index"
    policy_rules = (("network", "update_router"),)

    def action(self, request, obj_id):
        obj = self.table.get_object_by_id(obj_id)
        name = self.table.get_object_display(obj)
        try:
            api.neutron.router_remove_gateway(request, obj_id)
        except Exception as e:
            msg = (_('Unable to clear gateway for router '
                     '"%(name)s": "%(msg)s"')
                   % {"name": name, "msg": e})
            LOG.info(msg)
            redirect = reverse(self.redirect_url)
            exceptions.handle(request, msg, redirect=redirect)

    def get_success_url(self, request):
        return reverse(self.redirect_url)

    def allowed(self, request, datum=None):
        # Only meaningful when a gateway is configured.
        if datum.external_gateway_info:
            return True
        return False
class UpdateRow(tables.Row):
    """AJAX row: re-fetches a single router to refresh its status column."""

    ajax = True

    def get_data(self, request, router_id):
        router = api.neutron.router_get(request, router_id)
        return router
def get_external_network(router):
    """Column helper: name of *router*'s external network, or '-' if the
    router has no gateway configured."""
    gateway_info = router.external_gateway_info
    if not gateway_info:
        return _("-")
    return gateway_info['network']
class RoutersFilterAction(tables.FilterAction):
    """Client-side name filter for the routers table."""

    def filter(self, table, routers, filter_string):
        """Naive case-insensitive search."""
        needle = filter_string.lower()
        matches = []
        for router in routers:
            if needle in router.name.lower():
                matches.append(router)
        return matches
class RoutersTable(tables.DataTable):
    """Data table listing the project's Neutron routers."""

    STATUS_DISPLAY_CHOICES = (
        ("active", pgettext_lazy("current status of router", u"Active")),
        ("error", pgettext_lazy("current status of router", u"Error")),
    )
    ADMIN_STATE_DISPLAY_CHOICES = (
        ("UP", pgettext_lazy("Admin state of a Router", u"UP")),
        ("DOWN", pgettext_lazy("Admin state of a Router", u"DOWN")),
    )
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:routers:detail")
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           display_choices=STATUS_DISPLAY_CHOICES)
    distributed = tables.Column("distributed",
                                filters=(filters.yesno, filters.capfirst),
                                verbose_name=_("Distributed"))
    ha = tables.Column("ha",
                       filters=(filters.yesno, filters.capfirst),
                       # Translators: High Availability mode of Neutron router
                       verbose_name=_("HA mode"))
    ext_net = tables.Column(get_external_network,
                            verbose_name=_("External Network"))
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"),
                                display_choices=ADMIN_STATE_DISPLAY_CHOICES)

    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(RoutersTable, self).__init__(
            request,
            data=data,
            needs_form_wrapper=needs_form_wrapper,
            **kwargs)
        # Drop feature-specific columns when the deployment lacks the
        # corresponding Neutron extension.
        if not api.neutron.get_feature_permission(request, "dvr", "get"):
            del self.columns["distributed"]
        if not api.neutron.get_feature_permission(request, "l3-ha", "get"):
            del self.columns["ha"]

    def get_object_display(self, obj):
        return obj.name

    class Meta(object):
        name = "Routers"
        verbose_name = _("Routers")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (CreateRouter, DeleteRouter,
                         RoutersFilterAction)
        row_actions = (SetGateway, ClearGateway, EditRouter, DeleteRouter)
| apache-2.0 |
pedro2d10/SickRage-FR | lib/hachoir_core/field/fake_array.py | 95 | 2294 | import itertools
from hachoir_core.field import MissingField
class FakeArray:
    """
    Simulate an array for GenericFieldSet.array(): fieldset.array("item")[0] is
    equivalent to fieldset.array("item[0]").

    It's possible to iterate over the items using::

        for element in fieldset.array("item"):
            ...

    And to get array size using len(fieldset.array("item")).
    """
    # NOTE: this is a Python 2 module (__nonzero__, implicit StopIteration).

    def __init__(self, fieldset, name):
        # A name containing '/' addresses an array inside a sub-fieldset.
        pos = name.rfind("/")
        if pos != -1:
            self.fieldset = fieldset[name[:pos]]
            self.name = name[pos+1:]
        else:
            self.fieldset = fieldset
            self.name = name
        self._format = "%s[%%u]" % self.name
        # Lazily filled index -> field cache.
        self._cache = {}
        # Becomes True once a lookup past the end has been observed.
        self._known_size = False
        self._max_index = -1

    def __nonzero__(self):
        "Is the array empty or not?"
        if self._cache:
            return True
        else:
            return (0 in self)

    def __len__(self):
        "Number of fields in the array"
        total = self._max_index+1
        if not self._known_size:
            # Probe successive indexes until a MissingField marks the end.
            for index in itertools.count(total):
                try:
                    field = self[index]
                    total += 1
                except MissingField:
                    break
        return total

    def __contains__(self, index):
        try:
            field = self[index]
            return True
        except MissingField:
            return False

    def __getitem__(self, index):
        """
        Get a field of the array. Returns a field, or raise MissingField
        exception if the field doesn't exist.
        """
        try:
            value = self._cache[index]
        except KeyError:
            try:
                value = self.fieldset[self._format % index]
            except MissingField:
                # A failed lookup proves the array ends before this index.
                self._known_size = True
                raise
            self._cache[index] = value
            self._max_index = max(index, self._max_index)
        return value

    def __iter__(self):
        """
        Iterate in the fields in their index order: field[0], field[1], ...
        """
        for index in itertools.count(0):
            try:
                yield self[index]
            except MissingField:
                raise StopIteration()
| gpl-3.0 |
chris-wood/python-mingus | mingus/core/progressions.py | 10 | 15752 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, progressions module.
# Copyright (C) 2008-2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module for dealing with progressions.
In music and music theory you often deal with sequencesi of chords. These
chord sequences are called progressions and are often written down using
roman numerals. In this system the 'I' refers to the first natural triad in
a key, the II to the second, etc. We can add prefixes and suffixes to denote
more complex progressions, like: #V7, bIIdim7, etc.
This module provides methods which can convert progressions to chords and
vice versa.
"""
import notes
import chords
import intervals
numerals = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII']
numeral_intervals = [0, 2, 4, 5, 7, 9, 11]
def to_chords(progression, key='C'):
    """Convert a list of chord functions or a string to a list of chords.

    Examples:
    >>> to_chords(['I', 'V7'])
    [['C', 'E', 'G'], ['G', 'B', 'D', 'F']]
    >>> to_chords('I7')
    [['C', 'E', 'G', 'B']]

    Any number of accidentals can be used as prefix to augment or diminish;
    for example: bIV or #I.

    All the chord abbreviations in the chord module can be used as suffixes;
    for example: Im7, IVdim7, etc.

    You can combine prefixes and suffixes to manage complex progressions:
    #vii7, #iidim7, iii7, etc.

    Using 7 as suffix is ambiguous, since it is classicly used to denote the
    seventh chord when talking about progressions instead of just the
    dominant seventh chord. We have taken the classic route; I7 will get
    you a major seventh chord. If you specifically want a dominanth seventh,
    use Idom7.
    """
    if type(progression) == str:
        progression = [progression]
    result = []
    for chord in progression:
        # strip preceding accidentals from the string
        (roman_numeral, acc, suffix) = parse_string(chord)

        # There is no roman numeral parsing, just a simple check. Sorry to
        # disappoint. warning Should throw exception
        if roman_numeral not in numerals:
            return []

        # These suffixes don't need any post processing
        if suffix == '7' or suffix == '':
            roman_numeral += suffix

            # ahh Python. Everything is a dict. Chord functions like I, V7
            # are looked up as names in the chords module.
            r = chords.__dict__[roman_numeral](key)
        else:
            # Build the plain triad first, then apply the suffix shorthand
            # to its root.
            r = chords.__dict__[roman_numeral](key)
            r = chords.chord_shorthand[suffix](r[0])

        # Apply the accumulated accidentals to every note of the chord.
        while acc < 0:
            r = map(notes.diminish, r)
            acc += 1
        while acc > 0:
            r = map(notes.augment, r)
            acc -= 1
        result.append(r)
    return result
def determine(chord, key, shorthand=False):
    """Determine the harmonic function of chord in key.

    This function can also deal with lists of chords.

    Examples:
    >>> determine(['C', 'E', 'G'], 'C')
    ['tonic']
    >>> determine(['G', 'B', 'D'], 'C')
    ['dominant']
    >>> determine(['G', 'B', 'D', 'F'], 'C', True)
    ['V7']
    >>> determine([['C', 'E', 'G'], ['G', 'B', 'D']], 'C', True)
    [['I'], ['V']]
    """
    result = []
    # Handle lists of chords
    if type(chord[0]) == list:
        for c in chord:
            result.append(determine(c, key, shorthand))
        return result

    # Long names for the seven diatonic functions.
    func_dict = {
        'I': 'tonic',
        'ii': 'supertonic',
        'iii': 'mediant',
        'IV': 'subdominant',
        'V': 'dominant',
        'vi': 'submediant',
        'vii': 'subtonic',
    }

    # (function, expected triad type, expected seventh type) for each degree.
    expected_chord = [
        ['I', 'M', 'M7'],
        ['ii', 'm', 'm7'],
        ['iii', 'm', 'm7'],
        ['IV', 'M', 'M7'],
        ['V', 'M', '7'],
        ['vi', 'm', 'm7'],
        ['vii', 'dim', 'm7b5'],
    ]

    type_of_chord = chords.determine(chord, True, False, True)
    # NOTE(review): the loop variable shadows the 'chord' parameter below;
    # behaviour is correct but the name is reused intentionally here.
    for chord in type_of_chord:
        name = chord[0]

        # Get accidentals
        a = 1
        for n in chord[1:]:
            if n == 'b':
                name += 'b'
            elif n == '#':
                name += '#'
            else:
                break
            a += 1
        chord_type = chord[a:]

        # Determine chord function from the interval between key and root.
        (interval_type, interval) = intervals.determine(key, name).split(' ')
        if interval == 'unison':
            func = 'I'
        elif interval == 'second':
            func = 'ii'
        elif interval == 'third':
            func = 'iii'
        elif interval == 'fourth':
            func = 'IV'
        elif interval == 'fifth':
            func = 'V'
        elif interval == 'sixth':
            func = 'vi'
        elif interval == 'seventh':
            func = 'vii'

        # Check whether the chord is altered or not
        for x in expected_chord:
            if x[0] == func:
                # Triads
                if chord_type == x[1]:
                    if not shorthand:
                        func = func_dict[func]
                elif chord_type == x[2]:
                    # Sevenths
                    if shorthand:
                        func += '7'
                    else:
                        func = func_dict[func] + ' seventh'
                else:
                    # Other
                    if shorthand:
                        func += chord_type
                    else:
                        func = func_dict[func]\
                            + chords.chord_shorthand_meaning[chord_type]

        # Handle b's and #'s (for instance Dbm in key C is bII)
        if shorthand:
            if interval_type == 'minor':
                func = 'b' + func
            elif interval_type == 'augmented':
                func = '#' + func
            elif interval_type == 'diminished':
                func = 'bb' + func
        else:
            if interval_type == 'minor':
                func = 'minor ' + func
            elif interval_type == 'augmented':
                func = 'augmented ' + func
            elif interval_type == 'diminished':
                func = 'diminished ' + func

        # Add to results
        result.append(func)
    return result
def parse_string(progression):
    """Return a tuple (roman numeral, accidentals, chord suffix).

    Examples:
    >>> parse_string('I')
    ('I', 0, '')
    >>> parse_string('bIM7')
    ('I', -1, 'M7')
    """
    accidentals = 0
    numeral = ''
    position = 0
    for char in progression:
        if char == '#':
            accidentals += 1
        elif char == 'b':
            accidentals -= 1
        elif char.upper() in ('I', 'V'):
            # Roman numerals are normalised to upper case.
            numeral += char.upper()
        else:
            # First character that is neither accidental nor numeral
            # starts the suffix.
            break
        position += 1
    return (numeral, accidentals, progression[position:])
def tuple_to_string(prog_tuple):
    """Create a string from tuples returned by parse_string."""
    (numeral, accidentals, suffix) = prog_tuple
    # Fold runaway accidental counts back into range, flipping the sign,
    # exactly as the original wrap-around logic did.
    if accidentals > 6:
        accidentals = 0 - accidentals % 6
    elif accidentals < -6:
        accidentals = accidentals % 6
    if accidentals < 0:
        prefix = 'b' * -accidentals
    else:
        prefix = '#' * accidentals
    return prefix + numeral + suffix
def substitute_harmonic(progression, substitute_index, ignore_suffix=False):
    """Do simple harmonic substitutions. Return a list of possible substitions
    for progression[substitute_index].

    If ignore_suffix is set to True the suffix of the chord being
    substituted will be ignored. Otherwise only progressions without a
    suffix, or with suffix '7' will be substituted.

    The following table is used to convert progressions:
    || I  || III ||
    || I  || VI  ||
    || IV || II  ||
    || IV || VI  ||
    || V  || VII ||
    """
    # Substitution pairs are symmetric: either member maps to the other.
    simple_substitutions = [('I', 'III'), ('I', 'VI'), ('IV', 'II'),
                            ('IV', 'VI'), ('V', 'VII')]
    res = []
    (roman, acc, suff) = parse_string(progression[substitute_index])
    if suff == '' or suff == '7' or ignore_suffix:
        for subs in simple_substitutions:
            r = subs[1] if roman == subs[0] else None
            if r == None:
                r = subs[0] if roman == subs[1] else None
            if r != None:
                # Only a '7' suffix is carried over; anything else is dropped.
                suff = suff if suff == '7' else ''
                res.append(tuple_to_string((r, acc, suff)))
    return res
def substitute_minor_for_major(progression, substitute_index,
                               ignore_suffix=False):
    """Substitute minor chords for its major equivalent.

    'm' and 'm7' suffixes recognized, and ['II', 'III', 'VI'] if there is no
    suffix.

    Examples:
    >>> substitute_minor_for_major(['VI'], 0)
    ['I']
    >>> substitute_minor_for_major(['Vm'], 0)
    ['bVIIM']
    >>> substitute_minor_for_major(['VIm7'], 0)
    ['IM7']
    """
    (roman, acc, suff) = parse_string(progression[substitute_index])
    res = []

    # Minor to major substitution: the relative major lies a third
    # (two numeral steps) above.
    if suff == 'm' or suff == 'm7' or suff == '' and roman in ['II', 'III',
                                                               'VI'] or ignore_suffix:
        n = skip(roman, 2)
        a = interval_diff(roman, n, 3) + acc
        # NOTE(review): when ignore_suffix is True only the first branch of
        # this elif chain can run (always appending 'M'); the later
        # 'or ignore_suffix' clauses are unreachable — confirm intent.
        if suff == 'm' or ignore_suffix:
            res.append(tuple_to_string((n, a, 'M')))
        elif suff == 'm7' or ignore_suffix:
            res.append(tuple_to_string((n, a, 'M7')))
        elif suff == '' or ignore_suffix:
            res.append(tuple_to_string((n, a, '')))
    return res
def substitute_major_for_minor(progression, substitute_index,
                               ignore_suffix=False):
    """Substitute major chords for their minor equivalent.

    'M' and 'M7' suffixes recognized, and ['I', 'IV', 'V'] if there is no
    suffix.

    Examples:
    >>> substitute_major_for_minor(['I'], 0)
    ['VI']
    >>> substitute_major_for_minor(['VM7'], 0)
    ['IIIm7']
    """
    (roman, acc, suff) = parse_string(progression[substitute_index])
    res = []

    # Major to minor substitution: the relative minor lies a sixth
    # (five numeral steps) above.
    if (suff == 'M' or suff == 'M7' or suff == '' and
            roman in ['I', 'IV', 'V'] or ignore_suffix):
        n = skip(roman, 5)
        a = interval_diff(roman, n, 9) + acc
        # NOTE(review): as in substitute_minor_for_major, when ignore_suffix
        # is True only the first branch ('m') is reachable — confirm intent.
        if suff == 'M' or ignore_suffix:
            res.append(tuple_to_string((n, a, 'm')))
        elif suff == 'M7' or ignore_suffix:
            res.append(tuple_to_string((n, a, 'm7')))
        elif suff == '' or ignore_suffix:
            res.append(tuple_to_string((n, a, '')))
    return res
def substitute_diminished_for_diminished(progression, substitute_index,
                                         ignore_suffix=False):
    """Substitute a diminished chord for another diminished chord.

    'dim' and 'dim7' suffixes recognized, and 'VI' if there is no suffix.

    Example:
    >>> substitute_diminished_for_diminished(['VII'], 0)
    ['IIdim', 'bIVdim', 'bbVIdim']
    """
    (roman, acc, suff) = parse_string(progression[substitute_index])
    res = []

    # Diminished progressions
    if suff == 'dim7' or suff == 'dim' or suff == '' and roman in ['VII']\
            or ignore_suffix:
        if suff == '':
            suff = 'dim'

        # Add diminished chord: walk up in minor thirds (every two numeral
        # steps), accumulating the accidental correction at each step.
        last = roman
        for x in range(3):
            next = skip(last, 2)
            acc += interval_diff(last, next, 3)
            res.append(tuple_to_string((next, acc, suff)))
            last = next
    return res
def substitute_diminished_for_dominant(progression, substitute_index,
                                       ignore_suffix=False):
    """Substitute a diminished chord for dominant seventh chords.

    'dim' and 'dim7' suffixes recognized, and 'VII' if there is no suffix.
    """
    (roman, acc, suff) = parse_string(progression[substitute_index])
    res = []

    # Diminished progressions
    if (suff == 'dim7' or suff == 'dim' or suff == '' and
            roman in ['VII'] or ignore_suffix):
        if suff == '':
            suff = 'dim'

        # For each minor-third transposition of the diminished chord, add
        # the dominant seventh a fourth below it.
        # NOTE(review): unlike the sibling function, 'acc' is not advanced
        # while 'last' walks up via 'next' — confirm this is intentional.
        last = roman
        for x in range(4):
            next = skip(last, 2)
            dom = skip(last, 5)
            a = interval_diff(last, dom, 8) + acc
            res.append(tuple_to_string((dom, a, 'dom7')))
            last = next
    return res
def substitute(progression, substitute_index, depth=0):
    """Give a list of possible substitutions for progression[substitute_index].

    If depth > 0 the substitutions of each result will be recursively added
    as well.

    Example:
    >>> substitute(['I', 'IV', 'V', 'I'], 0)
    ['III', 'III7', 'VI', 'VI7', 'I7']
    """
    res = []

    # Symmetric substitution pairs; either member maps to the other.
    simple_substitutions = [
        ('I', 'III'),
        ('I', 'VI'),
        ('IV', 'II'),
        ('IV', 'VI'),
        ('V', 'VII'),
        ('V', 'VIIdim7'),
        ('V', 'IIdim7'),
        ('V', 'IVdim7'),
        ('V', 'bVIIdim7'),
    ]
    p = progression[substitute_index]
    (roman, acc, suff) = parse_string(p)

    # Do the simple harmonic substitutions
    if suff == '' or suff == '7':
        for subs in simple_substitutions:
            r = None
            if roman == subs[0]:
                r = subs[1]
            elif roman == subs[1]:
                r = subs[0]
            if r != None:
                res.append(tuple_to_string((r, acc, '')))

                # Add seventh or triad depending on r
                if r[-1] != '7':
                    res.append(tuple_to_string((r, acc, '7')))
                else:
                    res.append(tuple_to_string((r[:-1], acc, '')))

    # Add the seventh variant of the chord itself.
    if suff == '' or suff == 'M' or suff == 'm':
        res.append(tuple_to_string((roman, acc, suff + '7')))

    # Minor to major substitution (relative major, a third up).
    if suff == 'm' or suff == 'm7':
        n = skip(roman, 2)
        a = interval_diff(roman, n, 3) + acc
        res.append(tuple_to_string((n, a, 'M')))
        res.append(tuple_to_string((n, a, 'M7')))

    # Major to minor substitution
    if suff == 'M' or suff == 'M7':
        n = skip(roman, 5)
        a = interval_diff(roman, n, 9) + acc
        res.append(tuple_to_string((n, a, 'm')))
        res.append(tuple_to_string((n, a, 'm7')))

    if suff == 'dim7' or suff == 'dim':
        # Add the corresponding dominant seventh
        res.append(tuple_to_string((skip(roman, 5), acc, 'dom7')))
        n = skip(roman, 1)
        res.append(tuple_to_string((n, acc + interval_diff(roman, n, 1),
                                    'dom7')))

        # Add diminished chord
        last = roman
        for x in range(4):
            next = skip(last, 2)
            acc += interval_diff(last, next, 3)
            res.append(tuple_to_string((next, acc, suff)))
            last = next

    res2 = []
    if depth > 0:
        for x in res:
            # NOTE(review): 'new_progr = progression' aliases rather than
            # copies the list, so the caller's progression is mutated while
            # recursing — confirm whether a copy was intended.
            new_progr = progression
            new_progr[substitute_index] = x
            res2 += substitute(new_progr, substitute_index, depth - 1)
    return res + res2
def interval_diff(progression1, progression2, interval):
    """Return the number of half steps progression2 needs to be diminished or
    augmented until the interval between progression1 and progression2 is
    interval."""
    base = numeral_intervals[numerals.index(progression1)]
    target = numeral_intervals[numerals.index(progression2)]
    # Wrap around the octave when the second degree lies below the first.
    if target < base:
        target += 12
    # The two original adjustment loops each move 'target' one semitone at a
    # time while counting; their net effect is exactly this difference.
    return interval - (target - base)
def skip(roman_numeral, skip=1):
    """Skip the given places to the next roman numeral.

    Examples:
    >>> skip('I')
    'II'
    >>> skip('VII')
    'I'
    >>> skip('I', 2)
    'III'
    """
    # Wrap around the seven diatonic degrees.
    position = numerals.index(roman_numeral) + skip
    return numerals[position % 7]
| gpl-3.0 |
cognitiveclass/edx-platform | common/djangoapps/track/tests/test_contexts.py | 114 | 2108 | # pylint: disable=missing-docstring
import ddt
from unittest import TestCase
from track import contexts
@ddt.ddt
class TestContexts(TestCase):
COURSE_ID = 'test/course_name/course_run'
SPLIT_COURSE_ID = 'course-v1:test+course_name+course_run'
ORG_ID = 'test'
@ddt.data(
(COURSE_ID, ''),
(COURSE_ID, '/more/stuff'),
(COURSE_ID, '?format=json'),
(SPLIT_COURSE_ID, ''),
(SPLIT_COURSE_ID, '/more/stuff'),
(SPLIT_COURSE_ID, '?format=json')
)
@ddt.unpack
def test_course_id_from_url(self, course_id, postfix):
url = 'http://foo.bar.com/courses/{}{}'.format(course_id, postfix)
self.assert_parses_course_id_from_url(url, course_id)
def assert_parses_course_id_from_url(self, format_string, course_id):
self.assertEquals(
contexts.course_context_from_url(format_string.format(course_id=course_id)),
{
'course_id': course_id,
'org_id': self.ORG_ID
}
)
def test_no_course_id_in_url(self):
self.assert_empty_context_for_url('http://foo.bar.com/dashboard')
def assert_empty_context_for_url(self, url):
self.assertEquals(
contexts.course_context_from_url(url),
{
'course_id': '',
'org_id': ''
}
)
@ddt.data('', '/', '/?', '?format=json')
def test_malformed_course_id(self, postfix):
self.assert_empty_context_for_url('http://foo.bar.com/courses/test/course_name{}'.format(postfix))
@ddt.data(
(COURSE_ID, ''),
(COURSE_ID, '/more/stuff'),
(COURSE_ID, '?format=json'),
(SPLIT_COURSE_ID, ''),
(SPLIT_COURSE_ID, '/more/stuff'),
(SPLIT_COURSE_ID, '?format=json')
)
@ddt.unpack
def test_course_id_later_in_url(self, course_id, postfix):
url = 'http://foo.bar.com/x/y/z/courses/{}{}'.format(course_id, postfix)
self.assert_parses_course_id_from_url(url, course_id)
def test_no_url(self):
self.assert_empty_context_for_url(None)
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.