repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ashwinr64/android_kernel_motorola_msm8974 | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
# Script identification string reported by optparse's --version.
version = 'build-all.py, version 0.01'
# All per-target build output goes under this sibling directory.
build_dir = '../all-kernels'
# Default make targets; may be replaced by --oldconfig / --make-target in main().
make_command = ["vmlinux", "modules", "dtbs"]
# NOTE: this aliases (and therefore mutates) the real process environment.
make_env = os.environ
make_env.update({
    'ARCH': 'arm',
    'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
# Parsed command-line options; populated in main() before any build runs.
all_options = {}
def error(msg):
    """Write a non-fatal error message to stderr (does not exit)."""
    formatted = "error: %s\n" % msg
    sys.stderr.write(formatted)
def fail(msg):
    """Fail with a user-printed message"""
    error(msg)
    raise SystemExit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    required_files = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    if not all(os.path.isfile(path) for path in required_files):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    import errno  # bug fix: 'errno' is referenced below but was never imported
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # A concurrent creation between the isdir() check and makedirs()
            # is fine; anything else is a real error worth propagating.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    """Append the config option *str* (e.g. 'CONFIG_X=y') to defconfig *file*.

    NOTE: the parameter names shadow the built-ins 'file' and 'str'; they are
    kept unchanged for interface compatibility with existing callers.
    """
    # Parenthesized single-argument form works as both a Python 2 print
    # statement and a Python 3 function call (the original bare statement
    # was Python 2 only).
    print('Updating %s with \'%s\'\n' % (file, str))
    # Use a context manager so the handle is closed even if write() fails.
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    glob_patterns = (
        '[fm]sm[0-9]*_defconfig',
        'apq*_defconfig',
        'qsd*_defconfig',
        'msmkrypton*_defconfig',
    )
    found = {}
    for pattern in glob_patterns:
        for path in glob.glob('arch/arm/configs/' + pattern):
            # Strip the trailing '_defconfig' (10 chars) to get the target name.
            target = os.path.basename(path)[:-10]
            found[target] = path
    return found
class Builder:
    """Runs one make invocation, teeing its output to a log file and
    echoing progress (full output or dots) to stdout."""
    def __init__(self, logname):
        # Open the log eagerly so failures to create it surface immediately.
        self.logname = logname
        self.fd = open(logname, 'w')
    def run(self, args):
        """Run *args* as a subprocess; return its exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read raw chunks from the pipe's fd instead of iterating lines,
        # so output is forwarded as soon as it arrives (unbuffered).
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # Non-verbose mode: print one dot per chunk, and wrap the
                # dot line after every 64 newlines seen in the output.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                sys.stdout.write('.')
                sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
# Targets that failed when --keep-going is set; reported at the end of build_many().
failed_targets = []
def build(target):
    """Configure and build one *target* in its own directory under build_dir,
    logging make output to log-<target>.log.

    Honors the --updateconfigs, --configs and --keep-going options through
    the module-level all_options.  Failed targets are recorded in
    failed_targets when --keep-going is active.
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    # Parenthesized print works as both a Py2 statement and a Py3 call.
    print('Building %s in %s log %s' % (target, dest_dir, log_name))
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    staging_dir = 'install_staging'
    modi_dir = '%s' % staging_dir
    hdri_dir = '%s/usr' % staging_dir
    # Start from a clean staging area so stale artifacts never get installed.
    shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        # Build targets can be dependent upon the completion of previous
        # build targets, so build them one at a time.
        cmd_line = ['make',
            'INSTALL_HDR_PATH=%s' % hdri_dir,
            'INSTALL_MOD_PATH=%s' % modi_dir,
            'O=%s' % dest_dir]
        build_targets = []
        for c in make_command:
            # Option-style entries (-j4, --foo) belong on the command line;
            # everything else is an actual make target.
            if re.match(r'^-{1,2}\w', c):
                cmd_line.append(c)
            else:
                build_targets.append(c)
        for t in build_targets:
            # Renamed from 'build' — the original shadowed this function's
            # own name with the local Builder instance.
            builder = Builder(log_name)
            result = builder.run(cmd_line + [t])
            if result != 0:
                if all_options.keep_going:
                    failed_targets.append(target)
                    fail_or_error = error
                else:
                    fail_or_error = fail
                fail_or_error("Failed to build %s, see %s" %
                        (target, builder.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build every target in *targets*.

    *allconf* maps target names to defconfig paths (used when
    --updateconfigs rewrites them first).  Any failures accumulated under
    --keep-going are reported in one final fail() call.
    """
    # Parenthesized print works as both a Py2 statement and a Py3 call.
    print("Building %d target(s)" % len(targets))
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    if failed_targets:
        # failed_targets is already a list of strings; the original wrapped
        # it in a redundant identity list comprehension.
        fail('\n '.join(["Failed targets:"] + failed_targets))
def main():
    """Parse command-line options and drive the selected kernel builds."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
       %prog [options] all -- Build all targets
       %prog [options] target target ... -- List specific targets
       %prog [options] perf -- Build all perf targets
       %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
            "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
            ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Publish the parsed options to the rest of the module.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)
    # --oldconfig replaces the whole target list; -m appends custom targets.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    # Positional arguments select which defconfigs to build.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
    main()
| gpl-2.0 |
sdadia/helper_functions | setup.py | 1 | 1455 | from distutils.core import setup
setup(
    # Distribution metadata for the single-module 'helper_functions' package.
    name = 'helper_functions',
    version = '2.0.10',
    py_modules = ['helper_functions'],
    author = 'Sahil Dadia',
    author_email = 'dadiasahil94@yahoo.in',
    url = 'https://github.com/sdadia/helper_functions.git',# use the URL to the github repo
    description = 'A simple module of simple function, for opencv and python3',
    license = 'MIT',
    keywords = ['opencv', 'helper', 'scripts'], # arbitrary keywords
    classifiers = [
        'Topic :: Utilities',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Natural Language :: English'
    ],
    # Runtime dependencies pip will install alongside this package.
    install_requires= [
        'numpy',
        'scipy',
        'sklearn',
        'matplotlib',
        'imutils',
        'natsort',
    ],
)
| mit |
WellConstructed/WellCom | wellcom_app/migrations/0015_auto_20160921_1419.py | 1 | 3379 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-21 18:19
from __future__ import unicode_literals
from django.db import migrations, models
# Canned water-test rows: first row is the header (model field names),
# each following row is one WaterTest record in the same column order.
well_test_data = [["id","date","pH","turbidity_ntu","app_true_colour_hz","conductivity_uscm","temperature_c","total_iron","calcium","magnesium","chloride","sulphate","suspended_solids","total_dissolved_solids","total_solids","total_alkalinity","total_hardness","calcium_hardness","magnesium_hardness","copper","nitrite_nitrogen","nitrate_nitrogen","fluoride","mpn_index_tc_per_deciliter","ammonia_nitrogen","manganese","aluminum","well_id"],
[1,"2016-03-08",7.1,0.99,6,443,33,0.07,56,85,19,15,19,222,241,54,490,140,350,0.18,0.009,0.3,0.04,0,0.32,0.061,0.018,1],
[2,"2016-05-09",7,1.88,8,338,34,0.07,48,4.3,21,24,14,169,183,66,138,120,18,0.42,0.046,4.9,0.92,0,0.5,0.37,0.173,2],
[3,"2016-03-14",6.9,9.81,11,146,29,0.24,7.2,5.3,9,5,35,72,107,22,40,18,22,0.27,0.025,3.2,1.3,0,0.31,0.059,0.269,4],
[4,"2015-11-09",7,3.92,6,413,31,0.13,80,104,13,3,12,206,218,96,630,200,430,0.18,0.088,2,0.27,0,0.17,0.02,0.172,3],
[5,"2016-03-09",6.9,0.71,5,359,33,0.01,40,7.7,24,5,3,180,183,28,132,100,32,0.11,0.003,1.7,1.5,0,0.07,0.011,0.347,7]]
def add_well_test_data(apps, schema_editor):
    """Data migration: insert the canned rows from well_test_data.

    Uses the historical model via apps.get_model() as Django data
    migrations require.  The header row of well_test_data holds the model
    field names, so each data row can be zipped straight into kwargs —
    replacing the original's 28 hand-copied keyword arguments.
    """
    WaterTest = apps.get_model("wellcom_app", "WaterTest")
    header = well_test_data[0]
    for row in well_test_data[1:]:
        # Column names in the header match the model field names exactly.
        field_values = dict(zip(header, row))
        WaterTest(**field_values).save()
class Migration(migrations.Migration):
    """Loads the initial WaterTest sample data."""
    # Must run after the schema migration that created the referenced tables.
    dependencies = [
        ('wellcom_app', '0014_auto_20160921_1001'),
    ]
    operations = [
        # RunPython with no reverse function: this migration is irreversible.
        migrations.RunPython(add_well_test_data)
    ]
| mit |
caronc/nzbget-subliminal | Subliminal/pyasn1/type/namedval.py | 200 | 1605 | # ASN.1 named integers
from pyasn1 import error
__all__ = [ 'NamedValues' ]
class NamedValues:
    """Bidirectional mapping between symbolic names and integer values.

    Arguments may be (name, value) tuples or bare names; a bare name is
    assigned the automatic value, i.e. its 1-based position in the
    argument list.  Duplicate names or values raise PyAsn1Error.
    """
    def __init__(self, *namedValues):
        self.nameToValIdx = {}
        self.valToNameIdx = {}
        self.namedValues = ()
        for position, item in enumerate(namedValues):
            if isinstance(item, tuple):
                name, val = item
            else:
                # Bare name: automatic value is the 1-based position.
                name, val = item, position + 1
            if name in self.nameToValIdx:
                raise error.PyAsn1Error('Duplicate name %s' % (name,))
            self.nameToValIdx[name] = val
            if val in self.valToNameIdx:
                raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
            self.valToNameIdx[val] = name
            self.namedValues = self.namedValues + ((name, val),)
    def __str__(self):
        return str(self.namedValues)
    def getName(self, value):
        """Return the name bound to *value*, or None if unknown."""
        return self.valToNameIdx.get(value)
    def getValue(self, name):
        """Return the value bound to *name*, or None if unknown."""
        return self.nameToValIdx.get(name)
    def __getitem__(self, i):
        return self.namedValues[i]
    def __len__(self):
        return len(self.namedValues)
    def __add__(self, namedValues):
        return self.__class__(*self.namedValues + namedValues)
    def __radd__(self, namedValues):
        return self.__class__(*namedValues + tuple(self))
    def clone(self, *namedValues):
        """Return a copy extended with the extra *namedValues*."""
        return self.__class__(*tuple(self) + namedValues)
# XXX clone/subtype?
| gpl-3.0 |
jborlik/AdventOfCode2016 | day20.py | 1 | 1269 | # -*- coding: utf-8 -*-
# Advent of Code 2016, day 20: count IPs allowed by a set of blacklist
# ranges.  Each input line is "low-high" (inclusive) over 0..4294967295.
with open('day20.dat') as datafile:
    blacklist = [list(map(int,x.split('-'))) for x in datafile.readlines()]
# Sort ranges by lower bound so they can be walked in order.
blacklist.sort(key=lambda arr: arr[0])
doPartOne = False
if doPartOne:
    # Part 1 (brute force): scan IPs upward until one falls below the
    # lower bound of the currently active blacklist range.
    iactiveblacklist = 0
    firstip = 0
    numclear = 0
    for iip in range(0,4294967295+1):
        if iip > blacklist[iactiveblacklist][1]:
            # we are past the bounds of this blacklist item
            iactiveblacklist += 1
        # are we less than the lower bound of the next blacklist? If so, GREAT!
        if iip < blacklist[iactiveblacklist][0]:
            numclear += 1
            if firstip == 0:
                firstip = iip
                break
    print("CLEAR IP:", firstip)
# While it is possible to do Part 2 in the method above (basically,
# removing the break), it is very time consuming. It might be better
# to just walk through the blacklists
# Part 2: track the highest end seen so far; any gap between it and the
# next range's start is unblocked address space.
numclear = 0
currentmaxendbl = 0
for iabl in range(1,len(blacklist)):
    if blacklist[iabl-1][1] > currentmaxendbl:
        currentmaxendbl = blacklist[iabl-1][1]
    numclear += max(0, blacklist[iabl][0]-currentmaxendbl-1)
# last one
numclear += max(0, 4294967295-currentmaxendbl)
print("Num clear: ",numclear)
| gpl-3.0 |
zzpwelkin/python-oauth2 | oauth2/clients/imap.py | 885 | 1685 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""
    def authenticate(self, url, consumer, token):
        """Authenticate against *url* with the XOAUTH mechanism.

        Validates that *consumer* and *token*, when given, are the proper
        oauth2 types before delegating to imaplib's authenticate().
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        def xoauth_response(challenge):
            # imaplib passes the server challenge, which XOAUTH ignores.
            return oauth2.build_xoauth_string(url, consumer, token)
        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', xoauth_response)
| mit |
AstrorEnales/EyeCenterDetection | test/stitch.py | 1 | 1454 | #!/usr/bin/env python
from PIL import Image, ImageDraw, ImageFont
import os
font = ImageFont.truetype('C:/Windows/Fonts/arialbd.ttf', 16)
def draw_label(draw, position, text):
    """Draw *text* at *position* with a 1px white drop shadow for contrast."""
    shadow_position = (position[0] + 1, position[1] + 1)
    draw.text(shadow_position, text, (255, 255, 255), font=font)
    draw.text(position, text, (0, 0, 0), font=font)
# BioID_0000_ascend_result
# Stitch the per-algorithm result images for every BioID frame into one
# labeled 2-column x 3-row comparison sheet under movie/.
for i in range(0, 1521):
    im_ascend = Image.open('bioid/BioID_%04i_ascend_result.png' % i)
    im_ascendfit = Image.open('bioid/BioID_%04i_ascendfit_result.png' % i)
    im_paul = Image.open('bioid/BioID_%04i_paul_result.png' % i)
    im_naive = Image.open('bioid/BioID_%04i_naive_result.png' % i)
    im_evol = Image.open('bioid/BioID_%04i_evol_result.png' % i)
    # Canvas sized from im_naive; all result images are assumed the same
    # size — TODO confirm against the generator scripts.
    im = Image.new('RGB', (im_naive.size[0] * 2, im_naive.size[1] * 3))
    draw = ImageDraw.Draw(im)
    im.paste(im_ascend, (0, 0))
    draw_label(draw, (5, 5), 'Ascend')
    im.paste(im_paul, (im_naive.size[0], 0))
    draw_label(draw, (im_naive.size[0] + 5, 5), 'Paul')
    im.paste(im_ascendfit, (0, im_naive.size[1]))
    draw_label(draw, (5, im_naive.size[1] + 5), 'Ascendfit')
    im.paste(im_naive, (im_naive.size[0], im_naive.size[1]))
    draw_label(draw, (im_naive.size[0] + 5, im_naive.size[1] + 5), 'Naive')
    im.paste(im_evol, (0, im_naive.size[1] * 2))
    draw_label(draw, (5, im_naive.size[1] * 2 + 5), 'Evol')
    im.save('movie/%04i.png' % i, 'PNG')
| mit |
wood-galaxy/FreeCAD | src/Mod/Arch/importSH3D.py | 1 | 9011 | #***************************************************************************
#* *
#* Copyright (c) 2016 Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__ = "FreeCAD SweetHome3D importer"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
import os,zipfile,xml.sax,FreeCAD,Part,Draft,Arch,Mesh,tempfile,math,Sketcher
# Print progress messages while parsing (module-wide switch).
DEBUG = True
# Save a reference to the builtin open() (module name '__builtin__' on
# Python 2) because this module defines its own open() below.
if open.__module__ == '__builtin__':
    pyopen = open # because we'll redefine open below
def open(filename):
    "called when freecad wants to open a file"
    # NOTE: intentionally shadows the builtin open() (FreeCAD importer
    # convention); the real builtin was saved as 'pyopen' above.
    # Document name = file base name without extension (Py2 byte string).
    docname = (os.path.splitext(os.path.basename(filename))[0]).encode("utf8")
    doc = FreeCAD.newDocument(docname)
    doc.Label = decode(docname)
    FreeCAD.ActiveDocument = doc
    read(filename)
    return doc
def insert(filename,docname):
    "called when freecad wants to import a file"
    try:
        # Reuse the target document if it already exists.
        doc = FreeCAD.getDocument(docname)
    except NameError:
        doc = FreeCAD.newDocument(docname)
    FreeCAD.ActiveDocument = doc
    read(filename)
    return doc
def decode(name):
    """Decode a byte string to unicode, trying UTF-8 then Latin-1.

    Returns *name* unchanged if decoding fails entirely (Latin-1 decoding
    of bytes cannot actually fail, so that branch is a safety net).
    """
    try:
        decodedName = name.decode("utf8")
    except UnicodeDecodeError:
        try:
            decodedName = name.decode("latin1")
        except UnicodeDecodeError:
            # Bug fix: the original called translate("Arch", ...) here, but
            # 'translate' is never imported in this module, so the error
            # path itself raised a NameError.  Report the message directly.
            FreeCAD.Console.PrintError("Error: Couldn't determine character encoding")
            decodedName = name
    return decodedName
def read(filename):
    "reads the file and creates objects in the active document"
    # A .sh3d file is a zip archive; the scene description is in Home.xml.
    z = zipfile.ZipFile(filename)
    homexml = z.read("Home.xml")
    handler = SH3DHandler(z)
    xml.sax.parseString(homexml,handler)
    FreeCAD.ActiveDocument.recompute()
    if not handler.makeIndividualWalls:
        # Merge all wall segments sharing the same "width;height" key into
        # one sketch each, then make one Arch Wall per sketch.
        delete = []
        walls = []
        for k,lines in handler.lines.items():
            sk = FreeCAD.ActiveDocument.addObject("Sketcher::SketchObject","Walls_trace")
            for l in lines:
                for edge in l.Shape.Edges:
                    sk.addGeometry(edge.Curve)
                delete.append(l.Name)
            FreeCAD.ActiveDocument.recompute()
            # The dict key encodes "width;height" (built in SH3DHandler).
            k = k.split(";")
            walls.append(Arch.makeWall(baseobj=sk,width=float(k[0]),height=float(k[1])))
        for d in delete:
            FreeCAD.ActiveDocument.removeObject(d)
        # Fold all walls into the last one; windows become subtractions.
        w = walls.pop()
        w.Additions = walls
        w.Subtractions = handler.windows
        g = FreeCAD.ActiveDocument.addObject("App::DocumentObjectGroup","Furniture")
        g.Group = handler.furniture
    FreeCAD.ActiveDocument.recompute()
class SH3DHandler(xml.sax.ContentHandler):
    """SAX handler that turns SweetHome3D XML elements into FreeCAD objects.

    Collects wall traces in self.lines (keyed by "width;height"), created
    windows in self.windows and furniture meshes in self.furniture for the
    caller (read()) to assemble.  SweetHome3D lengths are in cm; the *10
    scaling below converts them to FreeCAD's mm.
    """
    def __init__(self,z):
        # z is the open zipfile of the .sh3d archive (for embedded models).
        self.makeIndividualWalls = False
        self.z = z
        self.windows = []
        self.furniture = []
        self.lines = {}
    def startElement(self, tag, attributes):
        if tag == "wall":
            name = attributes["id"]
            p1 = FreeCAD.Vector(float(attributes["xStart"])*10,float(attributes["yStart"])*10,0)
            p2 = FreeCAD.Vector(float(attributes["xEnd"])*10,float(attributes["yEnd"])*10,0)
            height = float(attributes["height"])*10
            thickness = float(attributes["thickness"])*10
            if DEBUG: print "Creating wall: ",name
            line = Draft.makeLine(p1,p2)
            if self.makeIndividualWalls:
                wall = Arch.makeWall(baseobj=line,width=thickness,height=height,name=name)
                wall.Label = name
            else:
                # Group wall traces by identical width;height for merging later.
                self.lines.setdefault(str(thickness)+";"+str(height),[]).append(line)
        elif tag == "pieceOfFurniture":
            name = attributes["name"]
            # Extract the embedded OBJ model to a temp file so Mesh can read it.
            data = self.z.read(attributes["model"])
            th,tf = tempfile.mkstemp(suffix=".obj")
            f = pyopen(tf,"wb")
            f.write(data)
            f.close()
            os.close(th)
            m = Mesh.read(tf)
            # Scale factors map the unit model onto the declared dimensions.
            fx = (float(attributes["width"])/100)/m.BoundBox.XLength
            fy = (float(attributes["height"])/100)/m.BoundBox.YLength
            fz = (float(attributes["depth"])/100)/m.BoundBox.ZLength
            mat = FreeCAD.Matrix()
            mat.scale(1000*fx,1000*fy,1000*fz)
            mat.rotateX(math.pi/2)
            mat.rotateZ(math.pi)
            if DEBUG: print "Creating furniture: ",name
            if "angle" in attributes.keys():
                mat.rotateZ(float(attributes["angle"]))
            m.transform(mat)
            os.remove(tf)
            # Position: recenter the mesh, move to (x, y), rest on z=0.
            p = m.BoundBox.Center.negative()
            p = p.add(FreeCAD.Vector(float(attributes["x"])*10,float(attributes["y"])*10,0))
            p = p.add(FreeCAD.Vector(0,0,m.BoundBox.Center.z-m.BoundBox.ZMin))
            m.Placement.Base = p
            obj = FreeCAD.ActiveDocument.addObject("Mesh::Feature",name)
            obj.Mesh = m
            self.furniture.append(obj)
        elif tag == "doorOrWindow":
            name = attributes["name"]
            # Same temp-file extraction dance as for furniture above.
            data = self.z.read(attributes["model"])
            th,tf = tempfile.mkstemp(suffix=".obj")
            f = pyopen(tf,"wb")
            f.write(data)
            f.close()
            os.close(th)
            m = Mesh.read(tf)
            fx = (float(attributes["width"])/100)/m.BoundBox.XLength
            fy = (float(attributes["height"])/100)/m.BoundBox.YLength
            fz = (float(attributes["depth"])/100)/m.BoundBox.ZLength
            mat = FreeCAD.Matrix()
            mat.scale(1000*fx,1000*fy,1000*fz)
            mat.rotateX(math.pi/2)
            m.transform(mat)
            # Subtraction volume: the bounding box extended 500 mm on both
            # sides in Y so it fully pierces the host wall.
            b = m.BoundBox
            v1 = FreeCAD.Vector(b.XMin,b.YMin-500,b.ZMin)
            v2 = FreeCAD.Vector(b.XMax,b.YMin-500,b.ZMin)
            v3 = FreeCAD.Vector(b.XMax,b.YMax+500,b.ZMin)
            v4 = FreeCAD.Vector(b.XMin,b.YMax+500,b.ZMin)
            sub = Part.makePolygon([v1,v2,v3,v4,v1])
            sub = Part.Face(sub)
            sub = sub.extrude(FreeCAD.Vector(0,0,b.ZLength))
            os.remove(tf)
            # Prefer a clean solid; fall back to a direct mesh-to-shape pass.
            shape = Arch.getShapeFromMesh(m)
            if not shape:
                shape=Part.Shape()
                shape.makeShapeFromMesh(m.Topology,0.100000)
                shape = shape.removeSplitter()
            if shape:
                if DEBUG: print "Creating window: ",name
                if "angle" in attributes.keys():
                    shape.rotate(shape.BoundBox.Center,FreeCAD.Vector(0,0,1),math.degrees(float(attributes["angle"])))
                    sub.rotate(shape.BoundBox.Center,FreeCAD.Vector(0,0,1),math.degrees(float(attributes["angle"])))
                p = shape.BoundBox.Center.negative()
                p = p.add(FreeCAD.Vector(float(attributes["x"])*10,float(attributes["y"])*10,0))
                p = p.add(FreeCAD.Vector(0,0,shape.BoundBox.Center.z-shape.BoundBox.ZMin))
                if "elevation" in attributes.keys():
                    p = p.add(FreeCAD.Vector(0,0,float(attributes["elevation"])*10))
                shape.translate(p)
                sub.translate(p)
                obj = FreeCAD.ActiveDocument.addObject("Part::Feature",name+"_body")
                obj.Shape = shape
                subobj = FreeCAD.ActiveDocument.addObject("Part::Feature",name+"_sub")
                subobj.Shape = sub
                if FreeCAD.GuiUp:
                    subobj.ViewObject.hide()
                win = Arch.makeWindow(baseobj=obj,name=name)
                win.Label = name
                win.Subvolume = subobj
                self.windows.append(win)
            else:
                print("importSH3D: Error creating shape for door/window "+name)
| lgpl-2.1 |
koomik/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/YoutubeDL.py | 2 | 57294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import collections
import datetime
import errno
import io
import json
import locale
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import traceback
if os.name == 'nt':
import ctypes
from .utils import (
compat_cookiejar,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_request,
ContentTooShortError,
date_from_str,
DateRange,
determine_ext,
DownloadError,
encodeFilename,
ExtractorError,
format_bytes,
formatSeconds,
get_term_width,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
PagedList,
PostProcessingError,
platform_name,
preferredencoding,
SameFileError,
sanitize_filename,
subtitles_filename,
takewhile_inclusive,
UnavailableVideoError,
url_basename,
write_json_file,
write_string,
YoutubeDLHandler,
prepend_extension,
)
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .postprocessor import FFmpegMergerPP
from .version import __version__
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. As, given a video URL, the downloader doesn't know how to
extract all the needed information, task that InfoExtractors do, it
has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object handles it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL process the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for acces a video.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forceid: Force printing ID.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
outtmpl: Template for output names.
restrictfilenames: Do not allow "&" and spaces in file names
ignoreerrors: Do not stop on download errors.
nooverwrites: Prevent overwriting files.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatic subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: Subtitle format [srt/sbv/vtt] (default=srt)
subtitleslangs: List of languages of the subtitles to download
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
None to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
cookiefile: File name where cookies should be read from and dumped to.
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fridibi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
The following parameters are not used by YoutubeDL itself, they are used by
the FileDownloader:
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle
The following options are used by the post processors:
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
otherwise prefer avconv.
"""
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
    def __init__(self, params=None):
        """Create a FileDownloader object with the given options.

        params: option dict (see the class docstring for the keys); an
        empty dict is used when omitted.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # Screen output goes to stderr when 'logtostderr' is truthy
        # (bool indexes the two-element list as 0 or 1).
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        if params.get('bidi_workaround', False):
            # Spawn an external bidi filter (bidiv, falling back to fribidi)
            # behind a pty so terminal output is reordered for RTL text.
            try:
                import pty
                master, slave = pty.openpty()
                width = get_term_width()
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                # errno 2 == ENOENT: neither executable was found.
                if ose.errno == 2:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise
        if (sys.version_info >= (3,) and sys.platform != 'win32' and
            sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
            and not params['restrictfilenames']):
            # On Python 3, the Unicode filesystem API will throw errors (#1474)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all charactes. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True
        if '%(stitle)s' in self.params.get('outtmpl', ''):
            self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
        self._setup_opener()
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key, it will try to get one from
the _ies list, if there's no instance it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractors():
self.add_info_extractor(ie)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
    def add_progress_hook(self, ph):
        """Add the progress hook (currently only for the file downloader)"""
        # ph is a callable; the FileDownloader calls it with status dicts.
        self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert type(message) == type('')
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        # check_quiet=True makes to_stdout honour the 'quiet' option.
        return self.to_stdout(message, skip_eol, check_quiet=True)
    def _write_string(self, s, out=None):
        # Thin wrapper around write_string() that consistently applies the
        # user-selected output encoding (the 'encoding' option).
        write_string(s, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, check_quiet=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not check_quiet or not self.params.get('quiet', False):
message = self._bidi_workaround(message)
terminator = ['\n', ''][skip_eol]
output = message + terminator
self._write_string(output, self._screen_file)
def to_stderr(self, message):
"""Print message to stderr."""
assert type(message) == type('')
if self.params.get('logger'):
self.params['logger'].error(message)
else:
message = self._bidi_workaround(message)
output = message + '\n'
self._write_string(output, self._err_file)
    def to_console_title(self, message):
        # Set the terminal/console window title to `message`; no-op unless
        # the 'consoletitle' option is enabled.
        if not self.params.get('consoletitle', False):
            return
        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
            # c_wchar_p() might not be necessary if `message` is
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            # xterm-style escape: OSC 0 sets both icon name and window title.
            self._write_string('\033]0;%s\007' % message, self._screen_file)
    def save_console_title(self):
        # Push the current terminal title onto the terminal's title stack
        # (xterm escape); no-op unless 'consoletitle' is enabled.
        if not self.params.get('consoletitle', False):
            return
        if 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)
    def restore_console_title(self):
        # Pop the previously saved terminal title off the title stack
        # (xterm escape); no-op unless 'consoletitle' is enabled.
        if not self.params.get('consoletitle', False):
            return
        if 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)
    def __enter__(self):
        # Context-manager entry: remember the terminal title so that
        # __exit__ can restore it.
        self.save_console_title()
        return self
    def __exit__(self, *args):
        # Context-manager exit: restore the terminal title and persist
        # cookies when a cookie file was configured.
        self.restore_console_title()
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()
    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.
        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.
        tb, if given, is additional traceback information.
        Raises DownloadError unless 'ignoreerrors' is set, in which case
        it only records a non-zero retcode.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    # Prefer the traceback of the wrapped exception (e.g. an
                    # ExtractorError's original cause) when one is attached.
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += compat_str(traceback.format_exc())
                else:
                    # Not inside an exception handler: dump the current stack.
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            # Re-raise as DownloadError, preserving the innermost exc_info
            # so callers can inspect the original cause.
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1
def report_warning(self, message):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if self._err_file.isatty() and os.name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if self._err_file.isatty() and os.name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            # file_name may contain characters the console encoding cannot
            # represent; fall back to a generic message.
            self.to_screen('[download] The file has already been downloaded')
    def prepare_filename(self, info_dict):
        """Generate the output filename.
        Expands the 'outtmpl' option with the sanitized fields of
        info_dict; returns None when the template is broken.
        """
        try:
            # Work on a copy so template-only fields do not leak into the
            # caller's info_dict.
            template_dict = dict(info_dict)
            template_dict['epoch'] = int(time.time())
            autonumber_size = self.params.get('autonumber_size')
            if autonumber_size is None:
                autonumber_size = 5
            # e.g. '%05d' -- zero-padded running download counter
            autonumber_templ = '%0' + str(autonumber_size) + 'd'
            template_dict['autonumber'] = autonumber_templ % self._num_downloads
            if template_dict.get('playlist_index') is not None:
                template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']
            if template_dict.get('resolution') is None:
                # Synthesize a resolution string from width/height if possible.
                if template_dict.get('width') and template_dict.get('height'):
                    template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
                elif template_dict.get('height'):
                    template_dict['resolution'] = '%sp' % template_dict['height']
                elif template_dict.get('width'):
                    template_dict['resolution'] = '?x%d' % template_dict['width']
            # Make every value filesystem-safe; 'id' gets laxer rules.
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id'))
            template_dict = dict((k, sanitize(k, v))
                for k, v in template_dict.items()
                if v is not None)
            # Unknown template fields expand to 'NA' instead of raising.
            template_dict = collections.defaultdict(lambda: 'NA', template_dict)
            tmpl = os.path.expanduser(self.params['outtmpl'])
            filename = tmpl % template_dict
            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
    def _match_entry(self, info_dict):
        """ Returns None iff the file should be downloaded """
        # Each filter below returns a human-readable skip-reason string
        # when the video must be skipped.
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        if 'title' in info_dict:
            # This can happen when we're just evaluating the playlist
            title = info_dict['title']
            matchtitle = self.params.get('matchtitle', False)
            if matchtitle:
                if not re.search(matchtitle, title, re.IGNORECASE):
                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
            rejecttitle = self.params.get('rejecttitle', False)
            if rejecttitle:
                if re.search(rejecttitle, title, re.IGNORECASE):
                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date', None)
        if date is not None:
            # A default DateRange() accepts every date.
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
        view_count = info_dict.get('view_count', None)
        if view_count is not None:
            min_views = self.params.get('min_views')
            if min_views is not None and view_count < min_views:
                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
            max_views = self.params.get('max_views')
            if max_views is not None and view_count > max_views:
                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
        age_limit = self.params.get('age_limit')
        if age_limit is not None:
            if age_limit < info_dict.get('age_limit', 0):
                # NOTE(review): `title` is only bound when info_dict has a
                # 'title' key -- latent NameError on age-restricted entries
                # without a title. Left unchanged here.
                return 'Skipping "' + title + '" because it is age restricted'
        if self.in_download_archive(info_dict):
            return '%s has already been recorded in archive' % video_title
        return None
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
    def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     process=True):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
        When process is False the raw extractor result is returned without
        being resolved through process_ie_result().
        '''
        if ie_key:
            # Restrict extraction to the explicitly requested extractor.
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies
        for ie in ies:
            if not ie.suitable(url):
                continue
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')
            try:
                ie_result = ie.extract(url)
                if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
                    break
                if isinstance(ie_result, list):
                    # Backwards compatibility: old IE result format
                    ie_result = {
                        '_type': 'compat_list',
                        'entries': ie_result,
                    }
                self.add_default_extra_info(ie_result, ie, url)
                if process:
                    return self.process_ie_result(ie_result, download, extra_info)
                else:
                    return ie_result
            except ExtractorError as de: # An error we somewhat expected
                self.report_error(compat_str(de), de.format_traceback())
                break
            except MaxDownloadsReached:
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
                    break
                else:
                    raise
        else:
            # Loop completed without any extractor claiming the URL.
            self.report_error('no suitable InfoExtractor for URL %s' % url)
    def add_default_extra_info(self, ie_result, ie, url):
        # Attach standard provenance fields (extractor name/key and the
        # originating URL) without overwriting extractor-provided values.
        self.add_extra_info(ie_result, {
            'extractor': ie.IE_NAME,
            'webpage_url': url,
            'webpage_url_basename': url_basename(url),
            'extractor_key': ie.ie_key(),
        })
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie(may be modified) and resolve all unresolved
        references (URLs, playlist items).
        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        Dispatches on ie_result['_type']: 'video', 'url', 'url_transparent',
        'playlist' or the legacy 'compat_list'.
        """
        result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system
        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)
            def make_result(embedded_info):
                # Merge: the embedding page's fields win, except for the
                # content-describing fields below, which come from the
                # embedded (real) video.
                new_result = ie_result.copy()
                for f in ('_type', 'url', 'ext', 'player_url', 'formats',
                          'entries', 'ie_key', 'duration',
                          'subtitles', 'annotations', 'format',
                          'thumbnail', 'thumbnails'):
                    if f in new_result:
                        del new_result[f]
                    if f in embedded_info:
                        new_result[f] = embedded_info[f]
                return new_result
            new_result = make_result(info)
            assert new_result.get('_type') != 'url_transparent'
            if new_result.get('_type') == 'compat_list':
                new_result['entries'] = [
                    make_result(e) for e in new_result['entries']]
            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type == 'playlist':
            # We process each entry in the playlist
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
            self.to_screen('[download] Downloading playlist: %s' % playlist)
            playlist_results = []
            # playliststart is 1-based in the options, 0-based internally.
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend', None)
            # For backwards compatibility, interpret -1 as whole list
            if playlistend == -1:
                playlistend = None
            if isinstance(ie_result['entries'], list):
                n_all_entries = len(ie_result['entries'])
                entries = ie_result['entries'][playliststart:playlistend]
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
            else:
                # Lazily paged playlist: fetch only the requested slice.
                assert isinstance(ie_result['entries'], PagedList)
                entries = ie_result['entries'].getslice(
                    playliststart, playlistend)
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))
            for i, entry in enumerate(entries, 1):
                self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
                extra = {
                    'playlist': playlist,
                    'playlist_index': i + playliststart,
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }
                reason = self._match_entry(entry)
                if reason is not None:
                    self.to_screen('[download] ' + reason)
                    continue
                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            return ie_result
        elif result_type == 'compat_list':
            def _fixup(r):
                # Propagate provenance fields to each legacy entry.
                self.add_extra_info(r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
def select_format(self, format_spec, available_formats):
if format_spec == 'best' or format_spec is None:
return available_formats[-1]
elif format_spec == 'worst':
return available_formats[0]
elif format_spec == 'bestaudio':
audio_formats = [
f for f in available_formats
if f.get('vcodec') == 'none']
if audio_formats:
return audio_formats[-1]
elif format_spec == 'worstaudio':
audio_formats = [
f for f in available_formats
if f.get('vcodec') == 'none']
if audio_formats:
return audio_formats[0]
elif format_spec == 'bestvideo':
video_formats = [
f for f in available_formats
if f.get('acodec') == 'none']
if video_formats:
return video_formats[-1]
elif format_spec == 'worstvideo':
video_formats = [
f for f in available_formats
if f.get('acodec') == 'none']
if video_formats:
return video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
filter_f = lambda f: f['format_id'] == format_spec
matches = list(filter(filter_f, available_formats))
if matches:
return matches[-1]
return None
    def process_video_result(self, info_dict, download=True):
        """Resolve a single 'video' result: fill in defaults, validate
        required fields, select the requested format(s) and optionally
        download them. Returns the (mutated) info_dict."""
        assert info_dict.get('_type', 'video') == 'video'
        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')
        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None
        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']
        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Derive a YYYYMMDD upload_date from the epoch timestamp.
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
        # This extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
                self.process_info(info_dict)
            return info_dict
        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']
        if not formats:
            raise ExtractorError('No video formats found!')
        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            if 'url' not in format:
                raise ExtractorError('Missing "url" key in result (index %d)' % i)
            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if 'ext' not in format:
                format['ext'] = determine_ext(format['url']).lower()
        format_limit = self.params.get('format_limit', None)
        if format_limit:
            # Drop everything better than the limit (formats are ordered
            # worst -> best).
            formats = list(takewhile_inclusive(
                lambda f: f['format_id'] != format_limit, formats
            ))
        # TODO Central sorting goes here
        if formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats
        if self.params.get('listformats', None):
            self.list_formats(info_dict)
            return
        req_format = self.params.get('format')
        if req_format is None:
            req_format = 'best'
        formats_to_download = []
        # The -1 is for supporting YoutubeIE
        if req_format in ('-1', 'all'):
            formats_to_download = formats
        else:
            # We can accept formats requested in the format: 34/5/best, we pick
            # the first that is available, starting from left
            req_formats = req_format.split('/')
            for rf in req_formats:
                if re.match(r'.+?\+.+?', rf) is not None:
                    # Two formats have been requested like '137+139'
                    format_1, format_2 = rf.split('+')
                    formats_info = (self.select_format(format_1, formats),
                                    self.select_format(format_2, formats))
                    if all(formats_info):
                        selected_format = {
                            'requested_formats': formats_info,
                            'format': rf,
                            'ext': formats_info[0]['ext'],
                        }
                    else:
                        selected_format = None
                else:
                    selected_format = self.select_format(rf, formats)
                if selected_format is not None:
                    formats_to_download = [selected_format]
                    break
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)
        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict
    def process_info(self, info_dict):
        """Process a single resolved IE result.
        Handles forced printings, filename preparation, writing side files
        (description, annotations, subtitles, info JSON, thumbnail), the
        actual download and postprocessing.
        """
        assert info_dict.get('_type', 'video') == 'video'
        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()
        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            # Keep filenames manageable; the full title stays in 'fulltitle'.
            info_dict['title'] = info_dict['title'][:197] + '...'
        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']
        if not 'format' in info_dict:
            info_dict['format'] = info_dict['ext']
        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen('[download] ' + reason)
            return
        self._num_downloads += 1
        filename = self.prepare_filename(info_dict)
        # Forced printings
        if self.params.get('forcetitle', False):
            self.to_stdout(info_dict['fulltitle'])
        if self.params.get('forceid', False):
            self.to_stdout(info_dict['id'])
        if self.params.get('forceurl', False):
            # For RTMP URLs, also include the playpath
            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
            self.to_stdout(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
            self.to_stdout(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            self.to_stdout(filename)
        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        if self.params.get('forceformat', False):
            self.to_stdout(info_dict['format'])
        if self.params.get('forcejson', False):
            info_dict['_filename'] = filename
            self.to_stdout(json.dumps(info_dict))
        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return
        if filename is None:
            return
        try:
            # Create the destination directory if needed.
            dn = os.path.dirname(encodeFilename(filename))
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + compat_str(err))
            return
        # Optional side files, each guarded by its own option and honouring
        # 'nooverwrites'.
        if self.params.get('writedescription', False):
            descfn = filename + '.description'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
                self.to_screen('[info] Video description is already present')
            else:
                try:
                    self.to_screen('[info] Writing video description to: ' + descfn)
                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                        descfile.write(info_dict['description'])
                except (KeyError, TypeError):
                    self.report_warning('There\'s no description to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write description file ' + descfn)
                    return
        if self.params.get('writeannotations', False):
            annofn = filename + '.annotations.xml'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return
        subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                       self.params.get('writeautomaticsub')])
        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            subtitles = info_dict['subtitles']
            sub_format = self.params.get('subtitlesformat', 'srt')
            for sub_lang in subtitles.keys():
                sub = subtitles[sub_lang]
                if sub is None:
                    continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                            subfile.write(sub)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return
        if self.params.get('writeinfojson', False):
            infofn = os.path.splitext(filename)[0] + '.info.json'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(info_dict, encodeFilename(infofn))
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return
        if self.params.get('writethumbnail', False):
            if info_dict.get('thumbnail') is not None:
                thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
                thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
                if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                    self.to_screen('[%s] %s: Thumbnail is already present' %
                                   (info_dict['extractor'], info_dict['id']))
                else:
                    self.to_screen('[%s] %s: Downloading thumbnail ...' %
                                   (info_dict['extractor'], info_dict['id']))
                    try:
                        uf = self.urlopen(info_dict['thumbnail'])
                        with open(thumb_filename, 'wb') as thumbf:
                            shutil.copyfileobj(uf, thumbf)
                        self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                                       (info_dict['extractor'], info_dict['id'], thumb_filename))
                    except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                        self.report_warning('Unable to download thumbnail "%s": %s' %
                                            (info_dict['thumbnail'], compat_str(err)))
        if not self.params.get('skip_download', False):
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
                success = True
            else:
                try:
                    def dl(name, info):
                        # Pick the right FileDownloader for this format and
                        # forward our progress hooks to it.
                        fd = get_suitable_downloader(info)(self, self.params)
                        for ph in self._progress_hooks:
                            fd.add_progress_hook(ph)
                        return fd.download(name, info)
                    if info_dict.get('requested_formats') is not None:
                        # '137+139'-style request: download each half to its
                        # own file, then merge with ffmpeg/avconv if present.
                        downloaded = []
                        success = True
                        merger = FFmpegMergerPP(self)
                        if not merger._get_executable():
                            postprocessors = []
                            self.report_warning('You have requested multiple '
                                'formats but ffmpeg or avconv are not installed.'
                                ' The formats won\'t be merged')
                        else:
                            postprocessors = [merger]
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            new_info.update(f)
                            fname = self.prepare_filename(new_info)
                            fname = prepend_extension(fname, 'f%s' % f['format_id'])
                            downloaded.append(fname)
                            partial_success = dl(fname, new_info)
                            success = success and partial_success
                        info_dict['__postprocessors'] = postprocessors
                        info_dict['__files_to_merge'] = downloaded
                    else:
                        # Just a single file
                        success = dl(filename, info_dict)
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_error('unable to download video data: %s' % str(err))
                    return
                except (OSError, IOError) as err:
                    raise UnavailableVideoError(err)
                except (ContentTooShortError, ) as err:
                    self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                    return
            if success:
                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return
                self.record_download_archive(info_dict)
def download(self, url_list):
"""Download a given list of URLs."""
if (len(url_list) > 1 and
'%' not in self.params['outtmpl']
and self.params.get('max_downloads') != 1):
raise SameFileError(self.params['outtmpl'])
for url in url_list:
try:
#It also downloads the videos
self.extract_info(url)
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
return self._download_retcode
def download_with_info_file(self, info_filename):
with io.open(info_filename, 'r', encoding='utf-8') as f:
info = json.load(f)
try:
self.process_ie_result(info, download=True)
except DownloadError:
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
keep_video = None
pps_chain = []
if ie_info.get('__postprocessors') is not None:
pps_chain.extend(ie_info['__postprocessors'])
pps_chain.extend(self._pps)
for pp in pps_chain:
try:
keep_video_wish, new_info = pp.run(info)
if keep_video_wish is not None:
if keep_video_wish:
keep_video = keep_video_wish
elif keep_video is None:
# No clear decision yet, let IE decide
keep_video = keep_video_wish
except PostProcessingError as e:
self.report_error(e.msg)
if keep_video is False and not self.params.get('keepvideo', False):
try:
self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
os.remove(encodeFilename(filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded video file')
def _make_archive_id(self, info_dict):
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key')
if extractor is None:
if 'id' in info_dict:
extractor = info_dict.get('ie_key') # key in a playlist
if extractor is None:
return None # Incomplete video information
return extractor.lower() + ' ' + info_dict['id']
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if vid_id is None:
return False # Incomplete video information
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
if line.strip() == vid_id:
return True
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('height') is not None:
if format.get('width') is not None:
res = '%sx%s' % (format['width'], format['height'])
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
res = '?x%d' % format['width']
else:
res = default
return res
    def list_formats(self, info_dict):
        """Print a table of every available format for this video."""
        def format_note(fdict):
            # Assemble the free-text 'note' column from whatever optional
            # metadata the format dict carries (bitrates, codecs, size...).
            res = ''
            if fdict.get('ext') in ['f4f', 'f4m']:
                res += '(unsupported) '
            if fdict.get('format_note') is not None:
                res += fdict['format_note'] + ' '
            if fdict.get('tbr') is not None:
                res += '%4dk ' % fdict['tbr']
            if fdict.get('container') is not None:
                if res:
                    res += ', '
                res += '%s container' % fdict['container']
            if (fdict.get('vcodec') is not None and
                    fdict.get('vcodec') != 'none'):
                if res:
                    res += ', '
                res += fdict['vcodec']
                if fdict.get('vbr') is not None:
                    res += '@'
            elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
                res += 'video@'
            if fdict.get('vbr') is not None:
                res += '%4dk' % fdict['vbr']
            if fdict.get('acodec') is not None:
                if res:
                    res += ', '
                if fdict['acodec'] == 'none':
                    res += 'video only'
                else:
                    res += '%-5s' % fdict['acodec']
            elif fdict.get('abr') is not None:
                if res:
                    res += ', '
                res += 'audio'
            if fdict.get('abr') is not None:
                res += '@%3dk' % fdict['abr']
            if fdict.get('asr') is not None:
                res += ' (%5dHz)' % fdict['asr']
            if fdict.get('filesize') is not None:
                if res:
                    res += ', '
                res += format_bytes(fdict['filesize'])
            return res
        def line(format, idlen=20):
            # One table row: format id, extension, resolution, note.
            return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
                format['format_id'],
                format['ext'],
                self.format_resolution(format),
                format_note(format),
            ))
        formats = info_dict.get('formats', [info_dict])
        # Column width is driven by the longest format id.
        idlen = max(len('format code'),
                    max(len(f['format_id']) for f in formats))
        formats_s = [line(f, idlen) for f in formats]
        if len(formats) > 1:
            # Formats are ordered worst -> best.
            formats_s[0] += (' ' if format_note(formats[0]) else '') + '(worst)'
            formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'
        header_line = line({
            'format_id': 'format code', 'ext': 'extension',
            'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
        self.to_screen('[info] Available formats for %s:\n%s\n%s' %
                       (info_dict['id'], header_line, '\n'.join(formats_s)))
    def urlopen(self, req):
        """ Start an HTTP download """
        # Delegates to the opener built in _setup_opener(), applying the
        # user-configured socket timeout.
        return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
        """Write diagnostic information (encodings, version, git revision,
        Python/platform, proxy map) when the 'verbose' param is set.
        No-op otherwise."""
        if not self.params.get('verbose'):
            return
        # encoding=None: write the encoding report without re-encoding it,
        # since the configured encoding is exactly what is being reported.
        write_string(
            '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                locale.getpreferredencoding(),
                sys.getfilesystemencoding(),
                sys.stdout.encoding,
                self.get_encoding()),
            encoding=None
        )
        self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
        try:
            # Best effort: report the git revision when running from a checkout.
            sp = subprocess.Popen(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                cwd=os.path.dirname(os.path.abspath(__file__)))
            out, err = sp.communicate()
            out = out.decode().strip()
            # NOTE(review): re.match only checks the prefix looks like a hex
            # hash; 'fatal: ...' output would not match, which is the intent.
            if re.match('[0-9a-f]+', out):
                self._write_string('[debug] Git HEAD: ' + out + '\n')
        except:
            # Deliberately swallow any failure (git missing, not a checkout).
            try:
                # sys.exc_clear() exists only on Python 2; harmless elsewhere.
                sys.exc_clear()
            except:
                pass
        self._write_string('[debug] Python version %s - %s' %
                           (platform.python_version(), platform_name()) + '\n')
        proxy_map = {}
        for handler in self._opener.handlers:
            # Collect proxy settings from any handler that carries them.
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
def _setup_opener(self):
        """Build the urllib opener used for all HTTP(S) requests.

        Configures socket timeout, cookie jar (optionally persisted to the
        'cookiefile' param), proxies, HTTPS certificate checking and traffic
        debugging, then stores the opener on self._opener.
        """
        timeout_val = self.params.get('socket_timeout')
        # Default timeout is 600 seconds when not configured.
        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')
        if opts_cookiefile is None:
            # In-memory cookies only.
            self.cookiejar = compat_cookiejar.CookieJar()
        else:
            self.cookiejar = compat_cookiejar.MozillaCookieJar(
                opts_cookiefile)
            # Load existing cookies only if the file is readable; a missing
            # file is fine — it will be created on save.
            if os.access(opts_cookiefile, os.R_OK):
                self.cookiejar.load()
        cookie_processor = compat_urllib_request.HTTPCookieProcessor(
            self.cookiejar)
        if opts_proxy is not None:
            # Explicit proxy param: empty string means "no proxy at all".
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            # Fall back to environment-derived proxies.
            proxies = compat_urllib_request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = compat_urllib_request.ProxyHandler(proxies)
        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(
            self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(debuglevel=debuglevel)
        opener = compat_urllib_request.build_opener(
            https_handler, proxy_handler, cookie_processor, ydlh)
        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
def encode(self, s):
        """Encode *s* into bytes using the configured output encoding.

        Bytes input is returned untouched.  On an encoding failure the
        UnicodeEncodeError is re-raised with a hint appended to its reason.
        """
        if isinstance(s, bytes):
            # Caller already handed us encoded data — pass it through.
            return s
        try:
            encoded = s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            # Augment the error with a remediation hint before propagating.
            err.reason += '. Check your system encoding configuration or use the --encoding option.'
            raise
        return encoded
def get_encoding(self):
        """Return the output encoding: the 'encoding' param when set,
        otherwise the locale's preferred encoding."""
        configured = self.params.get('encoding')
        if configured is not None:
            return configured
        return preferredencoding()
| gpl-3.0 |
havatv/QGIS | tests/src/python/test_qgsdatumtransforms.py | 5 | 18986 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDatumTransforms.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2019-05-25'
__copyright__ = 'Copyright 2019, The QGIS Project'
from qgis.core import (
QgsProjUtils,
QgsCoordinateReferenceSystem,
QgsDatumTransform
)
from qgis.testing import (start_app,
unittest,
)
from utilities import unitTestDataPath
# Initialise the QGIS application once for the whole test module.
start_app()
TEST_DATA_DIR = unitTestDataPath()  # shared fixture directory for tests
class TestPyQgsDatumTransform(unittest.TestCase):
    """Tests for QgsDatumTransform.operations().

    Several expected proj pipeline strings and grid file names differ between
    proj 6 (.gsb grids) and proj >= 7 (CDN-style .tif grids), hence the
    repeated projVersionMajor() branches.
    """
    @unittest.skipIf(QgsProjUtils.projVersionMajor() < 6, 'Not a proj6 build')
    def testOperations(self):
        """Exercise operations() over invalid, identity, simple and
        grid-based CRS pairs."""
        # Invalid source and/or destination CRS yields no operations.
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem(),
                                           QgsCoordinateReferenceSystem())
        self.assertEqual(ops, [])
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
                                           QgsCoordinateReferenceSystem())
        self.assertEqual(ops, [])
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem(),
                                           QgsCoordinateReferenceSystem('EPSG:3111'))
        self.assertEqual(ops, [])
        # Identity transform: a single no-op operation.
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
                                           QgsCoordinateReferenceSystem('EPSG:3111'))
        self.assertEqual(len(ops), 1)
        self.assertTrue(ops[0].name)
        self.assertEqual(ops[0].proj, '+proj=noop')
        self.assertEqual(ops[0].accuracy, 0.0)
        self.assertTrue(ops[0].isAvailable)
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
                                           QgsCoordinateReferenceSystem('EPSG:4283'))
        self.assertEqual(len(ops), 1)
        self.assertTrue(ops[0].name)
        self.assertEqual(ops[0].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        self.assertEqual(ops[0].accuracy, -1.0)
        self.assertTrue(ops[0].isAvailable)
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
                                           QgsCoordinateReferenceSystem('EPSG:28355'))
        self.assertEqual(len(ops), 1)
        self.assertTrue(ops[0].name)
        self.assertEqual(ops[0].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=utm +zone=55 +south +ellps=GRS80')
        self.assertEqual(ops[0].accuracy, 0.0)
        self.assertTrue(ops[0].isAvailable)
        # uses a grid file
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:4283'),
                                           QgsCoordinateReferenceSystem('EPSG:7844'))
        self.assertGreaterEqual(len(ops), 5)
        # Operations come back in no guaranteed order, so locate each expected
        # pipeline by its proj string before asserting on its details.
        op1_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
        self.assertTrue(ops[op1_index].name)
        self.assertEqual(ops[op1_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        self.assertTrue(ops[op1_index].isAvailable)
        self.assertEqual(ops[op1_index].accuracy, 0.01)
        self.assertEqual(len(ops[op1_index].grids), 0)
        if QgsProjUtils.projVersionMajor() == 6:
            op2_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
        else:
            op2_index = [i for i in range(len(ops)) if ops[
                i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
                0]
        self.assertTrue(ops[op2_index].name)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op2_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        else:
            self.assertEqual(ops[op2_index].proj,
                             '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        self.assertEqual(ops[op2_index].accuracy, 0.05)
        self.assertEqual(len(ops[op2_index].grids), 1)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op2_index].grids[0].shortName, 'GDA94_GDA2020_conformal_and_distortion.gsb')
        else:
            self.assertEqual(ops[op2_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_and_distortion.tif')
        self.assertEqual(ops[op2_index].grids[0].fullName, '')
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertTrue(ops[op2_index].grids[0].packageName)
        self.assertIn('http', ops[op2_index].grids[0].url)
        self.assertTrue(ops[op2_index].grids[0].directDownload)
        self.assertTrue(ops[op2_index].grids[0].openLicense)
        if QgsProjUtils.projVersionMajor() == 6:
            op3_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
        else:
            op3_index = [i for i in range(len(ops)) if ops[
                i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
                0]
        self.assertTrue(ops[op3_index].name)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op3_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        else:
            self.assertEqual(ops[op3_index].proj,
                             '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        self.assertEqual(ops[op3_index].accuracy, 0.05)
        self.assertEqual(len(ops[op3_index].grids), 1)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op3_index].grids[0].shortName, 'GDA94_GDA2020_conformal.gsb')
        else:
            self.assertEqual(ops[op3_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal.tif')
        self.assertEqual(ops[op3_index].grids[0].fullName, '')
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertTrue(ops[op3_index].grids[0].packageName)
        self.assertIn('http', ops[op3_index].grids[0].url)
        self.assertTrue(ops[op3_index].grids[0].directDownload)
        self.assertTrue(ops[op3_index].grids[0].openLicense)
        if QgsProjUtils.projVersionMajor() == 6:
            op4_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_cocos_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
        else:
            op4_index = [i for i in range(len(ops)) if ops[
                i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_cocos_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
                0]
        self.assertTrue(ops[op4_index].name)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op4_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_cocos_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        else:
            self.assertEqual(ops[op4_index].proj,
                             '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_cocos_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        self.assertEqual(ops[op4_index].accuracy, 0.05)
        self.assertEqual(len(ops[op4_index].grids), 1)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op4_index].grids[0].shortName, 'GDA94_GDA2020_conformal_cocos_island.gsb')
        else:
            self.assertEqual(ops[op4_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_cocos_island.tif')
        self.assertEqual(ops[op4_index].grids[0].fullName, '')
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertTrue(ops[op4_index].grids[0].packageName)
        self.assertIn('http', ops[op4_index].grids[0].url)
        if QgsProjUtils.projVersionMajor() == 6:
            op5_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_christmas_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
        else:
            op5_index = [i for i in range(len(ops)) if ops[
                i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_christmas_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
                0]
        self.assertTrue(ops[op5_index].name)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op5_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_christmas_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        else:
            self.assertEqual(ops[op5_index].proj,
                             '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_christmas_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
        self.assertEqual(ops[op5_index].accuracy, 0.05)
        self.assertEqual(len(ops[op5_index].grids), 1)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op5_index].grids[0].shortName, 'GDA94_GDA2020_conformal_christmas_island.gsb')
        else:
            self.assertEqual(ops[op5_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_christmas_island.tif')
        self.assertEqual(ops[op5_index].grids[0].fullName, '')
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertTrue(ops[op5_index].grids[0].packageName)
        self.assertIn('http', ops[op5_index].grids[0].url)
        # uses a pivot datum (technically a proj test, but this will help me sleep at night ;)
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
                                           QgsCoordinateReferenceSystem('EPSG:7899'))
        self.assertGreaterEqual(len(ops), 3)
        op1_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
        self.assertTrue(ops[op1_index].name)
        self.assertEqual(ops[op1_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
        self.assertTrue(ops[op1_index].isAvailable)
        self.assertEqual(ops[op1_index].accuracy, 0.01)
        self.assertEqual(len(ops[op1_index].grids), 0)
        if QgsProjUtils.projVersionMajor() == 6:
            op2_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
        else:
            op2_index = [i for i in range(len(ops)) if ops[
                i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][
                0]
        self.assertTrue(ops[op2_index].name)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op2_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
        else:
            self.assertEqual(ops[op2_index].proj,
                             '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
        self.assertEqual(ops[op2_index].accuracy, 0.05)
        self.assertEqual(len(ops[op2_index].grids), 1)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op2_index].grids[0].shortName, 'GDA94_GDA2020_conformal_and_distortion.gsb')
        else:
            self.assertEqual(ops[op2_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_and_distortion.tif')
        self.assertEqual(ops[op2_index].grids[0].fullName, '')
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertTrue(ops[op2_index].grids[0].packageName)
        self.assertIn('http', ops[op2_index].grids[0].url)
        self.assertTrue(ops[op2_index].grids[0].directDownload)
        self.assertTrue(ops[op2_index].grids[0].openLicense)
        if QgsProjUtils.projVersionMajor() == 6:
            op3_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
        else:
            op3_index = [i for i in range(len(ops)) if ops[
                i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][
                0]
        self.assertTrue(ops[op3_index].name)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op3_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
        else:
            self.assertEqual(ops[op3_index].proj,
                             '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
        self.assertEqual(ops[op3_index].accuracy, 0.05)
        self.assertEqual(len(ops[op3_index].grids), 1)
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertEqual(ops[op3_index].grids[0].shortName, 'GDA94_GDA2020_conformal.gsb')
        else:
            self.assertEqual(ops[op3_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal.tif')
        self.assertEqual(ops[op3_index].grids[0].fullName, '')
        if QgsProjUtils.projVersionMajor() == 6:
            self.assertTrue(ops[op3_index].grids[0].packageName)
        self.assertIn('http', ops[op3_index].grids[0].url)
        self.assertTrue(ops[op3_index].grids[0].directDownload)
        self.assertTrue(ops[op3_index].grids[0].openLicense)
    @unittest.skipIf(QgsProjUtils.projVersionMajor() < 7, 'Not a proj >= 7 build')
    def testNoLasLos(self):
        """
        Test that operations which rely on an las/los grid shift file (which are unsupported by Proj6) are not returned
        """
        ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3035'),
                                           QgsCoordinateReferenceSystem('EPSG:5514'))
        self.assertEqual(len(ops), 3)
        self.assertTrue(ops[0].name)
        self.assertTrue(ops[0].proj)
        self.assertTrue(ops[1].name)
        self.assertTrue(ops[1].proj)
        self.assertTrue(ops[2].name)
        self.assertTrue(ops[2].proj)
# Allow running this test module directly (outside the QGIS test runner).
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
wittrup/crap | sys/2/checkpayments.py | 1 | 7805 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# PEP 263 -- Defining Python Source Code Encodings https://www.python.org/dev/peps/pep-0263/
# PEP 3120 -- Using UTF-8 as the default source encoding https://www.python.org/dev/peps/pep-3120/
"""At least once a day, get all bookings, last 32 hours from now.
If booking not in list, append to list, trigger new booking event.
Check if customer is in list, if not, fetch customer data.
If customer has Bnr, post datetime, amount, Bnr."""
import os
from datetime import timedelta, datetime
from dateutil.tz import tzlocal
import argparse
import requests
import json
import common
from time import strftime as now
# Load line-oriented configuration. Expected layout of config.txt:
#   line 1: API key            line 2: secret key
#   line 3: GET host template  line 4: POST host template
#   line 5: JSON map of payment keys to keep
#   line 6: member-id custom field name
#   line 7: username           line 8: token
# Use a context manager so the file handle is closed deterministically
# (the original left it to the garbage collector).
with open("config.txt") as _config_file:
    m = [l.strip() for l in _config_file.readlines()]
apiKey, secretKey, host, hostpost = m[:4]
payload = {"apiKey": apiKey, "secretKey": secretKey}
paketoke = json.loads(m[4]) # payments keys to keep
meidcufi = m[5].lower() # member id custom field
username = m[6]
token = m[7]
def getpayments(url, startTime, endTime, payload):
    """Fetch all payments between startTime and endTime, following pagination.

    Returns the decoded response dict with all pages merged into its "data"
    list, or None on a non-200 first response.  Returns None implicitly when
    the expected "info"/"data" keys are missing from the response body?
    No — the partially-checked response is still returned (see the final
    return below); only the pagination step is skipped.

    NOTE(review): mutates the caller's *payload* dict in place (adds
    startTime/endTime/pagination keys) — confirm callers expect this.
    """
    payload.update({"startTime": strftime(startTime), "endTime": strftime(endTime)})
    r = requests.get(url, params=payload)
    if r.status_code != 200:
        print(r, r.text, file=logfile)
        return None
    else:
        response = json.loads(r.text)
        if all(key in response for key in ["info", "data"]):
            if all(key in response["info"] for key in ['totalPages', 'currentPage', 'pageNavigationToken']):
                # locals().update(response["info"])
                # for pageNumber in range(currentPage + 1, totalPages + 1):
                # Fetch every remaining page and append its data to the
                # first page's "data" list.
                for pageNumber in range(response["info"]["currentPage"] + 1, response["info"]["totalPages"] + 1):
                    payload.update({"pageNavigationToken": response["info"]["pageNavigationToken"],
                                    "pageNumber": pageNumber})
                    r = requests.get(url, params=payload)
                    nextlist = json.loads(r.text)
                    if "data" in nextlist:
                        response["data"] += nextlist["data"]
        return response
def append(record):
    """Record one payment into the per-day ``files`` cache and, when the
    member is active, post a transaction for it.

    record: dict with at least "date" (YYYY-MM-DD HH:MM:SS), "customerId",
    "amount" and "currency".  Uses the module-level caches ``files``,
    ``customers`` and ``memberIds`` plus the configured endpoints.
    """
    date = record["date"]
    timestamp = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
    filekey = datetime.strftime(timestamp, "%Y-%m-%d")
    if filekey not in files or not files[filekey]:
        # Lazily load (or initialise) the JSON file for this calendar day.
        filename = datetime.strftime(timestamp, "%Y/%m %B/%d.json")
        files[filekey] = common.jsloifex(filename, {})
    customerId = record["customerId"]
    if not date in files[filekey]:
        files[filekey][date] = {}
    if not customerId in files[filekey][date]:
        files[filekey][date][customerId] = {}
    files[filekey][date][customerId].update((key, record[key]) for key in ["amount", "currency"])
    load_customer_id(host, customerId)
    # NOTE(review): raises KeyError if load_customer_id could not resolve a
    # member id for this customer — confirm whether that is intended.
    memberId = customers[customerId]
    target = files[filekey][date][customerId]
    if memberId not in memberIds:
        # Cache the member's status code from the remote service.
        r = requests.get(hostpost % r'members/' + memberId + r'/status', auth=(username, token))
        if r.status_code != 200:
            print("WARNING:", r.status_code, r.text, file=logfile)
        if "statusCode" in r.json():
            memberIds[memberId] = r.json()["statusCode"]
    sevaifno(target, "MID", memberId)
    # NOTE(review): KeyError here if the status fetch above failed to yield a
    # statusCode — the "memberId in memberIds" guard below comes too late.
    sevaifno(target, "MSC", memberIds[memberId])
    if memberId in memberIds:
        # Post a transaction only for active members (status 1) that have not
        # already been posted (no TSC/TRI markers on the record).
        if memberIds[memberId] == 1 and "TSC" not in target and "TRI" not in target:
            payload = {}
            payload["date"] = date
            payload.update((key, record[key]) for key in ["amount", "currency"])
            payload["memberId"] = memberId
            r = requests.post(hostpost % 'transactions', data=payload, auth=(username, token))
            if r.status_code != 200:
                print("WARNING:", r.status_code, r.text, file=logfile)
            else:
                print(r.text, file=logfile)
                # Remember the transaction so it is not posted twice.
                sevaifno(target, "TSC", r.json()["statusCode"])
                sevaifno(target, "TRI", r.json()["data"]["transaction_ref_id"])
def load_customer_id(host, customerId):  # Resolve a customer's member id into the ``customers`` cache.
    """Fetch the customer record and cache its member-id custom field.

    Leaves ``customers`` unchanged when the customer cannot be fetched or
    has no custom field named ``meidcufi``.
    """
    if customerId not in customers:
        customer = get_customer_id(host, customerId)
        if customer and "customFields" in customer and type(customer["customFields"]) is list:
            for customField in customer["customFields"]:
                if type(customField) is dict and all(k in customField for k in ["name", "value"]):
                    # Case-insensitive match against the configured field name.
                    if customField["name"].lower() == meidcufi:
                        customers[customerId] = customField["value"]
def sevaifno(target, key, value):
    """Set ``target[key] = value`` only when *key* is not already present
    (the name abbreviates "set value if not exists")."""
    target.setdefault(key, value)
def write():
    """Persist every cached per-day record in ``files`` back to its
    'YYYY/MM Month/DD.json' file (keys sorted via odpl=True — TODO confirm
    against common.jsstfacp)."""
    for filekey, record in files.items():
        timestamp = datetime.strptime(filekey, "%Y-%m-%d")
        filename = datetime.strftime(timestamp, "%Y/%m %B/%d.json")
        common.jsstfacp(record, filename, odpl=True)
def get_customer_id(host, customerId):
    """GET one customer record; return the decoded JSON dict, or None on a
    non-200 response.  *host* is a template with one %s slot filled with
    'customers', then '/%s' appends the customer id."""
    r = requests.get((host + "/%s") % ("customers", customerId), params=payload)
    return r.json() if r.status_code == 200 else None
def strftime(time):
    """Return a string representing the date and time as expressed in the
    RFC 3339 date-time format (https://tools.ietf.org/html/rfc3339).

    %z produces an offset like "+0100"; RFC 3339 requires "+01:00", so a
    colon is inserted two characters from the end of the offset.  For a
    naive datetime %z is empty and the bare timestamp is returned unchanged
    (previously a stray trailing colon was appended in that case).
    """
    text = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S%z")
    if len(text) <= 19:
        # Naive datetime: no UTC offset to reformat.
        return text
    return text[:22] + ":" + text[22:]
if __name__ == '__main__':
    # Command-line interface: optional output folder, end date (-d) and
    # start date (-s).  Defaults: end = now, start = end minus one day.
    parser = argparse.ArgumentParser()
    parser.add_argument('-O', dest='output_folder', help="Output folder",
                        default=os.path.dirname(os.path.realpath(__file__)))
    parser.add_argument('-d', dest='date', help="Date to check, default today FORMAT: YYYY-MM-DD")
    parser.add_argument('-s', dest='strtime', help="Date to start at, default today minus one day")
    args = parser.parse_args()
    workpath = args.output_folder
    # Get the current date/time with the timezone.
    runTime = datetime.now(tzlocal())
    if args.date:
        # End of the requested day: midnight of the given date plus one day.
        argTime = datetime.strptime(args.date, '%Y-%m-%d')
        endTime = runTime.replace(argTime.year, argTime.month, argTime.day, hour=0, minute=0, second=0, microsecond=0)
        endTime += timedelta(days=1)
    else:
        endTime = runTime
    if args.strtime:
        strTime = datetime.strptime(args.strtime, '%Y-%m-%d') if False else None  # placeholder removed
    else:
        strTime = endTime - timedelta(days=1)
    logfile = open(now("%Y") + ".log", "a", encoding="utf-8")
    print(strftime(runTime), "Script Start - Checking for new payments", file=logfile)
    print(strftime(strTime), "startTime", file=logfile)
    print(strftime(endTime), "endTime", file=logfile)
    files = {}
    # Persistent caches shared with append()/load_customer_id().
    customers = common.jsloifex(workpath + r'/customers.json')
    memberIds = common.jsloifex(workpath + r'/memberIds.json')
    payments = getpayments(host % "payments", strTime, endTime, payload)["data"]
    for payment in payments:
        # Flatten each payment into the subset of keys configured in
        # paketoke: list values pull nested items, str values rename keys.
        values = {}
        for key, val in paketoke.items():
            if key in payment:
                if type(val) is list:
                    for item in val:
                        values[item] = payment[key][item]
                elif type(val) is str:
                    values[val] = payment[key]
                else:
                    values[key] = payment[key]
                continue
            print("All required keys [" + key + "] not found in data: '", payment, "'", file=logfile)
            break
        if len(values["date"]) != 19:
            # Normalise ISO 'T'-separated timestamps to "YYYY-MM-DD HH:MM:SS".
            values["date"] = values["date"][:19].replace("T", " ")
        append(values)
    common.jsstfacp(customers, workpath + r'/customers.json', indent=2)
    common.jsstfacp(memberIds, workpath + r'/memberIds.json', indent=2)
    write()
    print(strftime(datetime.now(tzlocal())), "Script End", file=logfile)
| mit |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib_pypy/_pypy_wait.py | 10 | 1412 | from resource import _struct_rusage, struct_rusage
from ctypes import CDLL, c_int, POINTER, byref
from ctypes.util import find_library
__all__ = ["wait3", "wait4"]
# Bind the C library's wait3/wait4 via ctypes and declare their signatures:
#   pid_t wait3(int *status, int options, struct rusage *rusage);
#   pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage);
libc = CDLL(find_library("c"))
c_wait3 = libc.wait3
c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
c_wait3.restype = c_int
c_wait4 = libc.wait4
c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
c_wait4.restype = c_int
def create_struct_rusage(c_struct):
    """Convert a ctypes ``_struct_rusage`` into a ``resource.struct_rusage``.

    The two CPU-time fields are converted to float; the remaining counters
    are passed through unchanged, in the order struct_rusage expects.
    """
    field_names = (
        "ru_utime", "ru_stime", "ru_maxrss", "ru_ixrss", "ru_idrss",
        "ru_isrss", "ru_minflt", "ru_majflt", "ru_nswap", "ru_inblock",
        "ru_oublock", "ru_msgsnd", "ru_msgrcv", "ru_nsignals",
        "ru_nvcsw", "ru_nivcsw",
    )
    values = [getattr(c_struct, name) for name in field_names]
    values[0] = float(values[0])
    values[1] = float(values[1])
    return struct_rusage(values)
def wait3(options):
    """Wait for any child process; emulate os.wait3().

    Returns (pid, status, resource-usage) like os.wait3; *options* is the
    usual waitpid option bitmask (e.g. os.WNOHANG).
    """
    status = c_int()
    _rusage = _struct_rusage()
    pid = c_wait3(byref(status), c_int(options), byref(_rusage))
    rusage = create_struct_rusage(_rusage)
    return pid, status.value, rusage
def wait4(pid, options):
    """Wait for completion of the child process *pid*; emulate os.wait4().

    Returns (pid, status, resource-usage) like os.wait4; *options* is the
    usual waitpid option bitmask.
    """
    status = c_int()
    _rusage = _struct_rusage()
    pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
    rusage = create_struct_rusage(_rusage)
    return pid, status.value, rusage
| mit |
juangj/selenium | py/test/selenium/webdriver/common/api_example_tests.py | 4 | 10165 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchWindowException,
TimeoutException,
WebDriverException)
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def testGetTitle(driver, pages):
    """driver.title exposes the loaded page's <title>."""
    pages.load("simpleTest.html")
    title = driver.title
    assert "Hello WebDriver" == title
def testGetCurrentUrl(driver, pages, webserver):
    """driver.current_url reflects the URL of the loaded page."""
    pages.load("simpleTest.html")
    url = driver.current_url
    assert webserver.where_is('simpleTest.html') == url
def testFindElementsByXPath(driver, pages):
    """A single element can be located by XPath and its text read."""
    pages.load("simpleTest.html")
    elem = driver.find_element_by_xpath("//h1")
    assert "Heading" == elem.text
def testFindElementByXpathThrowNoSuchElementException(driver, pages):
    """Locating a non-existent element raises NoSuchElementException."""
    pages.load("simpleTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element_by_xpath("//h4")
def testFindElementsByXpath(driver, pages):
    """find_elements_by_xpath returns every matching element."""
    pages.load("nestedElements.html")
    elems = driver.find_elements_by_xpath("//option")
    assert 48 == len(elems)
    assert "One" == elems[0].get_attribute("value")
def testFindElementsByName(driver, pages):
    """An element can be located by its name attribute."""
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_name("windowOne")
    assert "Open new window" == elem.text
def testFindElementsByNameInElementContext(driver, pages):
    """find_element_by_name scoped to a parent element searches within it."""
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_name("selectomatic")
    assert "2" == sub_elem.get_attribute("id")
def testFindElementsByLinkTextInElementContext(driver, pages):
    """find_element_by_link_text scoped to a parent element works."""
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("div1")
    sub_elem = elem.find_element_by_link_text("hello world")
    assert "link1" == sub_elem.get_attribute("name")
def testFindElementByIdInElementContext(driver, pages):
    """find_element_by_id scoped to a parent element works."""
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_id("2")
    assert "selectomatic" == sub_elem.get_attribute("name")
def testFindElementByXpathInElementContext(driver, pages):
    """A relative XPath from a parent element resolves within that element."""
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_xpath("select")
    assert "2" == sub_elem.get_attribute("id")
def testFindElementByXpathInElementContextNotFound(driver, pages):
    """A scoped XPath miss raises NoSuchElementException."""
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    with pytest.raises(NoSuchElementException):
        elem.find_element_by_xpath("div")
def testShouldBeAbleToEnterDataIntoFormFields(driver, pages):
    """clear() + send_keys() updates a text input's value attribute."""
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    elem.clear()
    elem.send_keys("some text")
    # Re-locate the element to confirm the value stuck in the DOM.
    elem = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    assert "some text" == elem.get_attribute("value")
def testFindElementByTagName(driver, pages):
    """Tag-name lookup matches the equivalent XPath query, and a missing
    tag yields an empty list rather than an error."""
    pages.load("simpleTest.html")
    elems = driver.find_elements_by_tag_name("div")
    num_by_xpath = len(driver.find_elements_by_xpath("//div"))
    assert num_by_xpath == len(elems)
    elems = driver.find_elements_by_tag_name("iframe")
    assert 0 == len(elems)
def testFindElementByTagNameWithinElement(driver, pages):
pages.load("simpleTest.html")
div = driver.find_element_by_id("multiline")
elems = div.find_elements_by_tag_name("p")
assert len(elems) == 1
@pytest.mark.xfail_marionette(
    reason="W3C implementations can't switch to a window by name",
    raises=TimeoutException,
    run=False)
def testSwitchToWindow(driver, pages):
    """Clicking a link opens a named window and switch_to.window reaches it."""
    title_1 = "XHTML Test Page"
    title_2 = "We Arrive Here"
    switch_to_window_timeout = 5
    # The new window may not exist yet right after the click, so retry the
    # switch until it succeeds (NoSuchWindowException is swallowed meanwhile).
    wait = WebDriverWait(driver, switch_to_window_timeout, ignored_exceptions=[NoSuchWindowException])
    pages.load("xhtmlTest.html")
    driver.find_element_by_link_text("Open new window").click()
    assert title_1 == driver.title
    wait.until(lambda dr: dr.switch_to.window("result") is None)
    assert title_2 == driver.title
def testSwitchFrameByName(driver, pages):
    """After switching into a frame, elements inside it are reachable."""
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("third"))
    checkbox = driver.find_element_by_id("checky")
    checkbox.click()
    checkbox.submit()
def testIsEnabled(driver, pages):
    """is_enabled reflects the element's disabled state."""
    pages.load("formPage.html")
    elem = driver.find_element_by_xpath("//input[@id='working']")
    assert elem.is_enabled()
    elem = driver.find_element_by_xpath("//input[@id='notWorking']")
    assert not elem.is_enabled()
def testIsSelectedAndToggle(driver, pages):
    """Clicking a multi-select option toggles its selected state."""
    pages.load("formPage.html")
    elem = driver.find_element_by_id("multi")
    option_elems = elem.find_elements_by_xpath("option")
    assert option_elems[0].is_selected()
    option_elems[0].click()
    assert not option_elems[0].is_selected()
    option_elems[0].click()
    assert option_elems[0].is_selected()
    assert option_elems[2].is_selected()
def testNavigate(driver, pages):
    """back()/forward() move through the session history after a form submit."""
    pages.load("formPage.html")
    driver.find_element_by_id("imageButton").submit()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))
    driver.back()
    assert "We Leave From Here" == driver.title
    driver.forward()
    assert "We Arrive Here" == driver.title
def testGetAttribute(driver, pages):
    """href of an anchor to '#' is reported as the absolute page URL plus '#'."""
    url = pages.url('xhtmlTest.html')
    driver.get(url)
    elem = driver.find_element_by_id("id1")
    attr = elem.get_attribute("href")
    assert '{0}#'.format(url) == attr
def testGetImplicitAttribute(driver, pages):
    """get_attribute falls back to DOM properties (option 'index' here)."""
    pages.load("nestedElements.html")
    elems = driver.find_elements_by_xpath("//option")
    assert len(elems) >= 3
    for i, elem in enumerate(elems[:3]):
        assert i == int(elem.get_attribute("index"))
def testExecuteSimpleScript(driver, pages):
    """execute_script returns the script's return value to Python."""
    pages.load("xhtmlTest.html")
    title = driver.execute_script("return document.title;")
    assert "XHTML Test Page" == title
def testExecuteScriptAndReturnElement(driver, pages):
    """A DOM node returned from JS arrives as a WebElement wrapper."""
    pages.load("xhtmlTest.html")
    elem = driver.execute_script("return document.getElementById('id1');")
    assert "WebElement" in str(type(elem))
def testExecuteScriptWithArgs(driver, pages):
    """Extra execute_script arguments are exposed to JS via arguments[]."""
    pages.load("xhtmlTest.html")
    result = driver.execute_script("return arguments[0] == 'fish' ? 'fish' : 'not fish';", "fish")
    assert "fish" == result
def testExecuteScriptWithMultipleArgs(driver, pages):
    """Multiple script arguments are marshalled positionally."""
    pages.load("xhtmlTest.html")
    result = driver.execute_script(
        "return arguments[0] + arguments[1]", 1, 2)
    assert 3 == result
def testExecuteScriptWithElementArgs(driver, pages):
    """A WebElement passed as a script argument maps back to its DOM node."""
    pages.load("javascriptPage.html")
    button = driver.find_element_by_id("plainButton")
    result = driver.execute_script("arguments[0]['flibble'] = arguments[0].getAttribute('id'); return arguments[0]['flibble'];", button)
    assert "plainButton" == result
def testFindElementsByPartialLinkText(driver, pages):
    """Partial link-text lookup resolves and the element is clickable."""
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_partial_link_text("new window")
    elem.click()
def testIsElementDisplayed(driver, pages):
    """is_displayed distinguishes visible elements from hidden ones."""
    pages.load("javascriptPage.html")
    visible = driver.find_element_by_id("displayed").is_displayed()
    not_visible = driver.find_element_by_id("hidden").is_displayed()
    assert visible
    assert not not_visible
@pytest.mark.xfail_phantomjs(
    reason='https://github.com/detro/ghostdriver/issues/466')
def testMoveWindowPosition(driver, pages):
    """set_window_position moves the window and get_window_position agrees."""
    pages.load("blank.html")
    loc = driver.get_window_position()
    # note can't test 0,0 since some OS's dont allow that location
    # because of system toolbars
    new_x = 50
    new_y = 50
    # If the window already happens to be at the target, nudge the target so
    # the test actually observes a move.
    if loc['x'] == new_x:
        new_x += 10
    if loc['y'] == new_y:
        new_y += 10
    driver.set_window_position(new_x, new_y)
    loc = driver.get_window_position()
    assert loc['x'] == new_x
    assert loc['y'] == new_y
def testChangeWindowSize(driver, pages):
    """set_window_size resizes the window and get_window_size agrees."""
    pages.load("blank.html")
    size = driver.get_window_size()
    newSize = [600, 600]
    # Pick a size different from the current one so a change is observable.
    if size['width'] == 600:
        newSize[0] = 500
    if size['height'] == 600:
        newSize[1] = 500
    driver.set_window_size(newSize[0], newSize[1])
    size = driver.get_window_size()
    assert size['width'] == newSize[0]
    assert size['height'] == newSize[1]
@pytest.mark.xfail_marionette(
    raises=WebDriverException)
def testGetLogTypes(driver, pages):
    """driver.log_types reports available log categories as a list."""
    pages.load("blank.html")
    assert isinstance(driver.log_types, list)
@pytest.mark.xfail_marionette(
    raises=WebDriverException)
def testGetLog(driver, pages):
    """Every advertised log type can actually be fetched as a list of entries."""
    pages.load("blank.html")
    for log_type in driver.log_types:
        log = driver.get_log(log_type)
        assert isinstance(log, list)
| apache-2.0 |
toshywoshy/ansible | test/units/modules/storage/netapp/test_na_ontap_vscan_scanner_pool.py | 38 | 6534 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for Ansible module: na_ontap_vscan_scanner_pool '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vscan_scanner_pool \
import NetAppOntapVscanScannerPool as scanner_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Serialize *args* into the stdin payload AnsibleModule reads at creation.

    Stores the JSON-encoded argument blob on the private module-basic hook so
    the next AnsibleModule instantiation picks these arguments up.
    """
    payload = {'ANSIBLE_MODULE_ARGS': args}
    basic._ANSIBLE_ARGS = to_bytes(json.dumps(payload))  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Exception class to be raised by module.exit_json and caught by the test case"""
    pass
class AnsibleFailJson(Exception):
    """Exception class to be raised by module.fail_json and caught by the test case"""
    pass
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Replacement for module.exit_json: package the result into an exception.

    Mirrors the real exit_json contract by guaranteeing a 'changed' key
    (defaulting to False) before raising AnsibleExitJson for the test to catch.
    """
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Replacement for module.fail_json: package the failure into an exception.

    Marks the result dict as failed and raises AnsibleFailJson for the test
    to catch.
    """
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''
    def __init__(self, kind=None, data=None):
        ''' save arguments '''
        # kind: None for "resource absent" responses, 'scanner' to echo back
        # a scanner-pool record built from `data`.
        self.kind = kind
        self.params = data
        self.xml_in = None   # last request ZAPI element, for inspection
        self.xml_out = None  # last response ZAPI element, for inspection
    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.kind == 'scanner':
            xml = self.build_scanner_pool_info(self.params)
        self.xml_out = xml
        return xml
    @staticmethod
    def build_scanner_pool_info(sanner_details):
        # Build the ZAPI response shape the module expects:
        # num-records + attributes-list/vscan-scanner-pool-info.
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'vscan-scanner-pool-info': {
                    'scanner-pool': sanner_details['scanner_pool'],
                    'scanner-policy': sanner_details['scanner_policy']
                }
            }
        }
        xml.translate_struct(attributes)
        return xml
class TestMyModule(unittest.TestCase):
    ''' Unit tests for na_ontap_job_schedule '''
    def setUp(self):
        # Route exit_json/fail_json through the exception-raising stand-ins
        # so apply() outcomes can be asserted with pytest.raises.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        # Canonical scanner-pool parameters reused by every test below.
        self.mock_scanner = {
            'state': 'present',
            'scanner_pool': 'test_pool',
            'vserver': 'test_vserver',
            'hostnames': ['host1', 'host2'],
            'privileged_users': ['domain\\admin', 'domain\\carchi8py'],
            'scanner_policy': 'primary'
        }
    def mock_args(self):
        # Full argument dict for set_module_args, including connection params.
        return {
            'state': self.mock_scanner['state'],
            'scanner_pool': self.mock_scanner['scanner_pool'],
            'vserver': self.mock_scanner['vserver'],
            'hostnames': self.mock_scanner['hostnames'],
            'privileged_users': self.mock_scanner['privileged_users'],
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!',
            'scanner_policy': self.mock_scanner['scanner_policy']
        }
    def get_scanner_mock_object(self, kind=None):
        # Build the module under test with its server swapped for the mock;
        # kind='scanner' simulates an existing pool, None simulates absence.
        scanner_obj = scanner_module()
        scanner_obj.asup_log_for_cserver = Mock(return_value=None)
        if kind is None:
            scanner_obj.server = MockONTAPConnection()
        else:
            scanner_obj.server = MockONTAPConnection(kind='scanner', data=self.mock_scanner)
        return scanner_obj
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            scanner_module()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_get_nonexistent_scanner(self):
        ''' Test if get_scanner_pool returns None for non-existent job '''
        set_module_args(self.mock_args())
        result = self.get_scanner_mock_object().get_scanner_pool()
        assert not result
    def test_get_existing_scanner(self):
        ''' Test if get_scanner_pool returns None for non-existent job '''
        set_module_args(self.mock_args())
        result = self.get_scanner_mock_object('scanner').get_scanner_pool()
        assert result
    def test_successfully_create(self):
        # Pool absent + state=present => change reported.
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_scanner_mock_object().apply()
        assert exc.value.args[0]['changed']
    def test_create_idempotency(self):
        # Pool already present with same settings => no change.
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_scanner_mock_object('scanner').apply()
        assert not exc.value.args[0]['changed']
    def test_apply_policy(self):
        # Changing scanner_policy on an existing pool => change reported.
        data = self.mock_args()
        data['scanner_policy'] = 'secondary'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_scanner_mock_object('scanner').apply()
        assert exc.value.args[0]['changed']
    def test_successfully_delete(self):
        # Pool present + state=absent => change reported.
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_scanner_mock_object('scanner').apply()
        assert exc.value.args[0]['changed']
    def test_delete_idempotency(self):
        # Pool already absent + state=absent => no change.
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_scanner_mock_object().apply()
        assert not exc.value.args[0]['changed']
| gpl-3.0 |
mpatacchiola/pyERA | pyERA/utils.py | 1 | 2520 | #!/usr/bin/python
import numpy as np
class ExponentialDecay:
    """Applies an exponential decay schedule to a starting value.

    decayed_value = starter_value * decay_rate ** (global_step / decay_step)

    With staircase=True the exponent is only re-evaluated when global_step is
    a multiple of decay_step; between multiples the last computed value is
    returned unchanged.
    """
    def __init__(self, starter_value, decay_step, decay_rate, staircase=False):
        """Save the schedule parameters.

        @param starter_value the value to decay
        @param decay_step (positive integer) steps per decay period
        @param decay_rate multiplicative decay factor per period
        @param staircase if True, decay in discrete intervals
        """
        self.starter_value = starter_value
        self.decay_step = decay_step
        self.decay_rate = decay_rate
        self.staircase = staircase
        self.last_value = starter_value
    def return_decayed_value(self, global_step):
        """Returns the decayed value at the given step.

        decayed_value = starter_value * decay_rate ** (global_step / decay_step)

        @param global_step the global step to use for decay (positive integer)
        """
        if(self.staircase == False):
            # BUGFIX: the original multiplied last_value (not starter_value)
            # by the decay factor, compounding the decay on every call and
            # contradicting both the documented formula and the staircase
            # branch below. Decay from the starting value instead.
            decayed_value = self.starter_value * np.power(self.decay_rate, (global_step / self.decay_step))
            self.last_value = decayed_value
            return decayed_value
        else:
            if(global_step % self.decay_step == 0):
                decayed_value = self.starter_value * np.power(self.decay_rate, (global_step / self.decay_step))
                self.last_value = decayed_value
                return decayed_value
            else:
                # Between decay boundaries: hold the last computed value.
                return self.last_value
class LinearDecay:
    """Applies a linear decay schedule to a starting value.

    decayed_value = starter_value - decay_rate * global_step
    """
    def __init__(self, starter_value, decay_rate, allow_negative=True):
        """Save the schedule parameters.

        @param starter_value the value to decay
        @param decay_rate amount subtracted per step
        @param allow_negative if False, clamp the result at 0.0
        """
        self.starter_value = starter_value
        self.decay_rate = decay_rate
        self.allow_negative = allow_negative
    def return_decayed_value(self, global_step):
        """Returns the linearly decayed value at the given step.

        @param global_step the global step to use for decay (positive integer)
        """
        decayed_value = self.starter_value - (self.decay_rate * global_step)
        # Clamp at zero when negative values are disallowed.
        if not self.allow_negative and decayed_value < 0:
            return 0.0
        return decayed_value
class Normalizer:
    """Namespace for normalisation helpers."""
    @staticmethod
    def softmax(x):
        """Compute softmax values for each sets of scores in the input vector.

        BUGFIX: softmax was declared without `self` and without
        @staticmethod, so calling it on an instance raised TypeError
        (the instance was bound as `x`). Declared as a staticmethod it is
        callable both as Normalizer.softmax(x) and instance.softmax(x).

        @param x is the input vector/matrix to normalize
        """
        # Subtract the max for numerical stability before exponentiating.
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)
| mit |
devclone/enigma2-9f38fd6 | tools/svg2skin.py | 98 | 2458 | #!/usr/bin/python
# don't expect too much.
# this is a really simple&stupid svg parser, which will use rectangles
# and text fields to produce <widget> snippets for a skin.
# use object "id" fields for source names if you want.
# extracting font information is buggy.
# if you want text fields, please use flow text regions, instead of simple
# text. otherwise, width and height are unknown.
#
# tested only with a single inkscape-generated SVG.
import sys
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
def getattrs(attrs, *a):
    """Return the values of the named SAX attributes converted to float.

    @param attrs mapping of attribute name -> string value
    @param a attribute names to extract, in order
    @return list of floats, one per requested name
    """
    # Comprehension replaces the manual append loop; KeyError/ValueError
    # propagate exactly as before for missing or non-numeric attributes.
    return [float(attrs[name]) for name in a]
def parsedict(attrs):
    """Parse a CSS-like 'key:val;key:val' style string into a dict.

    Returns an empty list (not a dict) for a falsy input, matching the
    original behaviour callers rely on.
    """
    if not attrs:
        return []
    pairs = (entry.split(':') for entry in attrs.split(';'))
    return {key: val for key, val in pairs}
def px(x):
    """Convert a CSS pixel string like '12.3px' to the nearest integer."""
    numeric = float(x[:-2])  # strip the trailing 'px' unit
    return int(numeric + .5)
def contains(box_o, box_i):
    """Return True if rectangle box_o geometrically contains box_i.

    Boxes are (x, y, width, height) tuples.

    BUGFIX: the original compared raw widths/heights (box_o[2] >= box_i[2]),
    which wrongly reports containment whenever the inner box is merely
    smaller, even if it extends past the outer box's right/bottom edge.
    Compare the actual right and bottom edges instead.
    """
    return (box_o[0] <= box_i[0] and box_o[1] <= box_i[1]
            and box_o[0] + box_o[2] >= box_i[0] + box_i[2]
            and box_o[1] + box_o[3] >= box_i[1] + box_i[3])
class parseXML(ContentHandler):
    """SAX handler with two modes: first pass finds the outermost bounding
    rect (find_bbox=True), second pass emits <widget> lines relative to it."""
    def __init__(self):
        self.isPointsElement, self.isReboundsElement = 0, 0
        self.bbox = None        # outermost rect found in pass 1: (x, y, w, h)
        self.find_bbox = False  # True during pass 1
        self.flow = None        # pending flowRoot id to attach to its rect
    def startElement(self, name, attrs):
        if self.find_bbox:
            # Pass 1: track the rect that contains all others.
            if name != "rect":
                return
            box = getattrs(attrs, "x", "y", "width", "height")
            if not self.bbox or contains(box, self.bbox):
                self.bbox = box
            return
        if name == "rect":
            # Positions are emitted relative to the bounding box origin.
            (x, y, width, height) = getattrs(attrs, "x", "y", "width", "height")
            x -= self.bbox[0]
            y -= self.bbox[1]
            id = attrs["id"]
            if self.flow:
                # A preceding flowRoot names this rect (flowed text region).
                id = self.flow
                self.flow = None
            styles = parsedict(attrs.get("style", ""))
        elif name == "text":
            # Plain text has no extent in SVG, hence width/height 0
            # (the header comment recommends flow text regions instead).
            (x, y) = getattrs(attrs, "x", "y")
            x -= self.bbox[0]
            y -= self.bbox[1]
            width, height = 0, 0
            styles = parsedict(attrs["style"])
            id = attrs["id"]
        elif name == "flowRoot":
            # Remember the flow id; the next rect belongs to it.
            self.flow = attrs["id"]
            return
        else:
            return
        if "font-size" in styles:
            font = ' font="Regular;%d"' % px(styles["font-size"])
        else:
            font = ""
        print """\t\t<widget source="%s" render="Label" position="%d,%d" size="%d,%d" %s />""" % (id, x, y, width, height, font)
# Two-pass conversion: pass 1 determines the screen bounding box, pass 2
# prints the <widget> entries relative to it.
parser = make_parser()
contentHandler = parseXML()
parser.setContentHandler(contentHandler)
contentHandler.find_bbox = True
parser.parse(sys.argv[1])
bboxi = tuple([int(x) for x in contentHandler.bbox])
contentHandler.find_bbox = False
print '\t<screen name="" position="%d,%d" size="%d,%d" title="">' % bboxi
parser.parse(sys.argv[1])
print '\t</screen>'
| gpl-2.0 |
t-zuehlsdorff/wesnoth | data/tools/wesnoth/wmlgrammar.py | 18 | 5139 | """
wmlgrammar -- parses a given schema into a more usable form
"""
import collections
import re
REQUIRED = 1
OPTIONAL = 2
REPEATED = 3
FORBIDDEN = 4
class Grammar(object):
    """Parsed WML schema: datatypes (regexes), named elements and categories."""
    def __init__(self, schema):
        schema = schema.get_first("schema")
        # Built-in datatypes; schema text entries may add or override them.
        self.datatypes = {
            "boolean": re.compile("^(yes|no|true|false|on|off)$"),
            # "character" : re.compile("^.$"),
            "float": re.compile("^(\\+|-)?[0-9]+(\.[0-9]*)?$"),
            "integer": re.compile("^(\\+|-)?[0-9]+$"),
            "string": re.compile(".*"),
            "tstring": re.compile(".*"),
        }
        self.elements = {}
        self.categories = collections.defaultdict(list)
        # Text entries declare extra datatypes ("re ..." / "enum ...").
        for type in schema.get_all_text():
            match = parse_match(type.data)
            self.datatypes.update({type.name: match})
        # Sub-tags declare elements (tags) of the grammar.
        for element in schema.get_all_subs():
            node = Node(element, self.datatypes)
            self.elements.update({node.name: node})
        # Resolve "child:parent" inheritance after all nodes exist.
        for element in [el for el in self.elements.values() if el.parent]:
            element.inherit(self.elements[element.parent])
        # categories
        for element in [el for el in self.elements.values() if el.category]:
            self.categories[element.category].append(element)
    def get_element(self, name):
        """Return the Node for a tag name (KeyError if unknown)."""
        return self.elements[name]
    def get_datatype(self, name):
        """Return the compiled regex for a datatype name (KeyError if unknown)."""
        return self.datatypes[name]
    def get_category(self, name):
        """Return the list of Nodes in a category, or [] if unknown."""
        return self.categories.get(name, [])
class Node(object):
    """One schema element: its allowed sub-elements and attributes."""
    def __init__(self, schema, datatypes):
        self.name = schema.name
        self.elements = set([])
        self.ext_elements = [] # Ugh, do we really want to do this?
        self.attributes = set()
        self.parent = None       # set when name is "child:parent"
        self.description = None
        self.category = None
        # Text entries starting with '_' declare sub-elements; the rest
        # declare attributes.
        for item in schema.get_all_text():
            if item.name[0] == '_':
                self.elements.add(Element(item))
            else:
                self.attributes.add(Attribute(item, datatypes))
        for item in schema.get_all_subs():
            if item.name == "element":
                print "[element] found in schema, not parsing yet"
                # self.ext_elements...
            elif item.name == "description":
                self.description = item.get_text("text")
                self.category = item.get_text("category")
            else:
                raise Exception("Unknown element [%s] encountered in grammar for [%s]" % (item.name, self.name))
        if ':' in self.name:
            self.name, self.parent = self.name.split(':', 1)
    def inherit(self, other):
        """Merge the parent node's elements/attributes into this node."""
        assert self.parent == other.name
        self.elements.update(other.elements)
        self.attributes.update(other.attributes)
        self.parent = None
    def get_attributes(self):
        return self.attributes
    def get_elements(self):
        return self.elements
class Element(object):
    """Allowed sub-element declaration: '_name' -> '<frequency> <subname>'."""
    def __init__(self, schema):
        first, second = schema.data.split(" ", 1)
        self.name = schema.name[1:]  # strip the leading '_' marker
        self.freq = parse_frequency(first)
        self.subname = second
    def match(self, name):
        return self.name == name
    # Hash/compare by name only so sets deduplicate across inheritance.
    def __hash__(self):
        return hash(self.name)
    def __cmp__(self, other):
        return (isinstance(other, type(self)) or isinstance(self, type(other))) and cmp(self.name, other.name)
class ExtElement(Element):
    """Sub-element matched by regex instead of literal name (unused so far)."""
    def __init__(self, schema):
        self.re = parse_match(schema.get_text("match").data)
        self.freq = parse_frequency(schema.get_text("freq").data)
        self.subname = schema.get_text("name").data
    def match(self, name):
        return bool(self.re.match(name))
class Attribute(object):
    """Allowed attribute declaration: name -> '<frequency> <datatype>'."""
    def __init__(self, schema, datatypes):
        first, second = schema.data.split(" ", 1)
        if second not in datatypes:
            raise Exception("Unknown datatype '%s'" % second)
        self.name = schema.name
        self.freq = parse_frequency(first)
        self.type = second
        self.re = datatypes[second]  # compiled validator for the datatype
    def match(self, name):
        return self.name == name
    def validate(self, value):
        """True when the value matches the attribute's datatype regex."""
        return bool(self.re.match(value))
    # Hash/compare by name only so sets deduplicate across inheritance.
    def __hash__(self):
        return hash(self.name)
    def __cmp__(self, other):
        return (isinstance(other, type(self)) or isinstance(self, type(other))) and cmp(self.name, other.name)
def parse_frequency(string):
    """Map a frequency keyword to its module constant.

    Accepts 'required', 'optional', 'repeated' or 'forbidden'; raises
    Exception for anything else.
    """
    frequencies = {
        "required": REQUIRED,
        "optional": OPTIONAL,
        "repeated": REPEATED,
        "forbidden": FORBIDDEN,
    }
    try:
        return frequencies[string]
    except KeyError:
        raise Exception("Unknown frequency '%s'" % string)
def parse_match(string):
    """Compile a datatype spec of the form 're <regex>' or 'enum <a,b,...>'.

    're' compiles the remainder directly; 'enum' builds an anchored
    alternation over the comma-separated values.  Raises Exception for an
    unknown match type.
    """
    (matchtype, matchtext) = string.split(" ", 1)
    if matchtype == "re":
        match = re.compile(matchtext)
    elif matchtype == "enum":
        match = re.compile("^(" + matchtext.replace(',', '|') + ")$")
    else:
        # BUGFIX: the original formatted this message with the unrelated
        # builtin `type` (type.name / type.data), which raised
        # AttributeError instead of the intended Exception. Report the
        # actual offending spec string.
        raise Exception("Unknown datatype encountered in \"%s\": '%s'" % (string, matchtype))
    return match
# vim: tabstop=4: shiftwidth=4: expandtab: softtabstop=4: autoindent:
| gpl-2.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/lib2to3/fixes/fix_filter.py | 164 | 2102 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
    """2to3 fixer: wrap filter(...) in list(...), or turn a lambda-based
    filter into a list comprehension (see module docstring for the cases
    where the transformation is skipped)."""
    BM_compatible = True
    # Three alternatives: filter(lambda ...), filter(None, seq), and the
    # generic filter(...) call.
    PATTERN = """
    filter_lambda=power<
        'filter'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'filter'
        trailer< '(' arglist< none='None' ',' seq=any > ')' >
    >
    |
    power<
        'filter'
        args=trailer< '(' [any] ')' >
    >
    """
    # Don't transform if the module imports filter from future_builtins.
    skip_on = "future_builtins.filter"
    def transform(self, node, results):
        if self.should_skip(node):
            return
        if "filter_lambda" in results:
            # filter(lambda x: expr, it) -> [x for x in it if expr]
            new = ListComp(results.get("fp").clone(),
                           results.get("fp").clone(),
                           results.get("it").clone(),
                           results.get("xp").clone())
        elif "none" in results:
            # filter(None, seq) -> [_f for _f in seq if _f]
            new = ListComp(Name("_f"),
                           Name("_f"),
                           results["seq"].clone(),
                           Name("_f"))
        else:
            # Generic call: leave it alone inside iter()/list()/for-loops etc.
            if in_special_context(node):
                return None
            new = node.clone()
            new.prefix = ""
            new = Call(Name("list"), [new])
        new.prefix = node.prefix
        return new
| lgpl-3.0 |
BenoitPerrot/oce | test/gtest-1.7.0/test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, printing both values, when they differ."""
  if expected != actual:
    print 'Expected: %s' % (expected,)
    print '  Actual: %s' % (actual,)
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Remove the variable if present; a missing key is not an error.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  args = [COMMAND]
  if flag is not None:
    args += [flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # With the env var set, the binary must report the overridden value...
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  # ...and with it unset, the built-in default.
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # Each (flag, overridden value, default value) triple below exercises one
    # GTEST_* environment variable.
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')
    if IS_LINUX:
      # These flags only exist on Linux builds.
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
  gtest_test_utils.Main()
| lgpl-2.1 |
rudhir-upretee/Sumo_With_Netsim | tools/projects/TLS-Evaluationsszenarien/evaluator.py | 6 | 5884 | import xml.dom.minidom as dom
from genRoutes import *
import subprocess, sys
from changeNet import *
from pylab import *
import libxml2
# Trip durations collected by evalTrips(), west-east and north-south.
durWE = []
durNS = []
# Route lengths (currently unused by the active delay formulas below).
routeLenWE = []
routeLenNS = []
# Fixed free-flow travel time (seconds) subtracted to obtain the delay.
minTravelTime = 69.0
def evalTrips(begin, N):
    """Parse tripinfo.txt and return (inputWE, inputNS, flowWE, flowNS) in veh/h.

    Side effect: fills the module-level durWE/durNS lists with the durations
    of trips departing after `begin`, for the getAvgDelay* helpers.
    `N` is the simulated period length in seconds.
    """
    global durWE, durNS
    doc = libxml2.parseFile('tripinfo.txt')
    # Latest departure per direction; used as the effective measurement span.
    lastTWE = max([int(d.content) for d in doc.xpathEval("/tripinfos/tripinfo[@vtype='carRight']/@depart")])
    lastTNS = max([int(d.content) for d in doc.xpathEval("/tripinfos/tripinfo[@vtype='carDown']/@depart")])
    nWE = len(doc.xpathEval("/tripinfos/tripinfo[@vtype='carRight']"))
    nNS = len(doc.xpathEval("/tripinfos/tripinfo[@vtype='carDown']"))
    durWE = [int(d.content) for d in doc.xpathEval("/tripinfos/tripinfo[@vtype='carRight'][@depart>%d]/@duration" % begin)]
    durNS = [int(d.content) for d in doc.xpathEval("/tripinfos/tripinfo[@vtype='carDown'][@depart>%d]/@duration" % begin)]
    doc.freeDoc()
    inputNS = nNS * 3600.0 / N
    inputWE = nWE * 3600.0 / N
    flowNS = nNS * 3600.0 / lastTNS
    flowWE = nWE * 3600.0 / lastTWE
    return inputWE, inputNS, flowWE, flowNS
def getAvgDelayWE():
    """Average WE delay: mean trip duration minus the fixed free-flow time.

    Requires evalTrips() to have populated durWE first.
    """
    # Earlier per-route computation kept for reference:
    # avgRouteLen = sum(routeLenWE) / len(routeLenWE)
    # minTravelTime = avgRouteLen / (16.67 - 0.5 * 0.5 * 2.6)  # maxspeed - 0.5 * sigma * accel
    return (1.0 * sum(durWE)/len(durWE) - minTravelTime)
def getAvgDelayNS():
    """Average NS delay: mean trip duration minus the fixed free-flow time."""
    # avgRouteLen = sum(routeLenNS) / len(routeLenNS)
    # minTravelTime = avgRouteLen / (16.67 - 0.5 * 0.5 * 2.6)  # maxspeed - 0.5 * sigma * accel
    return (1.0 * sum(durNS)/len(durNS) - minTravelTime)
def getAvgDelay():
    """Average delay over both directions combined."""
    # avgRouteLen = (sum(routeLenWE)+sum(routeLenNS)) / (len(routeLenWE)+len(routeLenNS))
    # minTravelTime = avgRouteLen / (16.67 - 0.5 * 0.5 * 2.6)  # maxspeed - 0.5 * sigma * accel
    return (1.0 * (sum(durWE)+sum(durNS))/(len(durWE)+len(durNS)) - minTravelTime)
def getAvgGreenTime(intergreen1, intergreen2):
    """Compute average green time per cycle from tls.out phase states.

    Phases 0/2 are the NS/WE green phases; phases 1/3 are the intergreens.
    The number of cycles is estimated from the total intergreen time divided
    by the known per-cycle intergreen length.
    Returns (greenWE, greenNS) in seconds.
    """
    doc = libxml2.parseFile('tls.out')
    lNS = doc.xpathEval("count(/tls-states/tlsstate[@phase='0'])")
    lWE = doc.xpathEval("count(/tls-states/tlsstate[@phase='2'])")
    lIG1 = doc.xpathEval("count(/tls-states/tlsstate[@phase='1'])")
    lIG2 = doc.xpathEval("count(/tls-states/tlsstate[@phase='3'])")
    doc.freeDoc()
    greenNS = lNS / ceil((lIG1 / intergreen1))
    greenWE = lWE / ceil((lIG2 / intergreen2))
    return greenWE, greenNS
def getSaturationFlow():
    """Run a saturated-demand SUMO scenario and estimate the saturation flow.

    Generates an overloaded WE demand, runs the simulation with a long green
    phase, then fits lines to the cumulative detector counts over two time
    windows; the slope (veh/s) times 3600 is the saturation flow in veh/h.
    Returns the first estimate (veh/h).
    """
    genRoutes(300, 'u', 3600, 'u', 0, 'u', 0, 'u', 0)
    gui = False
    setTLType('sumoConfig\cross_sat\cross_sat.net.xml', 'static')
    changePhaseDurations('sumoConfig\cross_sat\cross_sat.net.xml', 1800, 6, 300, 6)
    if gui:
        sumoExe = "guisim"
        sumoConfig = "sumoConfig\cross_sat\cross_sat.sumocfg"
        sumoProcess = subprocess.Popen("%s -c %s" % (sumoExe, sumoConfig), shell=True, stdout=sys.stdout)
    else:
        sumoExe = "sumo"
        sumoConfig = "sumoConfig\cross_sat\cross_sat.sumocfg"
        sumoProcess = subprocess.Popen("%s -c %s --no-duration-log --no-step-log --time-to-teleport 10000000" % (sumoExe, sumoConfig), shell=True, stdout=sys.stdout)
    sumoProcess.wait()
    doc = libxml2.parseFile('sumoConfig\cross_sat\cross.out')
    veh = [int(d.content) for d in doc.xpathEval("/detector/interval[@id='SAT']/@nVehEntered")]
    doc.freeDoc()
    # Cumulative vehicle count over time.
    sumVeh = [sum(veh[1:(i+1)]) for i in range(len(veh))]
    # First fitting window (time steps 323-537).
    xSat = range(323,538)
    ySat = sumVeh[323:538]
    [aSat,b] = linreg(xSat,ySat)
    print 'Saturation flow: ', aSat * 3600
    ySatN = [aSat*xSat[i]+b for i in range(len(xSat))]
    # Second fitting window (time steps 700-849) as a cross-check.
    x = range(700,850)
    y = sumVeh[700:850]
    [a,b] = linreg(x,y)
    print 'Saturation flow 2: ', a * 3600
    ynew = [a*x[i]+b for i in range(len(x))]
    # Optional visualisation of the fits, kept for debugging:
    # plot(range(len(veh)), sumVeh, xSat, ySatN, x, ynew)
    # legend(('Anzahl Fahrzeuge', 'saturation flow = %f' % (aSat * 3600), 'flow = %f' % (a * 3600)))
    # xlabel('Time t')
    # ylabel('# vehicles')
    #
    # show()
    return (aSat * 3600)
def getMinTravelTime():
    """Measure the free-flow travel time by simulating a single vehicle.

    Runs the one-intersection scenario with one car and an essentially
    permanent green phase, then reads that trip's duration from tripinfo.txt.
    Returns the duration in seconds.
    """
    genRoutes(1, 'u', 1, 'u', 0, 'u', 0, 'u', 0)
    gui = False
    setTLType('sumoConfig\one_intersection\cross.net.xml', 'static')
    changePhaseDurations('sumoConfig\one_intersection\cross.net.xml', 1000, 6, 1, 6)
    if gui:
        sumoExe = "guisim"
        sumoConfig = "sumoConfig\one_intersection\cross.sumocfg"
        sumoProcess = subprocess.Popen("%s -c %s" % (sumoExe, sumoConfig), shell=True, stdout=sys.stdout)
    else:
        sumoExe = "sumo"
        sumoConfig = "sumoConfig\one_intersection\cross.sumocfg"
        sumoProcess = subprocess.Popen("%s -c %s --no-duration-log --no-step-log --time-to-teleport 10000000" % (sumoExe, sumoConfig), shell=True, stdout=sys.stdout)
    sumoProcess.wait()
    doc = libxml2.parseFile('tripinfo.txt')
    travelDur = int(doc.xpathEval("/tripinfos/tripinfo/@duration")[0].content)
    doc.freeDoc()
    return travelDur
# Webster formula
# Webster formula
def getOptGreenTimes(satFlowRateWE, satFlowRateNS, intergreenLengthWE, intergreenLengthNS):
    """Webster's optimal signal timing.

    Computes the optimal cycle length C = (5 + 1.5*L) / (1 - Y) from the
    total lost time L and the sum of flow ratios Y, then splits the
    effective green time C - L proportionally to the flow ratios.
    Returns (greenWE, greenNS) in seconds.
    """
    lost_time = intergreenLengthWE + intergreenLengthNS
    flow_ratio_sum = satFlowRateWE + satFlowRateNS
    cycle = (5 + 1.5 * lost_time) / (1 - flow_ratio_sum)
    effective_green = cycle - lost_time
    greenWE = effective_green * satFlowRateWE / flow_ratio_sum
    greenNS = effective_green * satFlowRateNS / flow_ratio_sum
    return greenWE, greenNS
# linear regression
def linreg(X, Y):
"""
Summary
Linear regression of y = ax + b
Usage
real, real = linreg(list, list)
Returns coefficients to the regression line "y=ax+b" from x[] and y[]
"""
if len(X) != len(Y): raise ValueError, 'unequal length'
N = len(X)
Sx = Sy = Sxx = Syy = Sxy = 0.0
for x, y in map(None, X, Y):
Sx = Sx + x
Sy = Sy + y
Sxx = Sxx + x*x
Syy = Syy + y*y
Sxy = Sxy + x*y
det = Sxx * N - Sx * Sx
a, b = (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det
return a, b
| gpl-3.0 |
tanglei528/nova | nova/scheduler/filters/core_filter.py | 2 | 3994 | # Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2012 Justin Santa Barbara
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import db
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
default=16.0,
help='Virtual CPU to physical CPU allocation ratio which affects '
'all CPU filters. This configuration specifies a global ratio '
'for CoreFilter. For AggregateCoreFilter, it will fall back to '
'this configuration value if no per-aggregate setting found.')
CONF = cfg.CONF
CONF.register_opt(cpu_allocation_ratio_opt)
class BaseCoreFilter(filters.BaseHostFilter):
def _get_cpu_allocation_ratio(self, host_state, filter_properties):
raise NotImplementedError
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
if not instance_type:
return True
if not host_state.vcpus_total:
# Fail safe
LOG.warning(_("VCPUs not set; assuming CPU collection broken"))
return True
instance_vcpus = instance_type['vcpus']
cpu_allocation_ratio = self._get_cpu_allocation_ratio(host_state,
filter_properties)
vcpus_total = host_state.vcpus_total * cpu_allocation_ratio
# Only provide a VCPU limit to compute if the virt driver is reporting
# an accurate count of installed VCPUs. (XenServer driver does not)
if vcpus_total > 0:
host_state.limits['vcpu'] = vcpus_total
return (vcpus_total - host_state.vcpus_used) >= instance_vcpus
class CoreFilter(BaseCoreFilter):
    """Filter hosts on vCPU headroom using the single global ratio."""

    def _get_cpu_allocation_ratio(self, host_state, filter_properties):
        # Every host is judged against the one configured overcommit ratio.
        return CONF.cpu_allocation_ratio
class AggregateCoreFilter(BaseCoreFilter):
    """AggregateCoreFilter with per-aggregate CPU subscription flag.
    Fall back to global cpu_allocation_ratio if no per-aggregate setting found.
    """

    def _get_cpu_allocation_ratio(self, host_state, filter_properties):
        """Return the minimum 'cpu_allocation_ratio' among the host's
        aggregates, or CONF.cpu_allocation_ratio when none is set."""
        context = filter_properties['context']
        # TODO(uni): DB query in filter is a performance hit, especially for
        # system with lots of hosts. Will need a general solution here to fix
        # all filters with aggregate DB call things.
        metadata = db.aggregate_metadata_get_by_host(
            context, host_state.host, key='cpu_allocation_ratio')
        aggregate_vals = metadata.get('cpu_allocation_ratio', set())
        num_values = len(aggregate_vals)
        if num_values == 0:
            # Host is in no aggregate that overrides the ratio.
            return CONF.cpu_allocation_ratio
        if num_values > 1:
            # Conflicting per-aggregate settings: pick the most conservative.
            LOG.warning(_("%(num_values)d ratio values found, "
                          "of which the minimum value will be used."),
                        {'num_values': num_values})
        try:
            ratio = float(min(aggregate_vals))
        except ValueError as e:
            # Non-numeric metadata; fall back instead of breaking scheduling.
            LOG.warning(_("Could not decode cpu_allocation_ratio: '%s'"), e)
            ratio = CONF.cpu_allocation_ratio
        return ratio
| apache-2.0 |
odubno/microblog | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor consumed by the chardet sequence-frequency analyser.
Latin2HungarianModel = {
    # 256-entry table mapping ISO-8859-2 byte values to frequency-order ranks.
    'charToOrderMap': Latin2_HungarianCharToOrderMap,
    # Bigram likelihood classes; shared with the windows-1250 model below.
    'precedenceMatrix': HungarianLangModel,
    # Matches the "first 512 sequences" coverage noted in the table header.
    'mTypicalPositiveRatio': 0.947368,
    'keepEnglishLetter': True,
    'charsetName': "ISO-8859-2"
}
# Same language model as the ISO-8859-2 variant, but with the byte-to-order
# map for the windows-1250 code page.
Win1250HungarianModel = {
    'charToOrderMap': win1250HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,
    'keepEnglishLetter': True,
    'charsetName': "windows-1250"
}
# flake8: noqa
| bsd-3-clause |
lisaglendenning/pynet | pavement.py | 1 | 1611 | # @copyright
# @license
# Paver project
# http://paver.github.com/
# Paver works a bit like Make or Rake.
# To use Paver, you run paver <taskname> and the paver command
# will look for a pavement.py file in the current directory
import sys, os
import paver.easy
import paver.setuputils
NAME = 'pynet'
def configure():
    """Import the package from ./source and derive setup() metadata from it."""
    # Temporarily make the in-tree package importable, import it, then
    # remove the inserted path entry again.
    source_dir = os.path.join(os.getcwd(), 'source')
    sys.path.insert(0, source_dir)
    package = __import__(NAME)
    del sys.path[0]
    # Distribution metadata is pulled straight off the package's dunders.
    return {
        'version': package.__version__,
        'url': package.__url__,
        'author': package.__author__,
        'author_email': package.__author_email__,
        'license': package.__license__,
        'keywords': ', '.join(repr(k) for k in package.__keywords__),
        'install_requires': package.__requires__,
        'extras_require': package.__extras__,
    }
# Register the distribution with Paver/setuptools, combining the static
# fields here with the package-derived metadata from configure().
paver.setuputils.setup(
    name=NAME,
    packages=paver.setuputils.find_packages('source'),
    package_dir = {'':'source'},
    **configure())
@paver.easy.task
@paver.easy.needs('paver.misctasks.generate_setup',
                  'paver.misctasks.minilib',)
def sdist():
    """Overrides sdist to make sure that our setup.py is generated."""
    # Create distribution manifest
    manifest_rules = [
        'include %s' % ' '.join(['setup.py', 'paver-minilib.zip']),
        'recursive-include source *.py',
    ]
    with open('MANIFEST.in', 'w') as manifest:
        manifest.write('\n'.join(manifest_rules) + '\n')
    # Hand off to the standard setuptools sdist command.
    paver.easy.call_task('setuptools.command.sdist')
| mit |
Jamlum/pytomo | pytomo/dns/rdtypes/ANY/LOC.py | 2 | 12647 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import cStringIO
import struct
from . import exception as dns_exception
from . import rdata as dns_rdata
_pows = (1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L,
100000000L, 1000000000L, 10000000000L)
def _exponent_of(what, desc):
exp = None
for i in xrange(len(_pows)):
if what // _pows[i] == 0L:
exp = i - 1
break
if exp is None or exp < 0:
raise dns_exception.SyntaxError("%s value out of bounds" % desc)
return exp
def _float_to_tuple(what):
if what < 0:
sign = -1
what *= -1
else:
sign = 1
what = long(round(what * 3600000))
degrees = int(what // 3600000)
what -= degrees * 3600000
minutes = int(what // 60000)
what -= minutes * 60000
seconds = int(what // 1000)
what -= int(seconds * 1000)
what = int(what)
return (degrees * sign, minutes, seconds, what)
def _tuple_to_float(what):
if what[0] < 0:
sign = -1
value = float(what[0]) * -1
else:
sign = 1
value = float(what[0])
value += float(what[1]) / 60.0
value += float(what[2]) / 3600.0
value += float(what[3]) / 3600000.0
return sign * value
def _encode_size(what, desc):
    """Pack a size value into the RFC 1876 byte form: base in the high
    nibble, power-of-ten exponent in the low nibble."""
    value = int(what)
    exponent = _exponent_of(value, desc) & 0xF
    base = value // pow(10, exponent) & 0xF
    return (base << 4) | exponent
def _decode_size(what, desc):
exponent = what & 0x0F
if exponent > 9:
raise dns_exception.SyntaxError("bad %s exponent" % desc)
base = (what & 0xF0) >> 4
if base > 9:
raise dns_exception.SyntaxError("bad %s base" % desc)
return long(base) * pow(10, exponent)
class LOC(dns_rdata.Rdata):
    """LOC record
    @ivar latitude: latitude
    @type latitude: (int, int, int, int) tuple specifying the degrees, minutes,
    seconds, and milliseconds of the coordinate.
    @ivar longitude: longitude
    @type longitude: (int, int, int, int) tuple specifying the degrees,
    minutes, seconds, and milliseconds of the coordinate.
    @ivar altitude: altitude
    @type altitude: float
    @ivar size: size of the sphere
    @type size: float
    @ivar horizontal_precision: horizontal precision
    @type horizontal_precision: float
    @ivar vertical_precision: vertical precision
    @type vertical_precision: float
    @see: RFC 1876"""

    # NOTE(review): altitude/size/precision appear to be stored in
    # centimeters (from_text converts "m -> cm") -- confirm before reuse.
    __slots__ = ['latitude', 'longitude', 'altitude', 'size',
                 'horizontal_precision', 'vertical_precision']

    def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
                 size=1.0, hprec=10000.0, vprec=10.0):
        """Initialize a LOC record instance.
        The parameters I{latitude} and I{longitude} may be either a 4-tuple
        of integers specifying (degrees, minutes, seconds, milliseconds),
        or they may be floating point values specifying the number of
        degrees. The other parameters are floats."""
        super(LOC, self).__init__(rdclass, rdtype)
        # Normalize numeric coordinates to the canonical 4-tuple form.
        if isinstance(latitude, int) or isinstance(latitude, long):
            latitude = float(latitude)
        if isinstance(latitude, float):
            latitude = _float_to_tuple(latitude)
        self.latitude = latitude
        if isinstance(longitude, int) or isinstance(longitude, long):
            longitude = float(longitude)
        if isinstance(longitude, float):
            longitude = _float_to_tuple(longitude)
        self.longitude = longitude
        self.altitude = float(altitude)
        self.size = float(size)
        self.horizontal_precision = float(hprec)
        self.vertical_precision = float(vprec)

    def to_text(self, origin=None, relativize=True, **kw):
        """Render the record in RFC 1876 presentation format."""
        # The sign of the degrees field selects the hemisphere letter.
        if self.latitude[0] > 0:
            lat_hemisphere = 'N'
            lat_degrees = self.latitude[0]
        else:
            lat_hemisphere = 'S'
            lat_degrees = -1 * self.latitude[0]
        if self.longitude[0] > 0:
            long_hemisphere = 'E'
            long_degrees = self.longitude[0]
        else:
            long_hemisphere = 'W'
            long_degrees = -1 * self.longitude[0]
        text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
            lat_degrees, self.latitude[1], self.latitude[2], self.latitude[3],
            lat_hemisphere, long_degrees, self.longitude[1], self.longitude[2],
            self.longitude[3], long_hemisphere, self.altitude / 100.0
        )
        # Only emit the optional fields when any differs from its default;
        # stored values are divided by 100 (cm -> m) for display.
        if self.size != 1.0 or self.horizontal_precision != 10000.0 or \
           self.vertical_precision != 10.0:
            text += " %0.2fm %0.2fm %0.2fm" % (
                self.size / 100.0, self.horizontal_precision / 100.0,
                self.vertical_precision / 100.0
            )
        return text

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse a LOC record from master-file tokens.

        The size and precision fields are optional and default to
        1.0 / 10000.0 / 10.0 (meters) per RFC 1876.
        """
        latitude = [0, 0, 0, 0]
        longitude = [0, 0, 0, 0]
        size = 1.0
        hprec = 10000.0
        vprec = 10.0
        # --- latitude: degrees [minutes [seconds[.fraction]]] N|S ---
        latitude[0] = tok.get_int()
        t = tok.get_string()
        if t.isdigit():
            latitude[1] = int(t)
            t = tok.get_string()
            if '.' in t:
                (seconds, milliseconds) = t.split('.')
                if not seconds.isdigit():
                    raise dns_exception.SyntaxError('bad latitude seconds value')
                latitude[2] = int(seconds)
                if latitude[2] >= 60:
                    raise dns_exception.SyntaxError('latitude seconds >= 60')
                l = len(milliseconds)
                if l == 0 or l > 3 or not milliseconds.isdigit():
                    raise dns_exception.SyntaxError('bad latitude milliseconds value')
                # Scale a 1-3 digit fraction up to whole milliseconds.
                if l == 1:
                    m = 100
                elif l == 2:
                    m = 10
                else:
                    m = 1
                latitude[3] = m * int(milliseconds)
                t = tok.get_string()
            elif t.isdigit():
                latitude[2] = int(t)
                t = tok.get_string()
        if t == 'S':
            latitude[0] *= -1
        elif t != 'N':
            raise dns_exception.SyntaxError('bad latitude hemisphere value')
        # --- longitude: same grammar with E|W hemisphere ---
        longitude[0] = tok.get_int()
        t = tok.get_string()
        if t.isdigit():
            longitude[1] = int(t)
            t = tok.get_string()
            if '.' in t:
                (seconds, milliseconds) = t.split('.')
                if not seconds.isdigit():
                    raise dns_exception.SyntaxError('bad longitude seconds value')
                longitude[2] = int(seconds)
                if longitude[2] >= 60:
                    raise dns_exception.SyntaxError('longitude seconds >= 60')
                l = len(milliseconds)
                if l == 0 or l > 3 or not milliseconds.isdigit():
                    raise dns_exception.SyntaxError('bad longitude milliseconds value')
                if l == 1:
                    m = 100
                elif l == 2:
                    m = 10
                else:
                    m = 1
                longitude[3] = m * int(milliseconds)
                t = tok.get_string()
            elif t.isdigit():
                longitude[2] = int(t)
                t = tok.get_string()
        if t == 'W':
            longitude[0] *= -1
        elif t != 'E':
            raise dns_exception.SyntaxError('bad longitude hemisphere value')
        # --- altitude, optionally suffixed with 'm' ---
        t = tok.get_string()
        if t[-1] == 'm':
            t = t[0 : -1]
        altitude = float(t) * 100.0        # m -> cm
        # --- optional size / horizontal / vertical precision ---
        token = tok.get().unescape()
        if not token.is_eol_or_eof():
            value = token.value
            if value[-1] == 'm':
                value = value[0 : -1]
            size = float(value) * 100.0        # m -> cm
            token = tok.get().unescape()
            if not token.is_eol_or_eof():
                value = token.value
                if value[-1] == 'm':
                    value = value[0 : -1]
                hprec = float(value) * 100.0        # m -> cm
                token = tok.get().unescape()
                if not token.is_eol_or_eof():
                    value = token.value
                    if value[-1] == 'm':
                        value = value[0 : -1]
                    vprec = float(value) * 100.0        # m -> cm
        tok.get_eol()
        return cls(rdclass, rdtype, latitude, longitude, altitude,
                   size, hprec, vprec)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        """Write the RFC 1876 wire form (version 0) to *file*."""
        # Coordinates are encoded as milliseconds of arc biased by 2^31,
        # so the sign is carried by the offset from the bias.
        if self.latitude[0] < 0:
            sign = -1
            degrees = long(-1 * self.latitude[0])
        else:
            sign = 1
            degrees = long(self.latitude[0])
        milliseconds = (degrees * 3600000 +
                        self.latitude[1] * 60000 +
                        self.latitude[2] * 1000 +
                        self.latitude[3]) * sign
        latitude = 0x80000000L + milliseconds
        if self.longitude[0] < 0:
            sign = -1
            degrees = long(-1 * self.longitude[0])
        else:
            sign = 1
            degrees = long(self.longitude[0])
        milliseconds = (degrees * 3600000 +
                        self.longitude[1] * 60000 +
                        self.longitude[2] * 1000 +
                        self.longitude[3]) * sign
        longitude = 0x80000000L + milliseconds
        # Altitude is biased by 10,000,000 (cm) on the wire.
        altitude = long(self.altitude) + 10000000L
        size = _encode_size(self.size, "size")
        hprec = _encode_size(self.horizontal_precision, "horizontal precision")
        vprec = _encode_size(self.vertical_precision, "vertical precision")
        wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
                           longitude, altitude)
        file.write(wire)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Build a LOC instance from wire-format data, undoing the biases
        applied by to_wire and validating coordinate ranges."""
        (version, size, hprec, vprec, latitude, longitude, altitude) = \
            struct.unpack("!BBBBIII", wire[current : current + rdlen])
        if latitude > 0x80000000L:
            latitude = float(latitude - 0x80000000L) / 3600000
        else:
            latitude = -1 * float(0x80000000L - latitude) / 3600000
        if latitude < -90.0 or latitude > 90.0:
            raise dns_exception.FormError("bad latitude")
        if longitude > 0x80000000L:
            longitude = float(longitude - 0x80000000L) / 3600000
        else:
            longitude = -1 * float(0x80000000L - longitude) / 3600000
        if longitude < -180.0 or longitude > 180.0:
            raise dns_exception.FormError("bad longitude")
        altitude = float(altitude) - 10000000.0
        size = _decode_size(size, "size")
        hprec = _decode_size(hprec, "horizontal precision")
        vprec = _decode_size(vprec, "vertical precision")
        return cls(rdclass, rdtype, latitude, longitude, altitude,
                   size, hprec, vprec)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare via canonical wire form instead of field-by-field logic.
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)

    def _get_float_latitude(self):
        return _tuple_to_float(self.latitude)

    def _set_float_latitude(self, value):
        self.latitude = _float_to_tuple(value)

    float_latitude = property(_get_float_latitude, _set_float_latitude,
                              doc="latitude as a floating point value")

    def _get_float_longitude(self):
        return _tuple_to_float(self.longitude)

    def _set_float_longitude(self, value):
        self.longitude = _float_to_tuple(value)

    float_longitude = property(_get_float_longitude, _set_float_longitude,
                               doc="longitude as a floating point value")
| gpl-2.0 |
kornicameister/ansible-modules-extras | packaging/os/urpmi.py | 74 | 6083 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Philippe Makowski
# Written by Philippe Makowski <philippem@mageia.org>
# Based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: urpmi
short_description: Urpmi manager
description:
- Manages packages with I(urpmi) (such as for Mageia or Mandriva)
version_added: "1.3.4"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
default: present
choices: [ "absent", "present" ]
update_cache:
description:
- update the package database first C(urpmi.update -a).
required: false
default: no
choices: [ "yes", "no" ]
no-recommends:
description:
- Corresponds to the C(--no-recommends) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
force:
description:
- Assume "yes" is the answer to any question urpmi has to ask.
Corresponds to the C(--force) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
author: "Philippe Makowski (@pmakowski)"
notes: []
'''
EXAMPLES = '''
# install package foo
- urpmi: pkg=foo state=present
# remove package foo
- urpmi: pkg=foo state=absent
# description: remove packages foo and bar
- urpmi: pkg=foo,bar state=absent
# description: update the package database (urpmi.update -a -q) and install bar (bar will be the updated if a newer version exists)
- urpmi: name=bar, state=present, update_cache=yes
'''
import shlex
import os
import sys
URPMI_PATH = '/usr/sbin/urpmi'
URPME_PATH = '/usr/sbin/urpme'
def query_package(module, name):
    """Return True when the rpm database reports *name* as installed."""
    # rpm -q exits 0 if the package is installed, 1 otherwise.
    rc, stdout, stderr = module.run_command("rpm -q %s" % (name),
                                            check_rc=False)
    return rc == 0
def query_package_provides(module, name):
    """Return True when some installed package provides capability *name*."""
    # rpm -q --provides exits 0 if the name resolves, 1 otherwise.
    rc, stdout, stderr = module.run_command("rpm -q --provides %s" % (name),
                                            check_rc=False)
    if rc == 0:
        return True
    return False
def update_package_db(module):
    """Refresh all configured urpmi media, aborting the module on failure."""
    rc, stdout, stderr = module.run_command("urpmi.update -a -q",
                                            check_rc=False)
    if rc != 0:
        module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
    """Remove each installed package in *packages* via urpme, then exit
    the module with an appropriate changed status."""
    removed = 0
    # Loop package-by-package so a failure can name the culprit.
    for package in packages:
        # Nothing to do for packages that are not installed.
        if not query_package(module, package):
            continue
        rc, stdout, stderr = module.run_command(
            "%s --auto %s" % (URPME_PATH, package), check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (package))
        removed += 1
    if removed > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % removed)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec, force=True, no_recommends=True):
    """Install every package in *pkgspec* that is not already provided.

    Bug fix: the post-install verification loop iterated with
    ``for packages in pkgspec`` while testing ``package`` -- a stale
    variable left over from the first loop -- so only one (stale) name
    was re-checked.  It now verifies each requested package.
    """
    # Collect only the packages that are not yet installed/provided.
    packages = ""
    for package in pkgspec:
        if not query_package_provides(module, package):
            packages += "'%s' " % package
    if len(packages) != 0:
        if no_recommends:
            no_recommends_yes = '--no-recommends'
        else:
            no_recommends_yes = ''
        if force:
            force_yes = '--force'
        else:
            force_yes = ''
        cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes,
                                               no_recommends_yes, packages))
        rc, out, err = module.run_command(cmd)
        # Verify every requested package actually got installed.
        installed = True
        for package in pkgspec:
            if not query_package_provides(module, package):
                installed = False
        # urpmi always have 0 for exit code if --force is used, so the
        # per-package verification above is the real success signal.
        if rc or not installed:
            module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
        else:
            module.exit_json(changed=True, msg="%s present(s)" % packages)
    else:
        module.exit_json(changed=False)
def main():
    """Module entry point: parse arguments and dispatch to install/remove."""
    # 'package' accepts the aliases pkg/name; booleans default to the
    # urpmi-friendly values documented in DOCUMENTATION above.
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
            update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
            force = dict(default=True, type='bool'),
            no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
            package = dict(aliases=['pkg', 'name'], required=True)))
    if not os.path.exists(URPMI_PATH):
        module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))
    p = module.params
    force_yes = p['force']
    no_recommends_yes = p['no_recommends']
    if p['update_cache']:
        update_package_db(module)
    # Multiple packages may be given as a comma-separated list.
    packages = p['package'].split(',')
    if p['state'] in [ 'installed', 'present' ]:
        install_packages(module, packages, force_yes, no_recommends_yes)
    elif p['state'] in [ 'removed', 'absent' ]:
        remove_packages(module, packages)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
kwikius/ardupilot | Tools/autotest/apmrover2.py | 25 | 5678 | # drive APMrover2 in SITL
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
#HOME=mavutil.location(-35.362938,149.165085,584,270)
HOME=mavutil.location(40.071374969556928,-105.22978898137808,1583.702759,246)
homeloc = None
def arm_rover(mavproxy, mav):
    """Arm the rover's throttle after giving the EKF time to settle."""
    # wait for EKF to settle
    wait_seconds(mav, 15)
    mavproxy.send('arm throttle\n')
    # Block until MAVProxy confirms arming.
    mavproxy.expect('ARMED')
    print("ROVER ARMED")
    return True
def drive_left_circuit(mavproxy, mav):
    '''drive a left circuit, 50m on a side'''
    mavproxy.send('switch 6\n')
    wait_mode(mav, 'MANUAL')
    # RC channel 3 high -- presumably full throttle; confirm against the
    # vehicle's RC mapping.
    mavproxy.send('rc 3 2000\n')
    print("Driving left circuit")
    # do 4 turns
    for i in range(0,4):
        # hard left
        print("Starting turn %u" % i)
        # RC channel 1 low -- presumably full-left steering.
        mavproxy.send('rc 1 1000\n')
        # Headings step 270, 180, 90, 0 -- a counter-clockwise square.
        if not wait_heading(mav, 270 - (90*i), accuracy=10):
            return False
        # Center steering and drive the 50m leg.
        mavproxy.send('rc 1 1500\n')
        print("Starting leg %u" % i)
        if not wait_distance(mav, 50, accuracy=7):
            return False
    # Throttle back to neutral once the circuit is complete.
    mavproxy.send('rc 3 1500\n')
    print("Circuit complete")
    return True
def drive_RTL(mavproxy, mav):
    '''drive to home'''
    print("Driving home in RTL")
    # Flight-mode switch position 3 -- the RTL slot in this test setup.
    mavproxy.send('switch 3\n')
    # Fail if the rover does not reach home within the timeout.
    if not wait_location(mav, homeloc, accuracy=22, timeout=90):
        return False
    print("RTL Complete")
    return True
def setup_rc(mavproxy):
    '''setup RC override control'''
    # Neutral (1500) on channels 1-7, then channel 8 high.
    for channel in range(1, 8):
        mavproxy.send('rc %u 1500\n' % channel)
    mavproxy.send('rc 8 1800\n')
def drive_mission(mavproxy, mav, filename):
    '''drive a mission from a file'''
    # NOTE(review): homeloc is declared global but never assigned in this
    # function - the declaration appears to be vestigial; confirm.
    global homeloc
    print("Driving mission %s" % filename)
    # Upload the waypoint file and confirm the vehicle accepted it.
    mavproxy.send('wp load %s\n' % filename)
    mavproxy.expect('Flight plan received')
    mavproxy.send('wp list\n')
    mavproxy.expect('Requesting [0-9]+ waypoints')
    mavproxy.send('switch 4\n') # auto mode
    mavproxy.send('rc 3 1500\n')
    wait_mode(mav, 'AUTO')
    # The mission is judged complete once waypoint 4 is reached (within
    # 5m) and the vehicle drops back into HOLD mode.
    if not wait_waypoint(mav, 1, 4, max_dist=5):
        return False
    wait_mode(mav, 'HOLD')
    print("Mission OK")
    return True
def drive_APMrover2(viewerip=None, map=False):
    '''drive APMrover2 in SIL

    you can pass viewerip as an IP address to optionally send fg and
    mavproxy packets too for local viewing of the mission in real time

    Returns True on success, False if any sub-test failed or a pexpect
    timeout occurred.
    '''
    global homeloc

    options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
    if viewerip:
        options += " --out=%s:14550" % viewerip
    if map:
        options += ' --map'

    home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
    sil = util.start_SIL('APMrover2', wipe=True, model='rover', home=home, speedup=10)
    mavproxy = util.start_MAVProxy_SIL('APMrover2', options=options)

    print("WAITING FOR PARAMETERS")
    mavproxy.expect('Received [0-9]+ parameters')

    # setup test parameters
    mavproxy.send("param load %s/Rover.parm\n" % testdir)
    mavproxy.expect('Loaded [0-9]+ parameters')

    # restart with new parms
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)

    sil = util.start_SIL('APMrover2', model='rover', home=home, speedup=10)
    mavproxy = util.start_MAVProxy_SIL('APMrover2', options=options)
    mavproxy.expect(r'Telemetry log: (\S+)')
    logfile = mavproxy.match.group(1)
    print("LOGFILE %s" % logfile)

    # mirror the telemetry log into the buildlogs directory
    buildlog = util.reltopdir("../buildlogs/APMrover2-test.tlog")
    print("buildlog=%s" % buildlog)
    if os.path.exists(buildlog):
        os.unlink(buildlog)
    try:
        os.link(logfile, buildlog)
    except Exception:
        # hard-linking can fail (e.g. across filesystems); not fatal
        pass

    mavproxy.expect('Received [0-9]+ parameters')

    util.expect_setup_callback(mavproxy, expect_callback)

    expect_list_clear()
    expect_list_extend([sil, mavproxy])

    print("Started simulator")

    # get a mavlink connection going
    try:
        mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
    except Exception as msg:
        # BUGFIX: the original format string had no placeholder, so the
        # "%" operator itself raised TypeError and masked the real error.
        print("Failed to start mavlink connection on 127.0.0.1:19550: %s" % msg)
        raise
    mav.message_hooks.append(message_hook)
    mav.idle_hooks.append(idle_hook)

    failed = False
    e = 'None'
    try:
        print("Waiting for a heartbeat with mavlink protocol %s" % mav.WIRE_PROTOCOL_VERSION)
        mav.wait_heartbeat()
        print("Setting up RC parameters")
        setup_rc(mavproxy)
        print("Waiting for GPS fix")
        mav.wait_gps_fix()
        homeloc = mav.location()
        print("Home location: %s" % homeloc)
        if not arm_rover(mavproxy, mav):
            print("Failed to ARM")
            failed = True
        if not drive_mission(mavproxy, mav, os.path.join(testdir, "rover1.txt")):
            print("Failed mission")
            failed = True
        if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/APMrover2-log.bin")):
            print("Failed log download")
            failed = True
#        if not drive_left_circuit(mavproxy, mav):
#            print("Failed left circuit")
#            failed = True
#        if not drive_RTL(mavproxy, mav):
#            print("Failed RTL")
#            failed = True
    except pexpect.TIMEOUT as timeout_err:
        # Keep our own reference: 'except ... as' bindings are unbound at
        # the end of the except block on Python 3, and 'e' is printed below.
        e = timeout_err
        print("Failed with timeout")
        failed = True

    mav.close()
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)

    # preserve the valgrind log (if any) for the build dashboard
    if os.path.exists('APMrover2-valgrind.log'):
        os.chmod('APMrover2-valgrind.log', 0o644)
        shutil.copy("APMrover2-valgrind.log", util.reltopdir("../buildlogs/APMrover2-valgrind.log"))

    if failed:
        print("FAILED: %s" % e)
        return False
    return True
| gpl-3.0 |
ruymanengithub/vison | vison/image/sextractor.py | 1 | 5990 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Sextractor interface.
Created on Thu May 17 13:29:05 2018
:author: raf
"""
# IMPORT STUFF
from pdb import set_trace as stop
try: import astromatic_wrapper as aw
except ModuleNotFoundError:
pass
import os
import string as st
import numpy as np
import tempfile
from astropy.io import fits as fts
from vison import data as visondata
#from vison.datamodel import ccd as ccdmod
# END IMPORT
default_params = ['NUMBER', 'EXT_NUMBER', 'X_IMAGE', 'Y_IMAGE',
'A_IMAGE', 'B_IMAGE', 'ELONGATION', 'FWHM_IMAGE', 'MAG_AUTO']
config_default_file = 'sexconfig.default'
class VSExtractor(object):
    """Object-oriented wrapper around SExtractor (via astromatic_wrapper).

    Holds an image array plus a dict of tunable run settings
    ('internals'), builds the SExtractor configuration, runs the external
    tool on a temporary FITS file, and returns the catalog file name.
    """
    def __init__(self, img=None):
        """Store the image and initialise default run settings."""
        # if img is not None:
        #    assert isinstance(img,np.ndarray)
        #    tmpf = self.save_img_to_tmp(img)
        # else:
        #    tmpf = None
        self.img = img
        # Defaults; override via update_internals() or by mutating
        # self.internals directly before calling run_SEx().
        self.internals = dict(
            MINAREA=5,
            DET_THRESH=13.,
            MAG_ZEROPOINT=20.,
            SATUR_LEVEL=65535,
            SEEING_FWHM=1.5,
            PIXEL_SCALE=1.,
            GAIN=1.
        )
        self.internals['params'] = default_params
    def save_img_to_tmp(self, img, delete=True, close=False):
        """Write `img` to a temporary FITS file and return the file object.

        delete=True removes the file automatically on close; run_SEx()
        passes delete=False and removes the file explicitly after the run.
        """
        outf = tempfile.NamedTemporaryFile(mode='w+b', suffix='.fits',
                                           prefix='vison_sex',
                                           delete=delete)
        fts.writeto(outf, img, overwrite=True)
        if close:
            outf.close()
        return outf
    # def close_images(self):
    #     for item in self.files['image']:
    #         item.close()
    def update_internals(self, inputs):
        # Merge caller-supplied settings over the current ones.
        assert isinstance(inputs, dict)
        self.internals.update(inputs)
    def get_sex_kwargs(self, catroot, config=None, checks=None):
        """Assemble the keyword dict for astromatic_wrapper's Astromatic API.

        catroot -- root name for the output .ldac.fits catalog (and any
                   check-images).
        config  -- optional dict of SExtractor config overrides.
        checks  -- optional list of CHECKIMAGE_TYPE names (case-insensitive);
                   one '<catroot>_<TYPE[:4]>.fits' file is produced per entry.
        """
        catname = '%s.ldac.fits' % catroot
        # SExtractor expects all config values as strings.
        # DETECT_THRESH/ANALYSIS_THRESH take a "threshold,zero-point" pair.
        configdefaults = dict(
            CATALOG_NAME=catname,
            CATALOG_TYPE='FITS_LDAC',
            DETECT_MINAREA='%i' % self.internals['MINAREA'],
            DETECT_THRESH='%.1f,%.1f' %
            (self.internals['DET_THRESH'], self.internals['MAG_ZEROPOINT']),
            ANALYSIS_THRESH='%.1f,%.1f' %
            (self.internals['DET_THRESH'],
             self.internals['MAG_ZEROPOINT']),
            FILTER='N',
            SATUR_LEVEL='%i' % self.internals['SATUR_LEVEL'],
            MAG_ZEROPOINT='%.1f' % self.internals['MAG_ZEROPOINT'],
            GAIN='%.1f' % self.internals['GAIN'],
            PIXEL_SCALE='%.1f' % self.internals['PIXEL_SCALE'],
            SEEING_FWHM='%.1f' % self.internals['SEEING_FWHM'],
            BACKPHOTO_TYPE='LOCAL',
            BACK_SIZE='250',
            BACK_FILTERSIZE='3')
        if checks is not None:
            assert isinstance(checks, list)
            assert isinstance(checks[0], str)
            checks = [item.upper() for item in checks]
            # Check-image names keep only the first 4 chars of each type.
            checkimage_name = ','.join(['%s_%s.fits' %
                                        (catroot, check[0:4]) for check in checks])
            checkimage_type = ','.join(checks)
            configdefaults.update(dict(
                CHECKIMAGE_TYPE=checkimage_type,
                CHECKIMAGE_NAME=checkimage_name
            ))
        kwargs = dict(code='SExtractor')
        kwargs['config'] = configdefaults.copy()
        kwargs['temp_path'] = '.'
        kwargs['params'] = self.internals['params']
        # Base config file shipped with the vison package.
        kwargs['config_file'] = os.path.join(
            visondata.__path__[0], config_default_file)
        if config is not None:
            assert isinstance(config, dict)
            kwargs['config'].update(config)
        return kwargs
    def run_SEx(self, catroot, config=None, checks=None, cleanafter=False):
        """Run SExtractor on self.img and return the catalog file name.

        The image is written to a temporary FITS file for the run and
        removed afterwards. With cleanafter=True, scratch files
        ('sex.param' and any check-images) are removed too.
        """
        kwargs = self.get_sex_kwargs(catroot, config, checks)
        sextractor = aw.api.Astromatic(**kwargs)
        tmpf = self.save_img_to_tmp(self.img, delete=False, close=True)
        sextractor.run_frames(tmpf.name, frames=[1])
        os.system('rm %s' % tmpf.name)
        catname = kwargs['config']['CATALOG_NAME']
        if cleanafter:
            os.system('rm sex.param')
            self.cleanaftersex(kwargs['config'])
        return catname
    def cleanaftersex(self, config):
        # Remove any check-image files the run produced (best effort).
        if 'CHECKIMAGE_TYPE' in config:
            checkimage_name = config['CHECKIMAGE_NAME']
            checkimages = checkimage_name.split( ',')
            for chimg in checkimages:
                if os.path.exists(chimg):
                    os.system('rm %s' % chimg)
    def load_catalog(self, catpath):
        """Load an LDAC catalog file and return it as an astropy Table."""
        catalog = aw.utils.ldac.get_table_from_ldac(catpath)
        return catalog
def easy_run_SEx(img, catroot, sexconfig=None, cleanafter=False):
    """Run SExtractor on `img` with sensible defaults and return the catalog.

    :param img: 2D image array (copied, so the caller's array is untouched).
    :param catroot: root name for the output catalog / check-image files.
    :param sexconfig: optional dict overriding the default configuration.
    :param cleanafter: if True, remove check-images after the run.
    :return: astropy Table with the extracted source catalog.
    """
    vSEx = VSExtractor(img=img.copy())
    vSEx.internals['params'] = ['NUMBER', 'EXT_NUMBER', 'X_IMAGE', 'Y_IMAGE',
                                'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', 'ELONGATION', 'FWHM_IMAGE',
                                'FLUX_AUTO', 'MAG_AUTO', 'ISOAREA_IMAGE']
    MAG_ZERO = 20.
    # Detection threshold expressed as the magnitude of a 100-count source.
    DET_THRESH = -2.5 * np.log10(100.) + MAG_ZERO
    # BUGFIX: the key was misspelled 'MAG_ZERPOINT', so the intended
    # zero-point was silently ignored (the class default happens to equal
    # MAG_ZERO, so results are unchanged by this fix).
    configupdate = dict(MINAREA=3,
                        DET_THRESH=DET_THRESH,
                        MAG_ZEROPOINT=MAG_ZERO,
                        SATUR_LEVEL=2.**16,
                        SEEING_FWHM=1.2,
                        PIXEL_SCALE=1.,
                        GAIN=1.,
                        BACKPHOTO_TYPE='LOCAL',
                        BACK_SIZE=100,
                        BACK_FILTERSIZE=3
                        )
    if sexconfig is not None:
        configupdate.update(sexconfig)
    vSEx.internals.update(
        configupdate
    )
    SExCatFile = vSEx.run_SEx(catroot,
                              checks=['BACKGROUND', 'SEGMENTATION'],
                              cleanafter=cleanafter)
    # Remove scratch files without shelling out; tolerate their absence.
    if os.path.exists('sex.param'):
        os.remove('sex.param')
    SExCat = aw.utils.ldac.get_table_from_ldac(SExCatFile)
    if os.path.exists(SExCatFile):
        os.remove(SExCatFile)
    return SExCat
| gpl-3.0 |
akashsinghal/Speech-Memorization-App | Python_Backend/lib/requests/api.py | 109 | 6237 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # A throwaway session is created per call; entering it as a context
    # manager guarantees it is closed even on error, so we never leak
    # sockets (which would otherwise trigger ResourceWarnings and look
    # like a memory leak).
    session = sessions.Session()
    with session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GET requests follow redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Sends an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # OPTIONS requests follow redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike the other verbs, HEAD does NOT follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # 'data' and 'json' are explicit parameters, so they can never collide
    # with keys already present in kwargs.
    kwargs['data'] = data
    kwargs['json'] = json
    return request('post', url, **kwargs)
def put(url, data=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # 'data' is an explicit parameter, so it cannot collide with kwargs.
    kwargs['data'] = data
    return request('put', url, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # 'data' is an explicit parameter, so it cannot collide with kwargs.
    kwargs['data'] = data
    return request('patch', url, **kwargs)
def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes
        (a body may be supplied via ``data=``/``json=`` in kwargs).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
| apache-2.0 |
henrykironde/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
    """Test calibration objects with isotonic and sigmoid"""
    n_samples = 100
    X, y = make_classification(n_samples=2 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
    X -= X.min()  # MultinomialNB only allows positive X
    # split train and test
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_test, y_test = X[n_samples:], y[n_samples:]
    # Naive-Bayes
    clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]
    # cv larger than the number of samples is invalid and must raise
    pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
    assert_raises(ValueError, pc_clf.fit, X, y)
    # Naive Bayes with calibration, on both dense and sparse inputs
    for this_X_train, this_X_test in [(X_train, X_test),
                                      (sparse.csr_matrix(X_train),
                                       sparse.csr_matrix(X_test))]:
        for method in ['isotonic', 'sigmoid']:
            pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
            # Note that this fit overwrites the fit on the entire training
            # set
            pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
            prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
            # Check that brier score has improved after calibration
            assert_greater(brier_score_loss(y_test, prob_pos_clf),
                           brier_score_loss(y_test, prob_pos_pc_clf))
            # Check invariance against relabeling [0, 1] -> [1, 2]
            pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
            prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
            assert_array_almost_equal(prob_pos_pc_clf,
                                      prob_pos_pc_clf_relabeled)
            # Check invariance against relabeling [0, 1] -> [-1, 1]
            pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
            prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
            assert_array_almost_equal(prob_pos_pc_clf,
                                      prob_pos_pc_clf_relabeled)
            # Check invariance against relabeling [0, 1] -> [1, 0]
            pc_clf.fit(this_X_train, (y_train + 1) % 2,
                       sample_weight=sw_train)
            prob_pos_pc_clf_relabeled = \
                pc_clf.predict_proba(this_X_test)[:, 1]
            if method == "sigmoid":
                # Sigmoid calibration is symmetric: flipping the labels
                # flips the predicted probabilities exactly.
                assert_array_almost_equal(prob_pos_pc_clf,
                                          1 - prob_pos_pc_clf_relabeled)
            else:
                # Isotonic calibration is not invariant against relabeling
                # but should improve in both cases
                assert_greater(brier_score_loss(y_test, prob_pos_clf),
                               brier_score_loss((y_test + 1) % 2,
                                                prob_pos_pc_clf_relabeled))
    # check that calibration can also deal with regressors that have
    # a decision_function
    clf_base_regressor = CalibratedClassifierCV(Ridge())
    clf_base_regressor.fit(X_train, y_train)
    clf_base_regressor.predict(X_test)
    # Check failure cases:
    # only "isotonic" and "sigmoid" should be accepted as methods
    clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
    assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
    # base-estimators should provide either decision_function or
    # predict_proba (most regressors, for instance, should fail)
    clf_base_regressor = \
        CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
    assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
    """Fitting with sample_weight on a base estimator that ignores it must
    warn, yet still change the calibrated probabilities."""
    n_samples = 100
    X, y = make_classification(n_samples=2 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_test = X[n_samples:]
    for method in ['sigmoid', 'isotonic']:
        base_estimator = LinearSVC(random_state=42)
        calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
        # LinearSVC does not currently support sample weights but they
        # can still be used for the calibration step (with a warning)
        msg = "LinearSVC does not support sample_weight."
        assert_warns_message(
            UserWarning, msg,
            calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
        probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still yield
        # a different predictions
        calibrated_clf.fit(X_train, y_train)
        probs_without_sw = calibrated_clf.predict_proba(X_test)
        diff = np.linalg.norm(probs_with_sw - probs_without_sw)
        assert_greater(diff, 0.1)
def test_calibration_multiclass():
    """Test calibration for multiclass """
    # test multi-class setting with classifier that implements
    # only decision function
    clf = LinearSVC()
    X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
                          centers=3, cluster_std=3.0)
    # Use categorical labels to check that CalibratedClassifierCV supports
    # them correctly
    target_names = np.array(['a', 'b', 'c'])
    y = target_names[y_idx]
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]
    clf.fit(X_train, y_train)
    for method in ['isotonic', 'sigmoid']:
        cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
        cal_clf.fit(X_train, y_train)
        probas = cal_clf.predict_proba(X_test)
        # Calibrated class probabilities must sum to one per sample.
        assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
        # Check that log-loss of calibrated classifier is smaller than
        # log-loss of naively turned OvR decision function to probabilities
        # via softmax
        # NOTE(review): this baseline uses exp(-y_pred), which inverts the
        # score ordering relative to a conventional softmax; it serves only
        # as a deliberately crude reference - confirm before reusing.
        def softmax(y_pred):
            e = np.exp(-y_pred)
            return e / e.sum(axis=1).reshape(-1, 1)
        uncalibrated_log_loss = \
            log_loss(y_test, softmax(clf.decision_function(X_test)))
        calibrated_log_loss = log_loss(y_test, probas)
        assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
    # Test that calibration of a multiclass classifier decreases log-loss
    # for RandomForestClassifier
    X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
                      cluster_std=3.0)
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]
    clf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf.fit(X_train, y_train)
    clf_probs = clf.predict_proba(X_test)
    loss = log_loss(y_test, clf_probs)
    for method in ['isotonic', 'sigmoid']:
        cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
        cal_clf.fit(X_train, y_train)
        cal_clf_probs = cal_clf.predict_proba(X_test)
        cal_loss = log_loss(y_test, cal_clf_probs)
        assert_greater(loss, cal_loss)
def test_calibration_prefit():
    """Test calibration for prefitted classifiers"""
    n_samples = 50
    X, y = make_classification(n_samples=3 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
    X -= X.min()  # MultinomialNB only allows positive X
    # split train and test: three equal parts - train the base estimator,
    # fit the calibrator (cv="prefit"), and evaluate.
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_calib, y_calib, sw_calib = \
        X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
        sample_weight[n_samples:2 * n_samples]
    X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
    # Naive-Bayes
    clf = MultinomialNB()
    clf.fit(X_train, y_train, sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]
    # Naive Bayes with calibration, on both dense and sparse inputs
    for this_X_calib, this_X_test in [(X_calib, X_test),
                                      (sparse.csr_matrix(X_calib),
                                       sparse.csr_matrix(X_test))]:
        for method in ['isotonic', 'sigmoid']:
            pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
            for sw in [sw_calib, None]:
                pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
                y_prob = pc_clf.predict_proba(this_X_test)
                y_pred = pc_clf.predict(this_X_test)
                prob_pos_pc_clf = y_prob[:, 1]
                # predict must agree with the argmax of predict_proba
                assert_array_equal(y_pred,
                                   np.array([0, 1])[np.argmax(y_prob, axis=1)])
                # Brier score must improve after calibration
                assert_greater(brier_score_loss(y_test, prob_pos_clf),
                               brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
    """Test calibration values with Platt sigmoid model"""
    exF = np.array([5, -4, 1.0])
    exY = np.array([1, -1, -1])
    # computed from my python port of the C++ code in LibSVM
    AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
    # The (A, B) parameters must match the LibSVM reference values.
    assert_array_almost_equal(AB_lin_libsvm,
                              _sigmoid_calibration(exF, exY), 3)
    # And the resulting probabilities 1 / (1 + exp(A*f + B)) must match
    # _SigmoidCalibration's predictions.
    lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
    sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
    assert_array_almost_equal(lin_prob, sk_prob, 6)
    # check that _SigmoidCalibration().fit only accepts 1d array or 2d column
    # arrays
    assert_raises(ValueError, _SigmoidCalibration().fit,
                  np.vstack((exF, exF)), exY)
def test_calibration_curve():
    """Check calibration_curve function"""
    y_true = np.array([0, 0, 0, 1, 1, 1])
    y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
    prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
    # With normalize=True, doubled scores must be rescaled back to [0, 1]
    # and give the same curve.
    prob_true_unnormalized, prob_pred_unnormalized = \
        calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
    assert_equal(len(prob_true), len(prob_pred))
    assert_equal(len(prob_true), 2)
    assert_almost_equal(prob_true, [0, 1])
    assert_almost_equal(prob_pred, [0.1, 0.9])
    assert_almost_equal(prob_true, prob_true_unnormalized)
    assert_almost_equal(prob_pred, prob_pred_unnormalized)
    # probabilities outside [0, 1] should not be accepted when normalize
    # is set to False
    assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
                  normalize=False)
def test_calibration_nan_imputer():
    """Test that calibration can accept nan"""
    X, y = make_classification(n_samples=10, n_features=2,
                               n_informative=2, n_redundant=0,
                               random_state=42)
    # Inject a NaN; the Imputer step of the pipeline must absorb it so
    # that CalibratedClassifierCV never sees missing values.
    X[0, 0] = np.nan
    clf = Pipeline(
        [('imputer', Imputer()),
         ('rf', RandomForestClassifier(n_estimators=1))])
    clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
    clf_c.fit(X, y)
    clf_c.predict(X)
| bsd-3-clause |
uarka/linux-next | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
    """Populate the global 'kallsyms' list with (address, name) tuples read
    from /proc/kallsyms, sorted by address for later binary search."""
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
    except IOError:
        # /proc/kallsyms may be missing or unreadable; symbol resolution
        # then silently falls back to raw addresses.
        return
    try:
        for line in f:
            # Format: "<hex-address> <type> <name> [module]"
            fields = line.split()
            loc = int(fields[0], 16)
            name = fields[2]
            kallsyms.append((loc, name))
    finally:
        f.close()
    kallsyms.sort()
def get_sym(sloc):
    """Resolve an address (given as a decimal string) against the sorted
    kallsyms table; return (symbol_name, offset) or (None, 0) when the
    address precedes every known symbol."""
    loc = int(sloc)
    # Lower-bound binary search: on exit, 'lo' indexes the last entry
    # whose address is <= loc (or -1 if there is none), and 'hi' the
    # first entry whose address is > loc.
    lo, hi = -1, len(kallsyms)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if kallsyms[mid][0] <= loc:
            lo = mid
        else:
            hi = mid
    if lo < 0:
        return (None, 0)
    symloc, name = kallsyms[lo]
    return (name, loc - symloc)
def print_drop_table():
    """Print one row per drop location: symbol (or raw address when the
    address cannot be resolved), offset within the symbol, and count."""
    # print(...) with a single argument behaves identically on Python 2
    # and Python 3, unlike the original print statement.
    print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
    for loc in drop_log:
        (sym, off) = get_sym(loc)
        if sym is None:
            # Address below any known symbol: fall back to the raw value.
            sym = loc
        print("%25s %25s %25s" % (sym, off, drop_log[loc]))
def trace_begin():
    """Called by perf when tracing starts."""
    # Parenthesized single-argument print works on both Python 2 and 3.
    print("Starting trace (Ctrl-C to dump results)")
def trace_end():
    """Called by perf when tracing ends: load the symbol table and dump
    the accumulated drop counts."""
    print("Gathering kallsyms data")
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, location, protocol):
    """Per-event hook invoked by perf for each skb:kfree_skb tracepoint
    hit; counts drops keyed by the call-site address (as a string).
    All other arguments are supplied by perf but unused here."""
    slocation = str(location)
    # dict.get replaces the original bare 'except:' increment idiom,
    # which could mask unrelated errors.
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
Broadcom/cygnus-linux | scripts/gdb/linux/dmesg.py | 298 | 2488 | #
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import sys
from linux import utils
class LxDmesg(gdb.Command):
    """Print Linux kernel log buffer."""
    def __init__(self):
        # Register as the 'lx-dmesg' gdb command.
        super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
    def invoke(self, arg, from_tty):
        """Read the kernel's printk ring buffer out of inferior memory and
        print each record as '[seconds.micros] text'."""
        # Resolve the buffer address and ring indices from printk.c's
        # file-static variables.
        log_buf_addr = int(str(gdb.parse_and_eval(
            "(void *)'printk.c'::log_buf")).split()[0], 16)
        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
        inf = gdb.inferiors()[0]
        start = log_buf_addr + log_first_idx
        if log_first_idx < log_next_idx:
            # Contiguous case: the valid records form a single span.
            log_buf_2nd_half = -1
            length = log_next_idx - log_first_idx
            log_buf = utils.read_memoryview(inf, start, length).tobytes()
        else:
            # Wrapped case: read [first_idx, buf_end) then [0, next_idx)
            # and concatenate; remember where the wrap point landed.
            log_buf_2nd_half = log_buf_len - log_first_idx
            a = utils.read_memoryview(inf, start, log_buf_2nd_half)
            b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
            log_buf = a.tobytes() + b.tobytes()
        pos = 0
        while pos < log_buf.__len__():
            # Record header layout (matches the kernel's struct printk_log):
            # u64 timestamp at offset 0, u16 total record length at offset
            # 8, u16 text length at offset 10; text starts at offset 16.
            length = utils.read_u16(log_buf[pos + 8:pos + 10])
            if length == 0:
                if log_buf_2nd_half == -1:
                    gdb.write("Corrupted log buffer!\n")
                    break
                # A zero-length record marks the wrap point: continue at
                # the start of the copied second half.
                pos = log_buf_2nd_half
                continue
            text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
            text = log_buf[pos + 16:pos + 16 + text_len].decode(
                encoding='utf8', errors='replace')
            time_stamp = utils.read_u64(log_buf[pos:pos + 8])
            for line in text.splitlines():
                # Timestamp is in nanoseconds; render as seconds.
                msg = u"[{time:12.6f}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line)
                # With python2 gdb.write will attempt to convert unicode to
                # ascii and might fail so pass an utf8-encoded str instead.
                if sys.hexversion < 0x03000000:
                    msg = msg.encode(encoding='utf8', errors='replace')
                gdb.write(msg)
            pos += length
# Instantiate once at import time to register the command with gdb.
LxDmesg()
| gpl-2.0 |
dipeshtech/TextBlob | textblob/download_corpora.py | 12 | 1025 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Downloads the necessary NLTK corpora for TextBlob.
Usage: ::
$ python -m textblob.download_corpora
If you only intend to use TextBlob's default models, you can use the "lite"
option: ::
$ python -m textblob.download_corpora lite
"""
import sys
import nltk
MIN_CORPORA = [
'brown', # Required for FastNPExtractor
'punkt', # Required for WordTokenizer
'wordnet' # Required for lemmatization
]
ADDITIONAL_CORPORA = [
'conll2000', # Required for ConllExtractor
'maxent_treebank_pos_tagger', # Required for NLTKTagger
'movie_reviews', # Required for NaiveBayesAnalyzer
]
ALL_CORPORA = MIN_CORPORA + ADDITIONAL_CORPORA
def download_lite():
    """Fetch only the corpora required by TextBlob's default models."""
    for corpus in MIN_CORPORA:
        nltk.download(corpus)
def download_all():
    """Fetch every corpus TextBlob can use, including optional extras."""
    for corpus in ALL_CORPORA:
        nltk.download(corpus)
def main():
    """Download all corpora, or only the minimal set if 'lite' was passed
    on the command line."""
    downloader = download_lite if 'lite' in sys.argv else download_all
    downloader()
    print("Finished.")


if __name__ == '__main__':
    main()
| mit |
iedparis8/django-helpdesk | south_migrations/0005_auto__add_field_customfield_empty_selection_list.py | 9 | 17731 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the boolean 'empty_selection_list'
        column (default False) to the helpdesk_customfield table."""
        # Adding field 'CustomField.empty_selection_list'
        db.add_column('helpdesk_customfield', 'empty_selection_list', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'CustomField.empty_selection_list'
db.delete_column('helpdesk_customfield', 'empty_selection_list')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helpdesk.attachment': {
'Meta': {'ordering': "['filename']", 'object_name': 'Attachment'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'followup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.FollowUp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'helpdesk.customfield': {
'Meta': {'object_name': 'CustomField'},
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'decimal_places': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'empty_selection_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'help_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': "'30'"}),
'list_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'max_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'staff_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'helpdesk.emailtemplate': {
'Meta': {'ordering': "['template_name', 'locale']", 'object_name': 'EmailTemplate'},
'heading': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'plain_text': ('django.db.models.fields.TextField', [], {}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helpdesk.escalationexclusion': {
'Meta': {'object_name': 'EscalationExclusion'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.followup': {
'Meta': {'ordering': "['date']", 'object_name': 'FollowUp'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 2, 14, 54, 29, 596233)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.ignoreemail': {
'Meta': {'object_name': 'IgnoreEmail'},
'date': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'email_address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keep_in_mailbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.kbcategory': {
'Meta': {'ordering': "['title']", 'object_name': 'KBCategory'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helpdesk.kbitem': {
'Meta': {'ordering': "['title']", 'object_name': 'KBItem'},
'answer': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.KBCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {}),
'recommendations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'helpdesk.presetreply': {
'Meta': {'ordering': "['name']", 'object_name': 'PreSetReply'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.queue': {
'Meta': {'ordering': "('title',)", 'object_name': 'Queue'},
'allow_email_submission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_public_submission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'email_box_host': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email_box_imap_folder': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email_box_interval': ('django.db.models.fields.IntegerField', [], {'default': "'5'", 'null': 'True', 'blank': 'True'}),
'email_box_last_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email_box_pass': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email_box_port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'email_box_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_box_type': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'email_box_user': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'escalate_days': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'new_ticket_cc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_ticket_cc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'helpdesk.savedsearch': {
'Meta': {'object_name': 'SavedSearch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'query': ('django.db.models.fields.TextField', [], {}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'helpdesk.ticket': {
'Meta': {'object_name': 'Ticket'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_to'", 'null': 'True', 'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_escalation': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'on_hold': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '3', 'blank': '3'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Queue']"}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'submitter_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'helpdesk.ticketcc': {
'Meta': {'object_name': 'TicketCC'},
'can_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.ticketchange': {
'Meta': {'object_name': 'TicketChange'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'followup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.FollowUp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'old_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'helpdesk.ticketcustomfieldvalue': {
'Meta': {'unique_together': "(('ticket', 'field'),)", 'object_name': 'TicketCustomFieldValue'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.CustomField']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'helpdesk.ticketdependency': {
'Meta': {'unique_together': "(('ticket', 'depends_on'),)", 'object_name': 'TicketDependency'},
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'depends_on'", 'to': "orm['helpdesk.Ticket']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticketdependency'", 'to': "orm['helpdesk.Ticket']"})
},
'helpdesk.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'settings_pickled': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['helpdesk']
| bsd-3-clause |
cpennington/edx-platform | common/djangoapps/util/date_utils.py | 4 | 18655 | """
Convenience methods for working with datetime objects
"""
import re
from datetime import datetime, timedelta
import six
from django.utils.translation import pgettext, ugettext
from pytz import UnknownTimeZoneError, timezone, utc
def get_default_time_display(dtime):
    """
    Converts a datetime to a string representation. This is the default
    representation used in Studio and LMS.

    It will use the "DATE_TIME" format in the current language, if provided,
    or defaults to "Apr 09, 2013 at 16:00 UTC".

    If None is passed in for dtime, an empty string will be returned.

    Arguments:
        dtime (datetime or None): the value to format.

    Returns:
        unicode: the localized date/time string, or u"" when dtime is None.
    """
    if dtime is None:
        return u""
    if dtime.tzinfo is not None:
        try:
            # Renamed from ``timezone`` so this local no longer shadows
            # pytz's ``timezone`` factory imported at module level.
            tz_suffix = u" " + dtime.tzinfo.tzname(dtime)
        except NotImplementedError:
            # tzinfo implementations without tzname() fall back to the numeric
            # offset, e.g. "+0000" (no leading space -- preserved behavior).
            tz_suffix = dtime.strftime('%z')
    else:
        tz_suffix = u" UTC"
    localized = strftime_localized(dtime, "DATE_TIME")
    return (localized + tz_suffix).strip()
def get_time_display(dtime, format_string=None, coerce_tz=None):
    """
    Convert a datetime to a localized string representation.

    Falls back to :func:`get_default_time_display` when ``dtime`` is None,
    when no ``format_string`` is given, or when the format string is not a
    valid strftime argument.

    ``coerce_tz``, when set, names a pytz timezone (e.g. "US/Pacific") that
    an *aware* ``dtime`` is converted to first; an unknown name falls back
    to UTC.

    ``format_string`` should be a unicode string that is a valid argument
    for datetime's strftime method.
    """
    if coerce_tz and dtime is not None and dtime.tzinfo is not None:
        try:
            target_tz = timezone(coerce_tz)
        except UnknownTimeZoneError:
            target_tz = utc
        dtime = target_tz.normalize(dtime.astimezone(target_tz))

    if dtime is None or format_string is None:
        return get_default_time_display(dtime)

    try:
        return six.text_type(strftime_localized(dtime, format_string))
    except ValueError:
        return get_default_time_display(dtime)
def almost_same_datetime(dt1, dt2, allowed_delta=timedelta(minutes=1)):
    """
    Return True when ``dt1`` and ``dt2`` differ by less than ``allowed_delta``
    (one minute by default), in either direction.

    Useful when seconds were truncated on save, or the timezones are not
    exactly the same.

    :param dt1: first datetime
    :param dt2: second datetime
    :param allowed_delta: maximum tolerated difference (exclusive)
    """
    difference = dt1 - dt2
    if difference < timedelta(0):
        difference = -difference
    return difference < allowed_delta
def to_timestamp(datetime_value):
    """
    Convert a datetime into a timestamp, represented as the number
    of seconds since January 1, 1970 UTC.

    ``datetime_value`` must be timezone-aware; subtracting the aware epoch
    from a naive datetime raises TypeError (unchanged behavior).
    """
    # Use the stdlib UTC tzinfo so this helper does not depend on pytz's
    # ``utc``; arithmetic between aware datetimes is identical either way.
    from datetime import timezone as _timezone
    epoch = datetime(1970, 1, 1, tzinfo=_timezone.utc)
    return int((datetime_value - epoch).total_seconds())
def from_timestamp(timestamp):
    """
    Convert a timestamp (number of seconds since Jan 1, 1970 UTC)
    into a timezone-aware datetime.

    If the timestamp cannot be converted, returns None instead.
    """
    from datetime import timezone as _timezone
    try:
        # Equivalent to ``utcfromtimestamp(...).replace(tzinfo=utc)`` but
        # aware from the start; ``utcfromtimestamp`` is deprecated since
        # Python 3.12.  The returned tzinfo is the stdlib UTC object, which
        # has the same zero offset as pytz's and compares equal.
        return datetime.fromtimestamp(int(timestamp), tz=_timezone.utc)
    except (ValueError, TypeError):
        return None
# Hard-coded fallback formats, used by strftime_localized() when the active
# translation leaves the corresponding *_FORMAT msgid untranslated.
DEFAULT_SHORT_DATE_FORMAT = "%b %d, %Y"
DEFAULT_LONG_DATE_FORMAT = "%A, %B %d, %Y"
DEFAULT_TIME_FORMAT = "%I:%M:%S %p"
DEFAULT_DATE_TIME_FORMAT = "%b %d, %Y at %H:%M"
# NOTE: "%-I" and "%P" are glibc-style strftime extensions, not portable to
# every platform's strftime implementation.
DEFAULT_DAY_AND_TIME_FORMAT = "%A at %-I%P"
def strftime_localized(dtime, format):  # pylint: disable=redefined-builtin
    """
    Format a datetime, just like the built-in strftime, but with localized words.

    The format string can also be one of:

    * "SHORT_DATE" for a date in brief form, localized.
    * "LONG_DATE" for a longer form of date, localized.
    * "DATE_TIME" for a date and time together, localized.
    * "TIME" for just the time, localized.

    The localization is based on the current language Django is using for the
    request. The exact format strings used for each of the names above is
    determined by the translator for each language.

    Args:
        dtime (datetime): The datetime value to format.

        format (str): The format string to use, as specified by
            :ref:`datetime.strftime`.

    Returns:
        A unicode string with the formatted datetime.
    """
    # Resolve the named shortcut formats to real strftime format strings.
    # An untranslated msgid comes back from ugettext unchanged, in which
    # case the hard-coded DEFAULT_* fallback is used instead.
    if format == "SHORT_DATE":
        format = "%x"
    elif format == "LONG_DATE":
        # Translators: the translation for "LONG_DATE_FORMAT" must be a format
        # string for formatting dates in a long form. For example, the
        # American English form is "%A, %B %d %Y".
        # See http://strftime.org for details.
        format = ugettext("LONG_DATE_FORMAT")
        if format == "LONG_DATE_FORMAT":
            format = DEFAULT_LONG_DATE_FORMAT
    elif format == "DATE_TIME":
        # Translators: the translation for "DATE_TIME_FORMAT" must be a format
        # string for formatting dates with times. For example, the American
        # English form is "%b %d, %Y at %H:%M".
        # See http://strftime.org for details.
        format = ugettext("DATE_TIME_FORMAT")
        if format == "DATE_TIME_FORMAT":
            format = DEFAULT_DATE_TIME_FORMAT
    elif format == "DAY_AND_TIME":
        format = DEFAULT_DAY_AND_TIME_FORMAT
    elif format == "TIME":
        format = "%X"

    def process_percent_code(match):
        """
        Convert one percent-prefixed code in the format string.

        Called by re.sub just below.
        """
        code = match.group()
        if code == "%":
            # This only happens if the string ends with a %, which is not legal.
            raise ValueError("strftime format ends with raw %")

        # Codes with localized word values are looked up in the translation
        # tables defined below in this module.
        if code == "%a":
            part = pgettext('abbreviated weekday name', WEEKDAYS_ABBREVIATED[dtime.weekday()])
        elif code == "%A":
            part = pgettext('weekday name', WEEKDAYS[dtime.weekday()])
        elif code == "%b":
            part = pgettext('abbreviated month name', MONTHS_ABBREVIATED[dtime.month])
        elif code == "%B":
            part = pgettext('month name', MONTHS[dtime.month])
        elif code == "%p":
            part = pgettext('am/pm indicator', AM_PM[dtime.hour // 12])
        elif code == "%x":
            # Get the localized short date format, and recurse.
            # Translators: the translation for "SHORT_DATE_FORMAT" must be a
            # format string for formatting dates in a brief form. For example,
            # the American English form is "%b %d %Y".
            # See http://strftime.org for details.
            actual_format = ugettext("SHORT_DATE_FORMAT")
            if actual_format == "SHORT_DATE_FORMAT":
                actual_format = DEFAULT_SHORT_DATE_FORMAT
            if "%x" in actual_format:
                # Prevent infinite accidental recursion.
                actual_format = DEFAULT_SHORT_DATE_FORMAT
            part = strftime_localized(dtime, actual_format)
        elif code == "%X":
            # Get the localized time format, and recurse.
            # Translators: the translation for "TIME_FORMAT" must be a format
            # string for formatting times. For example, the American English
            # form is "%H:%M:%S". See http://strftime.org for details.
            actual_format = ugettext("TIME_FORMAT")
            if actual_format == "TIME_FORMAT":
                actual_format = DEFAULT_TIME_FORMAT
            if "%X" in actual_format:
                # Prevent infinite accidental recursion.
                actual_format = DEFAULT_TIME_FORMAT
            part = strftime_localized(dtime, actual_format)
        else:
            # All the other format codes: just let built-in strftime take
            # care of them.
            part = dtime.strftime(code)
        return part

    # Translate each percent-code individually; "%-?" codes (matched by the
    # first alternative) fall through to built-in strftime in the else branch.
    formatted_date = re.sub(r"%-.|%.|%", process_percent_code, format)
    return formatted_date
# In order to extract the strings below, we have to mark them with pgettext.
# But we'll do the actual pgettext later, so use a no-op for now, and save the
# real pgettext so we can assign it back to the global name later.
real_pgettext = pgettext
pgettext = lambda context, text: text  # pylint: disable=invalid-name

# Keyed by ``dtime.hour // 12``: 0 = morning (AM), 1 = afternoon/evening (PM).
AM_PM = {
    # Translators: This is an AM/PM indicator for displaying times.  It is
    # used for the %p directive in date-time formats. See http://strftime.org
    # for details.
    0: pgettext('am/pm indicator', 'AM'),
    # Translators: This is an AM/PM indicator for displaying times.  It is
    # used for the %p directive in date-time formats. See http://strftime.org
    # for details.
    1: pgettext('am/pm indicator', 'PM'),
}

# Keyed by ``datetime.weekday()``: 0 = Monday ... 6 = Sunday.
WEEKDAYS = {
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Monday Februrary 10, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    0: pgettext('weekday name', 'Monday'),
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Tuesday Februrary 11, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    1: pgettext('weekday name', 'Tuesday'),
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Wednesday Februrary 12, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    2: pgettext('weekday name', 'Wednesday'),
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Thursday Februrary 13, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    3: pgettext('weekday name', 'Thursday'),
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Friday Februrary 14, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    4: pgettext('weekday name', 'Friday'),
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Saturday Februrary 15, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    5: pgettext('weekday name', 'Saturday'),
    # Translators: this is a weekday name that will be used when displaying
    # dates, as in "Sunday Februrary 16, 2014". It is used for the %A
    # directive in date-time formats. See http://strftime.org for details.
    6: pgettext('weekday name', 'Sunday'),
}

# Keyed by ``datetime.weekday()``: 0 = Mon ... 6 = Sun.
WEEKDAYS_ABBREVIATED = {
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Mon Feb 10, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    0: pgettext('abbreviated weekday name', 'Mon'),
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Tue Feb 11, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    1: pgettext('abbreviated weekday name', 'Tue'),
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Wed Feb 12, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    2: pgettext('abbreviated weekday name', 'Wed'),
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Thu Feb 13, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    3: pgettext('abbreviated weekday name', 'Thu'),
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Fri Feb 14, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    4: pgettext('abbreviated weekday name', 'Fri'),
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Sat Feb 15, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    5: pgettext('abbreviated weekday name', 'Sat'),
    # Translators: this is an abbreviated weekday name that will be used when
    # displaying dates, as in "Sun Feb 16, 2014". It is used for the %a
    # directive in date-time formats. See http://strftime.org for details.
    6: pgettext('abbreviated weekday name', 'Sun'),
}

# Keyed by ``datetime.month``: 1 = Jan ... 12 = Dec.
MONTHS_ABBREVIATED = {
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Jan 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    1: pgettext('abbreviated month name', 'Jan'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Feb 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    2: pgettext('abbreviated month name', 'Feb'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Mar 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    3: pgettext('abbreviated month name', 'Mar'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Apr 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    4: pgettext('abbreviated month name', 'Apr'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "May 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    5: pgettext('abbreviated month name', 'May'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Jun 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    6: pgettext('abbreviated month name', 'Jun'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Jul 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    7: pgettext('abbreviated month name', 'Jul'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Aug 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    8: pgettext('abbreviated month name', 'Aug'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Sep 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    9: pgettext('abbreviated month name', 'Sep'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Oct 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    10: pgettext('abbreviated month name', 'Oct'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Nov 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    11: pgettext('abbreviated month name', 'Nov'),
    # Translators: this is an abbreviated month name that will be used when
    # displaying dates, as in "Dec 10, 2014". It is used for the %b
    # directive in date-time formats. See http://strftime.org for details.
    12: pgettext('abbreviated month name', 'Dec'),
}

# Keyed by ``datetime.month``: 1 = January ... 12 = December.
MONTHS = {
    # Translators: this is a month name that will be used when displaying
    # dates, as in "January 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    1: pgettext('month name', 'January'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "February 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    2: pgettext('month name', 'February'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "March 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    3: pgettext('month name', 'March'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "April 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    4: pgettext('month name', 'April'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "May 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    5: pgettext('month name', 'May'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "June 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    6: pgettext('month name', 'June'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "July 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    7: pgettext('month name', 'July'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "August 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    8: pgettext('month name', 'August'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "September 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    9: pgettext('month name', 'September'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "October 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    10: pgettext('month name', 'October'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "November 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    11: pgettext('month name', 'November'),
    # Translators: this is a month name that will be used when displaying
    # dates, as in "December 10, 2014". It is used for the %B directive in
    # date-time formats. See http://strftime.org for details.
    12: pgettext('month name', 'December'),
}

# Now that we are done defining constants, we have to restore the real pgettext
# so that the functions in this module will have the right definition.
pgettext = real_pgettext
| agpl-3.0 |
Hubert51/AutoGrading | learning/web_Haotian/venv/Lib/site-packages/jinja2/tests.py | 74 | 4237 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
import re
from collections import Mapping
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, integer_types
import decimal
# Matches an optionally-negative integer or decimal literal in its entirety.
number_re = re.compile(r'^-?\d+(\.\d+)?$')
# Type object for compiled regex patterns (there is no public name for it).
regex_type = type(number_re)
# The 'callable' test is simply the builtin predicate.
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
    """Return true if the variable is defined:

    .. sourcecode:: jinja

        {% if variable is defined %}
            value of variable: {{ variable }}
        {% else %}
            variable is not defined
        {% endif %}

    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    if isinstance(value, Undefined):
        return False
    return True
def test_undefined(value):
    """Like :func:`defined` but the other way round."""
    return not test_defined(value)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
    """Return true if the variable is lowercased."""
    text = text_type(value)
    return text.islower()
def test_upper(value):
    """Return true if the variable is uppercased."""
    text = text_type(value)
    return text.isupper()
def test_string(value):
    """Return true if the object is a string."""
    is_str = isinstance(value, string_types)
    return is_str
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, Mapping)
def test_number(value):
    """Return true if the variable is a number."""
    numeric_types = integer_types + (float, complex, decimal.Decimal)
    return isinstance(value, numeric_types)
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
def test_in(value, seq):
"""Check if value is in seq.
.. versionadded:: 2.10
"""
return value in seq
# Registry of built-in tests: maps the name used with Jinja's ``is``
# operator to the implementing callable.  The symbolic entries (``==``,
# ``!=``, ``>`` ...) alias the :mod:`operator` comparison functions so
# expressions like ``x is eq 1`` work.
TESTS = {
    'odd': test_odd,
    'even': test_even,
    'divisibleby': test_divisibleby,
    'defined': test_defined,
    'undefined': test_undefined,
    'none': test_none,
    'lower': test_lower,
    'upper': test_upper,
    'string': test_string,
    'mapping': test_mapping,
    'number': test_number,
    'sequence': test_sequence,
    'iterable': test_iterable,
    'callable': test_callable,
    'sameas': test_sameas,
    'escaped': test_escaped,
    'in': test_in,
    '==': operator.eq,
    'eq': operator.eq,
    'equalto': operator.eq,
    '!=': operator.ne,
    'ne': operator.ne,
    '>': operator.gt,
    'gt': operator.gt,
    'greaterthan': operator.gt,
    'ge': operator.ge,
    '>=': operator.ge,
    '<': operator.lt,
    'lt': operator.lt,
    'lessthan': operator.lt,
    '<=': operator.le,
    'le': operator.le,
}
| mit |
pranavtendolkr/horizon | openstack_dashboard/dashboards/project/routers/ports/tables.py | 53 | 4609 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.ports \
import tables as project_tables
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
def get_device_owner(port):
    """Return a localized label for what a router port is attached to."""
    owner = port['device_owner']
    if owner == 'network:router_gateway':
        return _('External Gateway')
    if owner == 'network:router_interface':
        return _('Internal Interface')
    # Anything else gets a blank-ish cell in the table.
    return ' '
class AddInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Table action that opens the "Add Interface" modal for the router."""
    name = "create"
    verbose_name = _("Add Interface")
    url = "horizon:project:routers:addinterface"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "add_router_interface"),)

    def get_link_url(self, datum=None):
        """Build the add-interface URL scoped to this table's router."""
        router_id = self.table.kwargs['router_id']
        return reverse(self.url, args=(router_id,))
class RemoveInterface(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action that detaches a port from the router.

    Gateway ports are removed through the router-gateway API; every other
    port is removed as an internal router interface.
    """
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Interface",
            u"Delete Interfaces",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Interface",
            u"Deleted Interfaces",
            count
        )

    failure_url = 'horizon:project:routers:detail'
    policy_rules = (("network", "remove_router_interface"),)

    def delete(self, request, obj_id):
        """Remove the port *obj_id*; on failure redirect to the router page."""
        try:
            router_id = self.table.kwargs['router_id']
            port = api.neutron.port_get(request, obj_id)
            # Gateway ports require a different API call than interfaces.
            if port['device_owner'] == 'network:router_gateway':
                api.neutron.router_remove_gateway(request, router_id)
            else:
                api.neutron.router_remove_interface(request,
                                                    router_id,
                                                    port_id=obj_id)
        except Exception:
            msg = _('Failed to delete interface %s') % obj_id
            LOG.info(msg)
            router_id = self.table.kwargs['router_id']
            redirect = reverse(self.failure_url,
                               args=[router_id])
            exceptions.handle(request, msg, redirect=redirect)
# Translatable labels for a port's admin state, shown in the table.
DISPLAY_CHOICES = (
    ("UP", pgettext_lazy("Admin state of a Port", u"UP")),
    ("DOWN", pgettext_lazy("Admin state of a Port", u"DOWN")),
)
# Translatable labels for a port's operational status, shown in the table.
STATUS_DISPLAY_CHOICES = (
    ("ACTIVE", pgettext_lazy("current status of port", u"Active")),
    ("BUILD", pgettext_lazy("current status of port", u"Build")),
    ("DOWN", pgettext_lazy("current status of port", u"Down")),
    ("ERROR", pgettext_lazy("current status of port", u"Error")),
)
class PortsTable(tables.DataTable):
    """Table listing the ports (interfaces) attached to a single router."""
    name = tables.Column("name_or_id",
                         verbose_name=_("Name"),
                         link="horizon:project:networks:ports:detail")
    fixed_ips = tables.Column(project_tables.get_fixed_ips,
                              verbose_name=_("Fixed IPs"))
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           display_choices=STATUS_DISPLAY_CHOICES)
    device_owner = tables.Column(get_device_owner,
                                 verbose_name=_("Type"))
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"),
                                display_choices=DISPLAY_CHOICES)

    def get_object_display(self, port):
        """Identify rows by the port's UUID."""
        return port.id

    class Meta(object):
        name = "interfaces"
        verbose_name = _("Interfaces")
        table_actions = (AddInterface, RemoveInterface)
        row_actions = (RemoveInterface, )
| apache-2.0 |
JackPrice/ansible-modules-extras | cloud/misc/ovirt.py | 32 | 14190 | #!/usr/bin/python
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ovirt
author: Vincent Van der Kussen
short_description: oVirt/RHEV platform management
description:
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
version_added: "1.4"
options:
user:
description:
- the user to authenticate with
default: null
required: true
aliases: []
url:
description:
- the url of the oVirt instance
default: null
required: true
aliases: []
instance_name:
description:
- the name of the instance to use
default: null
required: true
aliases: [ vmname ]
password:
description:
- password of the user to authenticate with
default: null
required: true
aliases: []
image:
description:
- template to use for the instance
default: null
required: false
aliases: []
resource_type:
description:
- whether you want to deploy an image or create an instance from scratch.
default: null
required: false
aliases: []
choices: [ 'new', 'template' ]
zone:
description:
- deploy the image to this oVirt cluster
default: null
required: false
aliases: []
instance_disksize:
description:
- size of the instance's disk in GB
default: null
required: false
aliases: [ vm_disksize]
instance_cpus:
description:
- the instance's number of cpu's
default: 1
required: false
aliases: [ vmcpus ]
instance_nic:
description:
- name of the network interface in oVirt/RHEV
default: null
required: false
aliases: [ vmnic ]
instance_network:
description:
- the logical network the machine should belong to
default: rhevm
required: false
aliases: [ vmnetwork ]
instance_mem:
description:
- the instance's amount of memory in MB
default: null
required: false
aliases: [ vmmem ]
instance_type:
description:
- define if the instance is a server or desktop
default: server
required: false
aliases: [ vmtype ]
choices: [ 'server', 'desktop' ]
disk_alloc:
description:
- define if disk is thin or preallocated
default: thin
required: false
aliases: []
choices: [ 'thin', 'preallocated' ]
disk_int:
description:
- interface type of the disk
default: virtio
required: false
aliases: []
choices: [ 'virtio', 'ide' ]
instance_os:
description:
- type of Operating System
default: null
required: false
aliases: [ vmos ]
instance_cores:
description:
- define the instance's number of cores
default: 1
required: false
aliases: [ vmcores ]
sdomain:
description:
- the Storage Domain where you want to create the instance's disk on.
default: null
required: false
aliases: []
region:
description:
- the oVirt/RHEV datacenter where you want to deploy to
default: null
required: false
aliases: []
state:
description:
- create, terminate or remove instances
default: 'present'
required: false
aliases: []
choices: ['present', 'absent', 'shutdown', 'started', 'restart']
requirements: [ "ovirt-engine-sdk" ]
'''
EXAMPLES = '''
# Basic example provisioning from image.
action: ovirt >
user=admin@internal
url=https://ovirt.example.com
instance_name=ansiblevm04
password=secret
image=centos_64
zone=cluster01
resource_type=template"
# Full example to create new instance from scratch
action: ovirt >
instance_name=testansible
resource_type=new
instance_type=server
user=admin@internal
password=secret
url=https://ovirt.example.com
instance_disksize=10
zone=cluster01
region=datacenter1
instance_cpus=1
instance_nic=nic1
instance_network=rhevm
instance_mem=1000
disk_alloc=thin
sdomain=FIBER01
instance_cores=1
instance_os=rhel_6x64
disk_int=virtio"
# stopping an instance
action: ovirt >
instance_name=testansible
state=shutdown
user=admin@internal
password=secret
url=https://ovirt.example.com
# starting an instance
action: ovirt >
instance_name=testansible
state=started
user=admin@internal
password=secret
url=https://ovirt.example.com
'''
import sys
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
except ImportError:
print "failed=True msg='ovirtsdk required for this module'"
sys.exit(1)
# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
    """Open a connection to the oVirt API and verify it with a test call.

    Prints an error and exits the process on failure (this module predates
    AnsibleModule-based error reporting for connection setup).
    """
    api = API(url=url, username=user, password=password, insecure=True)
    try:
        value = api.test()
    except:
        print "error connecting to the oVirt API"
        sys.exit(1)
    return api
# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    """Create a new VM from the Blank template, then attach a disk and a NIC.

    The disk is sparse/cow when *vmdisk_alloc* == 'thin' and raw/preallocated
    when it is 'preallocated'.  VM-creation errors abort the process; disk and
    NIC attach errors are only printed.
    """
    if vmdisk_alloc == 'thin':
        # define VM params
        vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
        # define disk params
        vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
                            storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        # NOTE(review): this branch hard-codes the NIC name 'nic1' while the
        # preallocated branch uses the caller-supplied vmnic -- confirm which
        # is intended before changing behavior.
        nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
    elif vmdisk_alloc == 'preallocated':
        # define VM params
        vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
        # define disk params
        vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
                            storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')

    try:
        conn.vms.add(vmparams)
    except:
        print "Error creating VM with specified parameters"
        sys.exit(1)
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except:
        print "Error attaching disk"
    try:
        vm.nics.add(nic_net1)
    except:
        print "Error adding nic"
# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
    """Clone a new VM named *vmname* from template *image* in cluster *zone*."""
    vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
    try:
        conn.vms.add(vmparams)
    except:
        print 'error adding template %s' % image
        sys.exit(1)
# start instance
def vm_start(conn, vmname):
    """Power on the VM identified by *vmname*."""
    conn.vms.get(name=vmname).start()
# Stop instance
def vm_stop(conn, vmname):
    """Power off the VM identified by *vmname*."""
    conn.vms.get(name=vmname).stop()
# restart instance
def vm_restart(conn, vmname):
    """Restart a VM: stop it, wait until it reports 'down', then start it.

    Polls the API every 5 seconds because oVirt rejects a start request
    while the VM is still shutting down.  The previous unused
    ``state = vm_status(...)`` call only printed the status (polluting the
    module's JSON output) and has been removed.
    """
    import time  # 'time' is not imported at module level; keep it local
    vm = conn.vms.get(name=vmname)
    vm.stop()
    # Re-fetch the VM each poll (a cached object's status goes stale) and
    # use the same name= keyword lookup used everywhere else in this module.
    while conn.vms.get(name=vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()
# remove an instance
def vm_remove(conn, vmname):
    """Delete the VM identified by *vmname* from oVirt."""
    target = conn.vms.get(name=vmname)
    target.delete()
# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
    """Return the VM's current status string (e.g. 'up', 'down')."""
    status = conn.vms.get(name=vmname).status.state
    print "vm status is : %s" % status
    return status
# Get VM object and return it's name if object exists
def get_vm(conn, vmname):
    """Return the VM's name, or the sentinel string "empty" if it doesn't exist."""
    vm = conn.vms.get(name=vmname)
    # NOTE(review): 'vm is None' would be the idiomatic check here.
    if vm == None:
        name = "empty"
        print "vmname: %s" % name
    else:
        name = vm.get_name()
        print "vmname: %s" % name
    return name
# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main
def main():
    """Ansible module entry point.

    Parses the module arguments, opens an oVirt API connection, and
    dispatches on ``state``: present (create), started, shutdown,
    restart, absent (remove).  Each branch exits via module.exit_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
            #name = dict(required=True),
            user = dict(required=True),
            url = dict(required=True),
            instance_name = dict(required=True, aliases=['vmname']),
            password = dict(required=True),
            image = dict(),
            resource_type = dict(choices=['new', 'template']),
            zone = dict(),
            instance_disksize = dict(aliases=['vm_disksize']),
            instance_cpus = dict(default=1, aliases=['vmcpus']),
            instance_nic = dict(aliases=['vmnic']),
            instance_network = dict(default='rhevm', aliases=['vmnetwork']),
            instance_mem = dict(aliases=['vmmem']),
            instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
            disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
            disk_int = dict(default='virtio', choices=['virtio', 'ide']),
            instance_os = dict(aliases=['vmos']),
            instance_cores = dict(default=1, aliases=['vmcores']),
            sdomain = dict(),
            region = dict(),
        )
    )

    # Unpack module parameters into locals used by the helper functions.
    state = module.params['state']
    user = module.params['user']
    url = module.params['url']
    vmname = module.params['instance_name']
    password = module.params['password']
    image = module.params['image'] # name of the image to deploy
    resource_type = module.params['resource_type'] # template or from scratch
    zone = module.params['zone'] # oVirt cluster
    vmdisk_size = module.params['instance_disksize'] # disksize
    vmcpus = module.params['instance_cpus'] # number of cpu
    vmnic = module.params['instance_nic'] # network interface
    vmnetwork = module.params['instance_network'] # logical network
    vmmem = module.params['instance_mem'] # mem size
    vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
    vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
    vmos = module.params['instance_os'] # Operating System
    vmtype = module.params['instance_type'] # server or desktop
    vmcores = module.params['instance_cores'] # number of cores
    sdomain = module.params['sdomain'] # storage domain to store disk on
    region = module.params['region'] # oVirt Datacenter

    #initialize connection
    c = conn(url+"/api", user, password)

    if state == 'present':
        if get_vm(c, vmname) == "empty":
            if resource_type == 'template':
                create_vm_template(c, vmname, image, zone)
                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
            elif resource_type == 'new':
                # FIXME: refactor, use keyword args.
                create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
            else:
                module.exit_json(changed=False, msg="You did not specify a resource type")
        else:
            module.exit_json(changed=False, msg="VM %s already exists" % vmname)

    if state == 'started':
        if vm_status(c, vmname) == 'up':
            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
        else:
            vm_start(c, vmname)
            module.exit_json(changed=True, msg="VM %s started" % vmname)

    if state == 'shutdown':
        if vm_status(c, vmname) == 'down':
            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
        else:
            vm_stop(c, vmname)
            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)

    if state == 'restart':
        if vm_status(c, vmname) == 'up':
            vm_restart(c, vmname)
            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
        else:
            module.exit_json(changed=False, msg="VM %s is not running" % vmname)

    if state == 'absent':
        if get_vm(c, vmname) == "empty":
            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
        else:
            vm_remove(c, vmname)
            module.exit_json(changed=True, msg="VM %s removed" % vmname)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
repotvsupertuga/tvsupertuga.repository | script.module.openscrapers/lib/openscrapers/sources_openscrapers/en_DebridOnly/ddlspot.py | 1 | 6101 | # -*- coding: utf-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
'''
OpenScrapers Project **Created by Tempest**
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
from openscrapers.modules import cfscrape
from openscrapers.modules import client
from openscrapers.modules import debrid
from openscrapers.modules import source_utils
class source:
    """OpenScrapers provider for ddlspot.com (debrid-only DDL links)."""
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['www.ddlspot.com']
        self.base_link = 'http://www.ddlspot.com/'
        self.search_link = 'search/?q=%s&m=1&x=0&y=0'
        # cfscrape session bypasses Cloudflare on the site.
        self.scraper = cfscrape.create_scraper()

    def movie(self, imdb, title, localtitle, aliases, year):
        """Pack the movie identifiers into a url-encoded query string."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Pack the show identifiers into a url-encoded query string."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Augment the show query string with episode-level details."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search ddlspot and return candidate (debrid-only) source dicts.

        Returns a list of dicts with source/quality/language/url/info keys;
        any scraping failure is swallowed and yields no sources.
        """
        try:
            sources = []

            if url == None: return sources

            # This site only yields premium/debrid links.
            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('-', '+')

            r = self.scraper.get(url).content

            # Fallback: retry episode searches with just the show title.
            if r == None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                url = title

                r = self.scraper.get(url).content

            for loopCount in range(0, 2):
                if loopCount == 1 or (r == None and 'tvshowtitle' in data):
                    r = self.scraper.get(url).content

                posts = client.parseDOM(r, "table", attrs={"class": "download"})
                hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                items.append(name)
                            except:
                                pass
                    except:
                        pass
                if len(items) > 0: break

            for item in items:
                try:
                    info = []
                    i = str(item)
                    i = self.base_link + i
                    r = self.scraper.get(i).content
                    u = client.parseDOM(r, "div", attrs={"class": "dl-links"})
                    for t in u:
                        r = re.compile('a href=".+?" rel=".+?">(.+?)<').findall(t)
                        for url in r:
                            # Skip archives/images; debrid hosts can't stream them.
                            if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                            quality, info = source_utils.get_release_quality(url)
                            valid, host = source_utils.is_host_valid(url, hostDict)
                            sources.append(
                                {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                 'direct': False, 'debridonly': True})
                except:
                    pass

            # Prefer non-CAM releases when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return

    def resolve(self, url):
        """Links are already direct/debrid URLs; nothing to resolve."""
        return url
| gpl-2.0 |
throwable-one/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/transactions/tests_25.py | 51 | 5306 | from __future__ import with_statement
from django.db import connection, transaction, IntegrityError
from django.test import TransactionTestCase, skipUnlessDBFeature
from models import Reporter
class TransactionContextManagerTests(TransactionTestCase):
    """Exercise Django's transaction context managers (autocommit,
    commit_on_success, commit_manually) against a real database connection."""

    def create_reporter_and_fail(self):
        # Helper: perform a write and then raise, to probe rollback behavior.
        Reporter.objects.create(first_name="Bob", last_name="Holtzman")
        raise Exception

    @skipUnlessDBFeature('supports_transactions')
    def test_autocommit(self):
        """
        The default behavior is to autocommit after each save() action.
        """
        with self.assertRaises(Exception):
            self.create_reporter_and_fail()
        # The object created before the exception still exists
        self.assertEqual(Reporter.objects.count(), 1)

    @skipUnlessDBFeature('supports_transactions')
    def test_autocommit_context_manager(self):
        """
        The autocommit context manager works exactly the same as the default
        behavior.
        """
        with self.assertRaises(Exception):
            with transaction.autocommit():
                self.create_reporter_and_fail()
        self.assertEqual(Reporter.objects.count(), 1)

    @skipUnlessDBFeature('supports_transactions')
    def test_autocommit_context_manager_with_using(self):
        """
        The autocommit context manager also works with a using argument.
        """
        with self.assertRaises(Exception):
            with transaction.autocommit(using="default"):
                self.create_reporter_and_fail()
        self.assertEqual(Reporter.objects.count(), 1)

    @skipUnlessDBFeature('supports_transactions')
    def test_commit_on_success(self):
        """
        With the commit_on_success context manager, the transaction is only
        committed if the block doesn't throw an exception.
        """
        with self.assertRaises(Exception):
            with transaction.commit_on_success():
                self.create_reporter_and_fail()
        self.assertEqual(Reporter.objects.count(), 0)

    @skipUnlessDBFeature('supports_transactions')
    def test_commit_on_success_with_using(self):
        """
        The commit_on_success context manager also works with a using argument.
        """
        with self.assertRaises(Exception):
            with transaction.commit_on_success(using="default"):
                self.create_reporter_and_fail()
        self.assertEqual(Reporter.objects.count(), 0)

    @skipUnlessDBFeature('supports_transactions')
    def test_commit_on_success_succeed(self):
        """
        If there aren't any exceptions, the data will get saved.
        """
        Reporter.objects.create(first_name="Alice", last_name="Smith")
        with transaction.commit_on_success():
            Reporter.objects.filter(first_name="Alice").delete()
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    @skipUnlessDBFeature('supports_transactions')
    def test_commit_on_success_exit(self):
        with transaction.autocommit():
            with transaction.commit_on_success():
                Reporter.objects.create(first_name="Bobby", last_name="Tables")
            # Much more formal
            r = Reporter.objects.get()
            r.first_name = "Robert"
            r.save()
        r = Reporter.objects.get()
        self.assertEqual(r.first_name, "Robert")

    @skipUnlessDBFeature('supports_transactions')
    def test_manually_managed(self):
        """
        You can manually manage transactions if you really want to, but you
        have to remember to commit/rollback.
        """
        with transaction.commit_manually():
            Reporter.objects.create(first_name="Libby", last_name="Holtzman")
            transaction.commit()
        self.assertEqual(Reporter.objects.count(), 1)

    @skipUnlessDBFeature('supports_transactions')
    def test_manually_managed_mistake(self):
        """
        If you forget, you'll get bad errors.
        """
        with self.assertRaises(transaction.TransactionManagementError):
            with transaction.commit_manually():
                Reporter.objects.create(first_name="Scott", last_name="Browning")

    @skipUnlessDBFeature('supports_transactions')
    def test_manually_managed_with_using(self):
        """
        The commit_manually function also works with a using argument.
        """
        with self.assertRaises(transaction.TransactionManagementError):
            with transaction.commit_manually(using="default"):
                Reporter.objects.create(first_name="Walter", last_name="Cronkite")

    @skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
    def test_bad_sql(self):
        """
        Regression for #11900: If a block wrapped by commit_on_success
        writes a transaction that can't be committed, that transaction should
        be rolled back. The bug is only visible using the psycopg2 backend,
        though the fix is generally a good idea.
        """
        with self.assertRaises(IntegrityError):
            with transaction.commit_on_success():
                cursor = connection.cursor()
                cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
                transaction.set_dirty()
        transaction.rollback()
| gpl-3.0 |
robweber/maraschino | lib/sqlalchemy/dialects/mysql/zxjdbc.py | 23 | 3887 | # mysql/zxjdbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database via Jython's zxjdbc JDBC connector.
JDBC Driver
-----------
The official MySQL JDBC driver is at
http://dev.mysql.com/downloads/connector/j/.
Connecting
----------
Connect string format:
mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overriden via a ``create_engine`` URL parameter.
"""
import re
from sqlalchemy import types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mysql.base import BIT, MySQLDialect, MySQLExecutionContext
class _ZxJDBCBit(BIT):
    """BIT result type for zxJDBC that normalizes driver values to numbers."""
    def result_processor(self, dialect, coltype):
        """Converts boolean or byte arrays from MySQL Connector/J to longs."""
        def process(value):
            if value is None:
                return value
            # Connector/J may return BIT(1) as a Java boolean.
            if isinstance(value, bool):
                return int(value)
            # Otherwise the value is a byte array: fold it big-endian into a
            # (Python 2) long.
            v = 0L
            for i in value:
                v = v << 8 | (i & 0xff)
            value = v
            return value
        return process
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
    """Execution context that retrieves last-insert ids via a SQL round trip,
    since the zxJDBC DBAPI does not expose them directly."""

    def get_lastrowid(self):
        c = self.create_cursor()
        c.execute("SELECT LAST_INSERT_ID()")
        rowid = c.fetchone()[0]
        c.close()
        return rowid
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
    """MySQL dialect for Jython's zxJDBC (JDBC) DBAPI."""
    jdbc_db_name = 'mysql'
    jdbc_driver_name = 'com.mysql.jdbc.Driver'

    execution_ctx_cls = MySQLExecutionContext_zxjdbc

    # Override BIT handling so Connector/J values are converted to numbers.
    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            sqltypes.Time: sqltypes.Time,
            BIT: _ZxJDBCBit
        }
    )

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver. SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'

    def _driver_kwargs(self):
        """return kw arg dict to be sent to connect()."""
        return dict(characterEncoding='UTF-8', yearIsDateType='false')

    def _extract_error_code(self, exception):
        """Parse the numeric MySQL error code out of a zxJDBC exception."""
        # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
        # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
        # NOTE(review): if the pattern is absent, m is None and m.group(1)
        # raises AttributeError -- confirm callers only pass MySQL errors.
        m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
        c = m.group(1)
        if c:
            return int(c)

    def _get_server_version_info(self,connection):
        """Split the JDBC-reported version string into a tuple of ints/strings."""
        dbapi_con = connection.connection
        version = []
        r = re.compile('[.\-]')
        for n in r.split(dbapi_con.dbversion):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)
dialect = MySQLDialect_zxjdbc
| mit |
emanuelmellblom/opendlv.scaledcars | thirdparty/cxxtest/python/python3/cxxtest/cxxtest_fog.py | 55 | 4013 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
#
# TODO: add line number info
# TODO: add test function names
#
import sys
import re
from .cxxtest_misc import abort
from . import cxx_parser
import re
def cstr(str):
    '''Convert a string to a double-quoted C string literal.

    Both backslashes and double quotes are escaped so the result is a
    valid C literal even for file names containing either character.
    (The original escaped only backslashes, producing invalid C for
    names containing '"'.)
    '''
    return '"' + str.replace('\\', '\\\\').replace('"', '\\"') + '"'
def scanInputFiles(files, _options):
    '''Scan all input files for test suites.

    Parses each C++ file with cxx_parser, collects every class deriving
    from CxxTest::TestSuite together with its test methods, and returns
    the (possibly updated) options object plus the list of suite dicts
    consumed by the code generator.
    '''
    suites=[]
    for file in files:
        try:
            print("Parsing file "+file, end=' ')
            sys.stdout.flush()
            parse_info = cxx_parser.parse_cpp(filename=file,optimize=1)
        except IOError as err:
            # Unreadable file: report it and move on to the next one.
            print(" error.")
            print(str(err))
            continue
        print("done.")
        sys.stdout.flush()
        #
        # WEH: see if it really makes sense to use parse information to
        # initialize this data. I don't think so...
        #
        _options.haveStandardLibrary=1
        if not parse_info.noExceptionLogic:
            _options.haveExceptionHandling=1
        #
        keys = list(parse_info.index.keys())
        # Test methods are recognized by a case-insensitive 'test' prefix.
        tpat = re.compile("[Tt][Ee][Ss][Tt]")
        for key in keys:
            if parse_info.index[key].scope_t == "class" and parse_info.is_baseclass(key,"CxxTest::TestSuite"):
                name=parse_info.index[key].name
                # Strip the leading global-scope qualifier, if any.
                if key.startswith('::'):
                    fullname = key[2:]
                else:
                    fullname = key
                # Record for the generator; the *object names are derived
                # from the suite's fully qualified name.
                suite = {
                    'fullname' : fullname,
                    'name' : name,
                    'file' : file,
                    'cfile' : cstr(file),
                    'line' : str(parse_info.index[key].lineno),
                    'generated' : 0,
                    'object' : 'suite_%s' % fullname.replace('::','_'),
                    'dobject' : 'suiteDescription_%s' % fullname.replace('::','_'),
                    'tlist' : 'Tests_%s' % fullname.replace('::','_'),
                    'tests' : [],
                    'lines' : [] }
                for fn in parse_info.get_functions(key,quiet=True):
                    tname = fn[0]
                    lineno = str(fn[1])
                    if tname.startswith('createSuite'):
                        # Indicate that we're using a dynamically generated test suite
                        suite['create'] = str(lineno) # (unknown line)
                    if tname.startswith('destroySuite'):
                        # Indicate that we're using a dynamically generated test suite
                        suite['destroy'] = str(lineno) # (unknown line)
                    if not tpat.match(tname):
                        # Skip non-test methods
                        continue
                    test = { 'name' : tname,
                        'suite' : suite,
                        'class' : 'TestDescription_suite_%s_%s' % (suite['fullname'].replace('::','_'), tname),
                        'object' : 'testDescription_suite_%s_%s' % (suite['fullname'].replace('::','_'), tname),
                        'line' : lineno,
                        }
                    suite['tests'].append(test)
                suites.append(suite)
    if not _options.root:
        # NOTE(review): when no root/partial mode is set, a run that found
        # zero tests aborts — confirm option semantics against cxxtest_misc.
        ntests = 0
        for suite in suites:
            ntests += len(suite['tests'])
        if ntests == 0:
            abort( 'No tests defined' )
    #
    return [_options, suites]
| gpl-2.0 |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/django/contrib/postgres/forms/array.py | 84 | 6754 | import copy
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import string_concat, ugettext_lazy as _
class SimpleArrayField(forms.CharField):
    """Form field mapping a delimited string to a list of values.

    Each item is converted and validated by ``base_field``; per-item
    failures are reported with the (zero-based) index of the item.
    """

    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
        self.base_field = base_field
        self.delimiter = delimiter
        super(SimpleArrayField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.min_length = min_length
            self.validators.append(ArrayMinLengthValidator(int(min_length)))
        if max_length is not None:
            self.max_length = max_length
            self.validators.append(ArrayMaxLengthValidator(int(max_length)))

    def _item_errors(self, error, index):
        # Wrap each underlying error with the 'item_invalid' prefix and the
        # index of the offending item.  Shared by to_python(), validate()
        # and run_validators(), which previously repeated this block verbatim.
        return [
            ValidationError(
                string_concat(self.error_messages['item_invalid'], e.message),
                code='item_invalid',
                params={'nth': index},
            )
            for e in error.error_list
        ]

    def prepare_value(self, value):
        # Lists are re-joined into the delimited text representation.
        if isinstance(value, list):
            return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value)
        return value

    def to_python(self, value):
        """Split the raw string and convert each item via the base field."""
        items = value.split(self.delimiter) if value else []
        errors = []
        values = []
        for i, item in enumerate(items):
            try:
                values.append(self.base_field.to_python(item))
            except ValidationError as e:
                errors.extend(self._item_errors(e, i))
        if errors:
            raise ValidationError(errors)
        return values

    def validate(self, value):
        """Run the base field's validate() on every item, collecting errors."""
        super(SimpleArrayField, self).validate(value)
        errors = []
        for i, item in enumerate(value):
            try:
                self.base_field.validate(item)
            except ValidationError as e:
                errors.extend(self._item_errors(e, i))
        if errors:
            raise ValidationError(errors)

    def run_validators(self, value):
        """Run the base field's validators on every item, collecting errors."""
        super(SimpleArrayField, self).run_validators(value)
        errors = []
        for i, item in enumerate(value):
            try:
                self.base_field.run_validators(item)
            except ValidationError as e:
                errors.extend(self._item_errors(e, i))
        if errors:
            raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
    """Widget rendering ``size`` copies of a subwidget, one per array item."""

    def __init__(self, widget, size, **kwargs):
        # Accept either a widget class or an instance.
        self.widget = widget() if isinstance(widget, type) else widget
        self.size = size
        super(SplitArrayWidget, self).__init__(**kwargs)

    @property
    def is_hidden(self):
        return self.widget.is_hidden

    def value_from_datadict(self, data, files, name):
        # One value per subwidget, suffixed with its index.
        return [
            self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
            for index in range(self.size)
        ]

    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_0'
        return id_

    def render(self, name, value, attrs=None):
        if self.is_localized:
            self.widget.is_localized = self.is_localized
        items = value or []
        rendered = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id')
        for index in range(max(len(items), self.size)):
            # Past the end of the supplied data, render empty subwidgets.
            widget_value = items[index] if index < len(items) else None
            if id_:
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, index))
            rendered.append(self.widget.render(name + '_%s' % index, widget_value, final_attrs))
        return mark_safe(self.format_output(rendered))

    def format_output(self, rendered_widgets):
        return ''.join(rendered_widgets)

    @property
    def media(self):
        return self.widget.media

    def __deepcopy__(self, memo):
        obj = super(SplitArrayWidget, self).__deepcopy__(memo)
        obj.widget = copy.deepcopy(self.widget)
        return obj

    @property
    def needs_multipart_form(self):
        return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
    """Form field presenting a fixed-size array as ``size`` subfields."""

    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
        self.base_field = base_field
        self.size = size
        self.remove_trailing_nulls = remove_trailing_nulls
        widget = SplitArrayWidget(widget=base_field.widget, size=size)
        kwargs.setdefault('widget', widget)
        super(SplitArrayField, self).__init__(**kwargs)

    def clean(self, value):
        """Clean each item via the base field; optionally trim trailing empties."""
        cleaned_data = []
        errors = []
        if not any(value) and self.required:
            raise ValidationError(self.error_messages['required'])
        max_size = max(self.size, len(value))
        for i in range(max_size):
            item = value[i]
            try:
                cleaned_data.append(self.base_field.clean(item))
                # Placeholder keeps errors index-aligned with cleaned_data.
                errors.append(None)
            except ValidationError as error:
                errors.append(ValidationError(
                    string_concat(self.error_messages['item_invalid'], ' '.join(error.messages)),
                    code='item_invalid',
                    params={'nth': i},
                ))
                cleaned_data.append(None)
        if self.remove_trailing_nulls:
            null_index = None
            # Walk backwards to find where the trailing run of empty values starts.
            # (Loop variable renamed from 'value' — it shadowed the parameter.)
            for i, item in reversed(list(enumerate(cleaned_data))):
                if item in self.base_field.empty_values:
                    null_index = i
                else:
                    break
            # BUG FIX: the original tested ``if null_index:``, which is False
            # when null_index == 0, so an all-empty array was never truncated.
            if null_index is not None:
                cleaned_data = cleaned_data[:null_index]
                errors = errors[:null_index]
        errors = list(filter(None, errors))
        if errors:
            raise ValidationError(errors)
        return cleaned_data
| mit |
d7415/merlin | Core/paconf.py | 2 | 1049 | # This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Core.config import load_config

# Module-level configuration object parsed from "pa.cfg", loaded once at
# import time and shared by everything that imports this module.
PA = load_config("pa.cfg")
amenonsen/ansible | lib/ansible/modules/remote_management/redfish/redfish_info.py | 2 | 14365 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: redfish_info
version_added: "2.7"
short_description: Manages Out-Of-Band controllers using Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
get information back.
- Information retrieved is placed in a location specified by the user.
- This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(redfish_info) module no longer returns C(ansible_facts)!
options:
category:
required: false
description:
- List of categories to execute on OOB controller
default: ['Systems']
type: list
command:
required: false
description:
- List of commands to execute on OOB controller
type: list
baseuri:
required: true
description:
- Base URI of OOB controller
type: str
username:
required: true
description:
- User for authentication with OOB controller
type: str
version_added: "2.8"
password:
required: true
description:
- Password for authentication with OOB controller
type: str
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
version_added: '2.8'
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Get CPU inventory
redfish_info:
category: Systems
command: GetCpuInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
- name: Get CPU model
redfish_info:
category: Systems
command: GetCpuInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
- name: Get memory inventory
redfish_info:
category: Systems
command: GetMemoryInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- name: Get fan inventory with a timeout of 20 seconds
redfish_info:
category: Chassis
command: GetFanInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
timeout: 20
register: result
- name: Get Virtual Media information
redfish_info:
category: Manager
command: GetVirtualMedia
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
- name: Get Volume Inventory
redfish_info:
category: Systems
command: GetVolumeInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
- name: Get Session information
redfish_info:
category: Sessions
command: GetSessions
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
- name: Get default inventory information
redfish_info:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts | to_nice_json }}"
- name: Get several inventories
redfish_info:
category: Systems
command: GetNicInventory,GetBiosAttributes
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get default system inventory and user information
redfish_info:
category: Systems,Accounts
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get default system, user and firmware information
redfish_info:
category: ["Systems", "Accounts", "Update"]
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get Manager NIC inventory information
redfish_info:
category: Manager
command: GetManagerNicInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get boot override information
redfish_info:
category: Systems
command: GetBootOverride
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get chassis inventory
redfish_info:
category: Chassis
command: GetChassisInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get all information available in the Manager category
redfish_info:
category: Manager
command: all
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get firmware update capability information
redfish_info:
category: Update
command: GetFirmwareUpdateCapabilities
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get all information available in all categories
redfish_info:
category: all
command: all
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
result:
description: different results depending on task
returned: always
type: dict
sample: List of CPUs on system
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
# Complete map of Redfish categories to the informational commands this
# module can run against them; ``command: all`` expands to every entry
# for the chosen category (see main()).
CATEGORY_COMMANDS_ALL = {
    "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
                "GetMemoryInventory", "GetNicInventory",
                "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
                "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
    "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", "GetChassisThermals", "GetChassisInventory"],
    "Accounts": ["ListUsers"],
    "Sessions": ["GetSessions"],
    "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities"],
    "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs"],
}
# Command run for a category when the user specifies no ``command``
# (see the ``if not module.params['command']`` branch in main()).
CATEGORY_COMMANDS_DEFAULT = {
    "Systems": "GetSystemInventory",
    "Chassis": "GetFanInventory",
    "Accounts": "ListUsers",
    "Update": "GetFirmwareInventory",
    "Sessions": "GetSessions",
    "Manager": "GetManagerNicInventory"
}
def main():
    """Module entry point.

    Parses the Ansible arguments, validates the requested categories and
    commands against CATEGORY_COMMANDS_ALL, dispatches every command to
    the matching RedfishUtils getter and exits with the collected
    ``redfish_facts`` result dict.
    """
    result = {}
    category_list = []
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(type='list', default=['Systems']),
            command=dict(type='list'),
            baseuri=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=False
    )

    # Backwards compatibility: invoked under the deprecated name, emit a
    # deprecation warning (output shape also differs — see bottom).
    is_old_facts = module._name == 'redfish_facts'
    if is_old_facts:
        module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # timeout
    timeout = module.params['timeout']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = RedfishUtils(creds, root_uri, timeout, module)

    # Build Category list
    if "all" in module.params['category']:
        for entry in CATEGORY_COMMANDS_ALL:
            category_list.append(entry)
    else:
        # one or more categories specified
        category_list = module.params['category']

    for category in category_list:
        command_list = []
        # Build Command list for each Category
        if category in CATEGORY_COMMANDS_ALL:
            if not module.params['command']:
                # True if we don't specify a command --> use default
                command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
            elif "all" in module.params['command']:
                for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
                    command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
            # one or more commands
            else:
                command_list = module.params['command']
                # Verify that all commands are valid
                for cmd in command_list:
                    # Fail if even one command given is invalid
                    if cmd not in CATEGORY_COMMANDS_ALL[category]:
                        module.fail_json(msg="Invalid Command: %s" % cmd)
        else:
            # Fail if even one category given is invalid
            module.fail_json(msg="Invalid Category: %s" % category)

        # Organize by Categories / Commands
        if category == "Systems":
            # execute only if we find a Systems resource
            resource = rf_utils._find_systems_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetSystemInventory":
                    result["system"] = rf_utils.get_multi_system_inventory()
                elif command == "GetCpuInventory":
                    result["cpu"] = rf_utils.get_multi_cpu_inventory()
                elif command == "GetMemoryInventory":
                    result["memory"] = rf_utils.get_multi_memory_inventory()
                elif command == "GetNicInventory":
                    result["nic"] = rf_utils.get_multi_nic_inventory(category)
                elif command == "GetStorageControllerInventory":
                    result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
                elif command == "GetDiskInventory":
                    result["disk"] = rf_utils.get_multi_disk_inventory()
                elif command == "GetVolumeInventory":
                    result["volume"] = rf_utils.get_multi_volume_inventory()
                elif command == "GetBiosAttributes":
                    result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
                elif command == "GetBootOrder":
                    result["boot_order"] = rf_utils.get_multi_boot_order()
                elif command == "GetBootOverride":
                    result["boot_override"] = rf_utils.get_multi_boot_override()

        elif category == "Chassis":
            # execute only if we find Chassis resource
            resource = rf_utils._find_chassis_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetFanInventory":
                    result["fan"] = rf_utils.get_fan_inventory()
                elif command == "GetPsuInventory":
                    result["psu"] = rf_utils.get_psu_inventory()
                elif command == "GetChassisThermals":
                    result["thermals"] = rf_utils.get_chassis_thermals()
                elif command == "GetChassisPower":
                    result["chassis_power"] = rf_utils.get_chassis_power()
                elif command == "GetChassisInventory":
                    result["chassis"] = rf_utils.get_chassis_inventory()

        elif category == "Accounts":
            # execute only if we find an Account service resource
            resource = rf_utils._find_accountservice_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "ListUsers":
                    result["user"] = rf_utils.list_users()

        elif category == "Update":
            # execute only if we find UpdateService resources
            resource = rf_utils._find_updateservice_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetFirmwareInventory":
                    result["firmware"] = rf_utils.get_firmware_inventory()
                elif command == "GetFirmwareUpdateCapabilities":
                    result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()

        elif category == "Sessions":
            # execute only if we find SessionService resources
            resource = rf_utils._find_sessionservice_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetSessions":
                    result["session"] = rf_utils.get_sessions()

        elif category == "Manager":
            # execute only if we find a Manager service resource
            resource = rf_utils._find_managers_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetManagerNicInventory":
                    result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
                elif command == "GetVirtualMedia":
                    result["virtual_media"] = rf_utils.get_multi_virtualmedia()
                elif command == "GetLogs":
                    result["log"] = rf_utils.get_logs()

    # Return data back
    if is_old_facts:
        module.exit_json(ansible_facts=dict(redfish_facts=result))
    else:
        module.exit_json(redfish_facts=result)
# Run the module when executed as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
igoroya/igor-oya-solutions-cracking-coding-interview | crackingcointsolutions/chapter3/exercise3.py | 1 | 2545 | '''
Created on 24 Aug 2017
Stack of plates
@author: igoroya
'''
from chapter3 import utils
class StackOfPlates(object):
    '''
    A pile of plates split across multiple literal stacks: when the
    active stack reaches max_stack_size a new one is started, and a
    stack is discarded once remove() empties it.
    '''

    def __init__(self, max_stack_size):
        '''
        :param max_stack_size: maximum number of plates per sub-stack
        '''
        self._stacks = []  # lazy init of 1st stack at 1st push
        self._max_stack_size = max_stack_size

    def __repr__(self):
        return self._stacks.__repr__()

    def __len__(self):
        # Total number of plates across all sub-stacks.
        sizes = 0
        for stack in self._stacks:
            sizes += len(stack)
        return sizes

    def n_of_stacks(self):
        return len(self._stacks)

    def push(self):
        '''
        Add a plate on top of the active stack; start a new stack when
        the active one is full (or none exists yet).
        '''
        if (not self._stacks) or (len(self._stacks[-1]) == self._max_stack_size):
            stack = utils.Stack()
            self._stacks.append(stack)
        else:
            stack = self._stacks[-1]
        stack.push("Plate")

    def remove(self):
        '''
        Remove the top plate of the last stack, dropping the stack once
        it becomes empty.
        '''
        if not self._stacks:
            print("Sorry, not stacks exist")  # better would be an exception
            return
        self._stacks[-1].remove()
        if self._stacks[-1].is_empty():
            self._stacks.pop()

    def peek(self):
        '''
        Return the plate at the top of the last stack without removing
        it, or None when everything is empty.
        '''
        if not self._stacks:
            print("All stacks are empty")
            return None
        return self._stacks[-1].peek()

    def is_empty(self):
        '''
        Return True if there are no plates at all.
        '''
        return not self._stacks

    def pop_at(self, stack_index):
        '''
        Pop and return the top plate of the sub-stack at stack_index.

        BUG FIX: the original guard was ``stack_index + 1 >= len(...)``,
        which wrongly rejected the last valid index (len - 1).
        NOTE(review): an emptied sub-stack is not discarded here, unlike
        in remove() — confirm whether that asymmetry is intended.
        '''
        if stack_index >= len(self._stacks):
            print("Stack does not exist")
            return None  # should be a exception instead
        value = self._stacks[stack_index].peek()
        self._stacks[stack_index].remove()
        return value
if __name__ == '__main__':
    # Demo: 35 plates with max 10 per sub-stack -> 4 sub-stacks (10/10/10/5).
    my_plates = StackOfPlates(10)
    for _ in range(35):
        my_plates.push()
    print(my_plates)
    print("Number of plates: {} using: {} stacks".format(len(my_plates), my_plates.n_of_stacks()))
    # Drain everything again.
    for _ in range(35):
        my_plates.remove()
    print("is empty? {}".format(my_plates.is_empty()))
    # Refill, then pop one plate from each specific sub-stack.
    for _ in range(35):
        my_plates.push()
    my_plates.pop_at(0)
    my_plates.pop_at(1)
    my_plates.pop_at(2)
    my_plates.pop_at(3)
    # Index 4 is out of range for 4 sub-stacks; exercises the bounds check.
    my_plates.pop_at(4)
    print(my_plates)
| mit |
GraemeFulton/job-search | docutils-0.12/test/test_parsers/test_rst/test_directives/test_replace.py | 18 | 3908 | #! /usr/bin/env python
# $Id: test_replace.py 7021 2011-04-29 23:20:54Z grubert $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for misc.py "replace" directive.
"""
from __init__ import DocutilsTestSupport
def suite():
    """Build the parser test suite from the ``totest`` fixture table below."""
    s = DocutilsTestSupport.ParserTestSuite()
    s.generateTests(totest)
    return s
totest = {}
totest['replace'] = [
["""\
Test the |name| directive.
.. |name| replace:: "**replace**"
""",
"""\
<document source="test data">
<paragraph>
Test the \n\
<substitution_reference refname="name">
name
directive.
<substitution_definition names="name">
"
<strong>
replace
"
"""],
["""\
.. |name| replace:: paragraph 1
paragraph 2
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Error in "replace" directive: may contain a single paragraph only.
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
Substitution definition "name" empty or invalid.
<literal_block xml:space="preserve">
.. |name| replace:: paragraph 1
paragraph 2
"""],
["""\
.. |name| replace::
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Content block expected for the "replace" directive; none found.
<literal_block xml:space="preserve">
replace::
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
Substitution definition "name" empty or invalid.
<literal_block xml:space="preserve">
.. |name| replace::
"""],
["""\
.. |Python| replace:: Python, *the* best language around
.. _Python: http://www.python.org/
I recommend you try |Python|_.
""",
"""\
<document source="test data">
<substitution_definition names="Python">
Python,
<emphasis>
the
best language around
<target ids="python" names="python" refuri="http://www.python.org/">
<paragraph>
I recommend you try
<reference refname="python">
<substitution_reference refname="Python">
Python
.
"""],
["""\
.. |name| replace:: *error in **inline ``markup
""",
"""\
<document source="test data">
<system_message ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline emphasis start-string without end-string.
<system_message ids="id3" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline strong start-string without end-string.
<system_message ids="id5" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline literal start-string without end-string.
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Substitution definition contains illegal element:
<literal_block xml:space="preserve">
<problematic ids="id2" refid="id1">
*
<literal_block xml:space="preserve">
.. |name| replace:: *error in **inline ``markup
"""],
["""\
.. replace:: not valid outside of a substitution definition
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Invalid context: the "replace" directive can only be used within a substitution definition.
<literal_block xml:space="preserve">
.. replace:: not valid outside of a substitution definition
"""],
]
# Allow running this test module standalone.
if __name__ == '__main__':
    import unittest
    unittest.main(defaultTest='suite')
| gpl-2.0 |
pquentin/django | tests/syndication_tests/tests.py | 13 | 18373 | from __future__ import unicode_literals
import datetime
from xml.dom import minidom
from django.contrib.sites.models import Site
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
try:
import pytz
except ImportError:
pytz = None
TZ = timezone.get_default_timezone()
class FeedTestCase(TestCase):
    """Base class with DOM-level assertion helpers for the feed tests."""

    fixtures = ['feeddata.json']

    def setUp(self):
        # Django cannot deal with very old dates when pytz isn't installed.
        if pytz is None:
            entry = Entry.objects.get(pk=1)
            entry.updated = datetime.datetime(1980, 1, 1, 12, 30)
            entry.published = datetime.datetime(1986, 9, 25, 20, 15, 0)
            entry.save()

    def assertChildNodes(self, elem, expected):
        # The set of child element names must equal `expected`.
        self.assertEqual({node.nodeName for node in elem.childNodes}, set(expected))

    def assertChildNodeContent(self, elem, expected):
        # Each tag named in `expected` must carry the given text content.
        for tag, text in expected.items():
            first = elem.getElementsByTagName(tag)[0]
            self.assertEqual(first.firstChild.wholeText, text)

    def assertCategories(self, elem, expected):
        # The texts of the child <category> elements must equal `expected`.
        found = {child.firstChild.wholeText
                 for child in elem.childNodes if child.nodeName == 'category'}
        self.assertEqual(found, set(expected))
######################################
# Feed view
######################################
@override_settings(ROOT_URLCONF='syndication_tests.urls')
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
@classmethod
def setUpClass(cls):
super(SyndicationFeedTest, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('published').published
last_build_date = rfc2822_date(timezone.make_aware(d, TZ))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).published
pub_date = rfc2822_date(timezone.make_aware(d, TZ))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': 'test@example.com (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
# Assert that <guid> does not have any 'isPermaLink' attribute
self.assertIsNone(item.getElementsByTagName(
'guid')[0].attributes.get('isPermaLink'))
def test_rss2_feed_guid_permalink_false(self):
"""
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'false'.
"""
response = self.client.get(
'/syndication/rss2/guid_ispermalink_false/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "false")
def test_rss2_feed_guid_permalink_true(self):
"""
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'true'.
"""
response = self.client.get(
'/syndication/rss2/guid_ispermalink_true/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "true")
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
    """
    Test the structure and content of feeds generated by Atom1Feed.
    """
    response = self.client.get('/syndication/atom/')
    feed = minidom.parseString(response.content).firstChild
    # Root element and Atom namespace.
    self.assertEqual(feed.nodeName, 'feed')
    self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
    self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
    # The rel="self" link must point back at the feed's own URL.
    for link in feed.getElementsByTagName('link'):
        if link.getAttribute('rel') == 'self':
            self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
    entries = feed.getElementsByTagName('entry')
    self.assertEqual(len(entries), Entry.objects.count())
    for entry in entries:
        self.assertChildNodes(entry, [
            'title',
            'link',
            'id',
            'summary',
            'category',
            'updated',
            'published',
            'rights',
            'author',
        ])
        # Entry summaries are rendered as HTML.
        summary = entry.getElementsByTagName('summary')[0]
        self.assertEqual(summary.getAttribute('type'), 'html')
def test_atom_feed_published_and_updated_elements(self):
    """
    Atom entries expose distinct <published> and <updated> elements
    (per RFC 4287) rather than duplicating a single timestamp.
    """
    response = self.client.get('/syndication/atom/')
    feed = minidom.parseString(response.content).firstChild
    first_entry = feed.getElementsByTagName('entry')[0]
    published = first_entry.getElementsByTagName('published')[0].firstChild.wholeText
    updated = first_entry.getElementsByTagName('updated')[0].firstChild.wholeText
    self.assertNotEqual(published, updated)
def test_latest_post_date(self):
    """
    Test that both the published and updated dates are
    considered when determining the latest post date.
    """
    # this feed has a `published` element with the latest date
    response = self.client.get('/syndication/atom/')
    feed = minidom.parseString(response.content).firstChild
    updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
    d = Entry.objects.latest('published').published
    latest_published = rfc3339_date(timezone.make_aware(d, TZ))
    self.assertEqual(updated, latest_published)
    # this feed has an `updated` element with the latest date
    response = self.client.get('/syndication/latest/')
    feed = minidom.parseString(response.content).firstChild
    updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
    # pk=5 is excluded by the view, so it must not influence the date.
    d = Entry.objects.exclude(pk=5).latest('updated').updated
    latest_updated = rfc3339_date(timezone.make_aware(d, TZ))
    self.assertEqual(updated, latest_updated)
def test_custom_feed_generator(self):
    """
    A custom feed generator class can add attributes and extra child
    elements at both the feed and the entry level.
    """
    response = self.client.get('/syndication/custom/')
    feed = minidom.parseString(response.content).firstChild
    self.assertEqual(feed.nodeName, 'feed')
    # Attribute injected by the custom generator.
    self.assertEqual(feed.getAttribute('django'), 'rocks')
    # 'spam' is an extra feed-level element from the custom generator.
    self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
    entries = feed.getElementsByTagName('entry')
    self.assertEqual(len(entries), Entry.objects.count())
    for entry in entries:
        # Per-entry attribute and extra 'ministry' element.
        self.assertEqual(entry.getAttribute('bacon'), 'yum')
        self.assertChildNodes(entry, [
            'title',
            'link',
            'id',
            'summary',
            'ministry',
            'rights',
            'author',
            'updated',
            'published',
            'category',
        ])
        summary = entry.getElementsByTagName('summary')[0]
        self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
    """
    Titles containing &, < and > are escaped correctly in RSS feeds.
    """
    response = self.client.get('/syndication/rss2/')
    document = minidom.parseString(response.content)
    target_link = 'http://example.com/blog/4/'
    for item in document.getElementsByTagName('item'):
        item_link = item.getElementsByTagName('link')[0]
        if item_link.firstChild.wholeText == target_link:
            title_text = item.getElementsByTagName('title')[0].firstChild.wholeText
            self.assertEqual(title_text, 'A & B < C > D')
def test_naive_datetime_conversion(self):
    """
    Test that datetimes are correctly converted to the local time zone.
    """
    # Naive date times passed in get converted to the local time zone, so
    # check the received zone offset against the local offset.
    response = self.client.get('/syndication/naive-dates/')
    doc = minidom.parseString(response.content)
    updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
    # The expected value is the latest published date rendered in TZ.
    d = Entry.objects.latest('published').published
    latest = rfc3339_date(timezone.make_aware(d, TZ))
    self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
    """
    Timezone-aware datetimes keep their original UTC offset in the feed.
    """
    response = self.client.get('/syndication/aware-dates/')
    document = minidom.parseString(response.content)
    published_node = document.getElementsByTagName('published')[0]
    published = published_node.firstChild.wholeText
    # The fixture's +00:42 offset must survive serialization untouched.
    self.assertEqual(published[-6:], '+00:42')
@requires_tz_support
def test_feed_last_modified_time_naive_date(self):
    """
    Tests the Last-Modified header with naive publication dates.
    """
    response = self.client.get('/syndication/naive-dates/')
    # Naive dates are interpreted in the configured time zone and the
    # header is rendered in GMT.
    self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')
def test_feed_last_modified_time(self):
    """
    Tests the Last-Modified header with aware publication dates.
    """
    response = self.client.get('/syndication/aware-dates/')
    self.assertEqual(response['Last-Modified'], 'Mon, 25 Mar 2013 19:18:00 GMT')
    # No last-modified when feed has no item_pubdate
    response = self.client.get('/syndication/no_pubdate/')
    self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
    """
    An overridden feed_url is reflected in the rel="self" link href.
    """
    response = self.client.get('/syndication/feedurl/')
    document = minidom.parseString(response.content)
    self_links = [
        link for link in document.getElementsByTagName('link')
        if link.getAttribute('rel') == 'self'
    ]
    for link in self_links:
        self.assertEqual(
            link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
    """
    Test URLs are prefixed with https:// when feed is requested over HTTPS.
    """
    # Simulate an HTTPS request via the WSGI url scheme key.
    response = self.client.get('/syndication/rss2/', **{
        'wsgi.url_scheme': 'https',
    })
    doc = minidom.parseString(response.content)
    chan = doc.getElementsByTagName('channel')[0]
    # The channel link uses the https scheme...
    self.assertEqual(
        chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
        'https'
    )
    # ...as does the atom:link self-reference...
    atom_link = chan.getElementsByTagName('atom:link')[0]
    self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
    # ...and every rel="self" link in the document.
    for link in doc.getElementsByTagName('link'):
        if link.getAttribute('rel') == 'self':
            self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
    """
    An ImproperlyConfigured error is raised when no link can be derived
    for the feed item(s).
    """
    with self.assertRaises(ImproperlyConfigured):
        self.client.get('/syndication/articles/')
def test_template_feed(self):
    """
    Test that the item title and description can be overridden with
    templates.
    """
    response = self.client.get('/syndication/template/')
    doc = minidom.parseString(response.content)
    feed = doc.getElementsByTagName('rss')[0]
    chan = feed.getElementsByTagName('channel')[0]
    items = chan.getElementsByTagName('item')
    # The trailing '\n' comes from the template files themselves.
    self.assertChildNodeContent(items[0], {
        'title': 'Title in your templates: My first entry\n',
        'description': 'Description in your templates: My first entry\n',
        'link': 'http://example.com/blog/1/',
    })
def test_template_context_feed(self):
    """
    Custom context data reaches the title and description templates.
    """
    response = self.client.get('/syndication/template_context/')
    document = minidom.parseString(response.content)
    rss_root = document.getElementsByTagName('rss')[0]
    channel = rss_root.getElementsByTagName('channel')[0]
    first_item = channel.getElementsByTagName('item')[0]
    # The trailing '\n' comes from the template files themselves.
    self.assertChildNodeContent(first_item, {
        'title': 'My first entry (foo is bar)\n',
        'description': 'My first entry (foo is bar)\n',
    })
def test_add_domain(self):
    """
    add_domain() prefixes the domain only onto URLs that lack one;
    absolute, protocol-relative and mailto: URLs pass through.
    """
    cases = [
        (('example.com', '/foo/?arg=value'),
         'http://example.com/foo/?arg=value'),
        # Third positional argument selects https.
        (('example.com', '/foo/?arg=value', True),
         'https://example.com/foo/?arg=value'),
        (('example.com', 'http://djangoproject.com/doc/'),
         'http://djangoproject.com/doc/'),
        (('example.com', 'https://djangoproject.com/doc/'),
         'https://djangoproject.com/doc/'),
        (('example.com', 'mailto:uhoh@djangoproject.com'),
         'mailto:uhoh@djangoproject.com'),
        # Protocol-relative URLs get the http scheme.
        (('example.com', '//example.com/foo/?arg=value'),
         'http://example.com/foo/?arg=value'),
    ]
    for args, expected in cases:
        self.assertEqual(views.add_domain(*args), expected)
| bsd-3-clause |
p4datasystems/CarnotKEdist | dist/Lib/encodings/cp1006.py | 593 | 13824 | """ Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec mapping bytes <-> cp1006 characters through the
    # module-level charmap tables.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap encoding is stateless, so `final` can be ignored.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap decoding is stateless, so `final` can be ignored.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from Codec + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the cp1006 codec."""
    return codecs.CodecInfo(
        name='cp1006',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
u'\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
u'\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
u'\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
u'\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
u'\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
u'\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
u'\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
u'\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
u'\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
u'\u060c' # 0xAB -> ARABIC COMMA
u'\u061b' # 0xAC -> ARABIC SEMICOLON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u061f' # 0xAE -> ARABIC QUESTION MARK
u'\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
u'\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
u'\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
u'\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
u'\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
u'\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
u'\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
u'\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
u'\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
u'\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
u'\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
u'\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
u'\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
u'\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
u'\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
u'\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
u'\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
u'\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
u'\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
u'\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
u'\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
u'\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORMN
u'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
u'\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
u'\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
u'\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
u'\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
u'\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
u'\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
u'\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
u'\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
u'\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
u'\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
u'\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
u'\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
u'\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
u'\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
u'\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
u'\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
u'\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
u'\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
u'\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
u'\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
u'\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
u'\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
u'\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
u'\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
u'\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
u'\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
u'\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
u'\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
u'\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
u'\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
u'\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
u'\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
u'\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
u'\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
u'\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
u'\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
u'\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
u'\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
u'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
u'\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
u'\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
u'\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
u'\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
u'\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
u'\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
u'\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
u'\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
u'\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
u'\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
u'\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
u'\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
u'\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
u'\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
u'\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
u'\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
)
### Encoding table
# Built as the inverse of decoding_table by codecs.charmap_build.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
oouyang/fxos-certsuite | mcts/web-platform-tests/tests/tools/wptserve/wptserve/pipes.py | 7 | 13808 | from cgi import escape
import gzip as gzip_module
import logging
import re
import time
import types
import uuid
from cStringIO import StringIO
logger = logging.getLogger("wptserve")
def resolve_content(response):
    """Collapse the response body iterator into a single string,
    encoding unicode bodies with the response's declared encoding."""
    body = "".join(chunk for chunk in response.iter_content())
    if type(body) == unicode:
        body = body.encode(response.encoding)
    return body
class Pipeline(object):
    """A parsed chain of pipe functions applied to a response in order."""
    # Maps pipe name -> wrapped implementation; populated by the @pipe
    # decorator in this module.
    pipes = {}

    def __init__(self, pipe_string):
        self.pipe_functions = self.parse(pipe_string)

    def parse(self, pipe_string):
        """Turn "func(arg)|func2" syntax into [(callable, [args]), ...]."""
        functions = []
        for item in PipeTokenizer().tokenize(pipe_string):
            if not item:
                # The tokenizer terminates the token stream with None.
                break
            if item[0] == "function":
                functions.append((self.pipes[item[1]], []))
            elif item[0] == "argument":
                # Arguments attach to the most recently seen function.
                functions[-1][1].append(item[1])
        return functions

    def __call__(self, request, response):
        for func, args in self.pipe_functions:
            response = func(request, response, *args)
        return response
class PipeTokenizer(object):
    """Hand-rolled state-machine tokenizer for pipe strings such as
    ``trickle(d1:100)|gzip``.

    ``tokenize`` yields ("function", name) and ("argument", value) tuples
    and terminates the stream with a None sentinel.
    """
    def __init__(self):
        #This whole class can likely be replaced by some regexps
        self.state = None

    def tokenize(self, string):
        """Yield token tuples for *string*, ending with None."""
        self.string = string
        self.state = self.func_name_state
        self._index = 0
        while self.state:
            yield self.state()
        yield None

    def get_char(self):
        """Return the next input character, or None at end of input."""
        if self._index >= len(self.string):
            return None
        rv = self.string[self._index]
        self._index += 1
        return rv

    def func_name_state(self):
        """Accumulate a function name until '(', '|' or end of input."""
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                if rv:
                    return ("function", rv)
                else:
                    return None
            elif char == "(":
                self.state = self.argument_state
                return ("function", rv)
            elif char == "|":
                # Empty names (e.g. leading '|') are silently skipped.
                if rv:
                    return ("function", rv)
            else:
                rv += char

    def argument_state(self):
        """Accumulate one argument until ',' or ')', handling escapes."""
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                return ("argument", rv)
            elif char == "\\":
                escaped = self.get_escape()
                if escaped is None:
                    # Trailing backslash at end of input: emit what we have.
                    # (Bug fix: the original appended first and then tested
                    # `rv is None`, which could never be true, so a trailing
                    # backslash raised TypeError on `rv += None` instead.)
                    self.state = None
                    return ("argument", rv)
                rv += escaped
            elif char == ",":
                return ("argument", rv)
            elif char == ")":
                self.state = self.func_name_state
                return ("argument", rv)
            else:
                rv += char

    def get_escape(self):
        """Map \\n, \\r, \\t escapes; any other character stands for itself.

        Returns None when the input ends immediately after the backslash.
        """
        char = self.get_char()
        escapes = {"n": "\n",
                   "r": "\r",
                   "t": "\t"}
        return escapes.get(char, char)
class pipe(object):
    """Decorator registering a function as a named pipe in Pipeline.pipes.

    Positional arguments are converter callables applied to the string
    arguments parsed from the pipe expression; converters wrapped in
    ``opt`` mark optional trailing arguments.
    """
    def __init__(self, *arg_converters):
        self.arg_converters = arg_converters
        self.max_args = len(self.arg_converters)
        self.min_args = 0
        opt_seen = False
        for item in self.arg_converters:
            if not opt_seen:
                if isinstance(item, opt):
                    opt_seen = True
                else:
                    self.min_args += 1
            else:
                # Once an optional converter appears, every later converter
                # must be optional too.
                if not isinstance(item, opt):
                    raise ValueError("Non-optional argument cannot follow optional argument")

    def __call__(self, f):
        def inner(request, response, *args):
            if not (self.min_args <= len(args) <= self.max_args):
                raise ValueError("Expected between %d and %d args, got %d" %
                                 (self.min_args, self.max_args, len(args)))
            # The genexp's `f` shadows the decorated `f` only inside the
            # generator's own scope; the call below sees the original.
            arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
            return f(request, response, *arg_values)
        Pipeline.pipes[f.__name__] = inner
        #We actually want the undecorated function in the main namespace
        return f
class opt(object):
    """Wraps an argument converter to mark that pipe argument as optional."""
    def __init__(self, f):
        self.f = f

    def __call__(self, arg):
        return self.f(arg)
def nullable(func):
    """Wrap *func* so the literal string "null" (any case) maps to None."""
    def convert(arg):
        return None if arg.lower() == "null" else func(arg)
    return convert
def boolean(arg):
    """Convert "true"/"1" to True and "false"/"0" to False, ignoring case."""
    lowered = arg.lower()
    if lowered in ("true", "1"):
        return True
    if lowered in ("false", "0"):
        return False
    raise ValueError
@pipe(int)
def status(request, response, code):
    """Alter the status code.

    :param code: Status code to use for the response."""
    response.status = code
    return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
    """Set a HTTP header.

    Replaces any existing HTTP header of the same name unless
    append is set, in which case the header is appended without
    replacement.

    :param name: Name of the header to set.
    :param value: Value to use for the header.
    :param append: True if existing headers should not be replaced
    """
    if not append:
        response.headers.set(name, value)
    else:
        response.headers.append(name, value)
    return response
@pipe(str)
def trickle(request, response, delays):
    """Send the response in parts, with time delays.

    :param delays: A string of delays and amounts, in bytes, of the
                   response to send. Each component is separated by
                   a colon. Amounts in bytes are plain integers, whilst
                   delays are floats prefixed with a single d e.g.

                   d1:100:d2

                   Would cause a 1 second delay, would then send 100 bytes
                   of the file, and then cause a 2 second delay, before sending
                   the remainder of the file.

                   If the last token is of the form rN, instead of sending the
                   remainder of the file, the previous N instructions will be
                   repeated until the whole file has been sent e.g.

                   d1:100:d2:r2

                   Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
                   and then a further 100 bytes followed by a two second delay
                   until the response has been fully sent.
    """
    def parse_delays():
        # Parse "d1:100:r2" into [("delay", 1.0), ("bytes", 100), ...],
        # merging adjacent items of the same type.
        parts = delays.split(":")
        rv = []
        for item in parts:
            if item.startswith("d"):
                item_type = "delay"
                item = item[1:]
                value = float(item)
            elif item.startswith("r"):
                item_type = "repeat"
                value = int(item[1:])
                # A repeat must cover whole delay/bytes pairs, hence even.
                if not value % 2 == 0:
                    raise ValueError
            else:
                item_type = "bytes"
                value = int(item)
            if len(rv) and rv[-1][0] == item_type:
                # Bug fix: entries are tuples, so the original in-place
                # `rv[-1][1] += value` raised TypeError whenever two
                # adjacent tokens shared a type (e.g. "100:200").
                rv[-1] = (item_type, rv[-1][1] + value)
            else:
                rv.append((item_type, value))
        return rv

    delays = parse_delays()
    if not delays:
        return response
    content = resolve_content(response)
    modified_content = []
    # Mutable cell so the nested add_content() can advance the offset.
    offset = [0]

    def sleep(seconds):
        # Return a callable chunk; the server invokes it while streaming,
        # producing the delay at send time.
        def inner():
            time.sleep(seconds)
            return ""
        return inner

    def add_content(delays, repeat=False):
        for i, (item_type, value) in enumerate(delays):
            if item_type == "bytes":
                modified_content.append(content[offset[0]:offset[0] + value])
                offset[0] += value
            elif item_type == "delay":
                modified_content.append(sleep(value))
            elif item_type == "repeat":
                # "repeat" is only valid as the final instruction.
                assert i == len(delays) - 1
                while offset[0] < len(content):
                    add_content(delays[-(value + 1):-1], True)

        if not repeat and offset[0] < len(content):
            modified_content.append(content[offset[0]:])

    add_content(delays)
    response.content = modified_content
    return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
    """Send a byte range of the response body

    :param start: The starting offset. Follows python semantics including
                  negative numbers.
    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    # NOTE: shadows the builtin `slice`; the function name is also the
    # pipe's registered name, so it cannot be renamed safely.
    content = resolve_content(response)
    response.content = content[start:end]
    return response
class ReplacementTokenizer(object):
    """Tokenizer for the {{...}} template expressions used by sub()."""

    def ident(scanner, token):
        # A plain field name, possibly of the "name()" form e.g. "uuid()".
        return ("ident", token)

    def index(scanner, token):
        # A "[key]" lookup; numeric keys become ints, others unicode.
        token = token[1:-1]
        try:
            token = int(token)
        except ValueError:
            # Bug fix: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; int() only raises ValueError
            # for non-numeric strings.
            token = unicode(token, "utf8")
        return ("index", token)

    def var(scanner, token):
        # A "$name:" variable-assignment prefix; strip the trailing colon.
        token = token[:-1]
        return ("var", token)

    def tokenize(self, string):
        """Return the list of token tuples scanned from *string*."""
        return self.scanner.scan(string)[0]

    scanner = re.Scanner([(r"\$\w+:", var),
                          (r"\$?\w+(?:\(\))?", ident),
                          (r"\[[^\]]*\]", index)])
class FirstWrapper(object):
    """Adapts a MultiDict-style object so that indexing returns the first
    value for a key (via the wrapped object's ``first`` method)."""
    def __init__(self, params):
        self.params = params

    def __getitem__(self, key):
        return self.params.first(key)
@pipe()
def sub(request, response):
    """Substitute environment information about the server and request into the script.

    The format is a very limited template language. Substitutions are
    enclosed by {{ and }}. There are several avaliable substitutions:

    host
      A simple string value and represents the primary host from which the
      tests are being run.
    domains
      A dictionary of available domains indexed by subdomain name.
    ports
      A dictionary of lists of ports indexed by protocol.
    location
      A dictionary of parts of the request URL. Valid keys are
      'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
      'server' is scheme://host:port, 'host' is hostname:port, and query
      includes the leading '?', but other delimiters are omitted.
    headers
      A dictionary of HTTP headers in the request.
    GET
      A dictionary of query parameters supplied with the request.
    uuid()
      A pesudo-random UUID suitable for usage with stash

    So for example in a setup running on localhost with a www
    subdomain and a http server on ports 80 and 81::

      {{host}} => localhost
      {{domains[www]}} => www.localhost
      {{ports[http][1]}} => 81

    It is also possible to assign a value to a variable name, which must start with
    the $ character, using the ":" syntax e.g.

      {{$id:uuid()}

    Later substitutions in the same file may then refer to the variable
    by name e.g.

      {{$id}}
    """
    # All the real work happens in template(); this pipe just applies it
    # to the resolved response body.
    content = resolve_content(response)
    new_content = template(request, content)
    response.content = new_content
    return response
def template(request, content):
    """Expand every {{...}} expression in *content* (see sub() for the
    supported syntax) and return the substituted string."""
    #TODO: There basically isn't any error handling here
    tokenizer = ReplacementTokenizer()

    # Values assigned via the "$name:" syntax, shared across matches.
    variables = {}

    def config_replacement(match):
        content, = match.groups()

        tokens = tokenizer.tokenize(content)

        # An optional "$name:" prefix assigns the result to a variable.
        if tokens[0][0] == "var":
            variable = tokens[0][1]
            tokens = tokens[1:]
        else:
            variable = None

        assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens

        field = tokens[0][1]

        # Resolution order: previously assigned variables, then the
        # well-known fields, then the server configuration.
        if field in variables:
            value = variables[field]
        elif field == "headers":
            value = request.headers
        elif field == "GET":
            value = FirstWrapper(request.GET)
        elif field in request.server.config:
            value = request.server.config[tokens[0][1]]
        elif field == "location":
            value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
                                               request.url_parts.hostname,
                                               request.url_parts.port),
                     "scheme": request.url_parts.scheme,
                     "host": "%s:%s" % (request.url_parts.hostname,
                                        request.url_parts.port),
                     "hostname": request.url_parts.hostname,
                     "port": request.url_parts.port,
                     "path": request.url_parts.path,
                     "query": "?%s" % request.url_parts.query}
        elif field == "uuid()":
            value = str(uuid.uuid4())
        else:
            raise Exception("Undefined template variable %s" % field)

        # Apply any "[...]" index lookups in sequence.
        for item in tokens[1:]:
            value = value[item[1]]

        assert isinstance(value, (int,) + types.StringTypes), tokens

        if variable is not None:
            variables[variable] = value

        #Should possibly support escaping for other contexts e.g. script
        #TODO: read the encoding of the response
        return escape(unicode(value)).encode("utf-8")

    template_regexp = re.compile(r"{{([^}]*)}}")
    new_content, count = template_regexp.subn(config_replacement, content)

    return new_content
@pipe()
def gzip(request, response):
    """This pipe gzip-encodes response data.

    It sets (or overwrites) these HTTP headers:
    Content-Encoding is set to gzip
    Content-Length is set to the length of the compressed content
    """
    # NOTE: shadows the stdlib module name; the gzip module is imported
    # at the top of the file as gzip_module for this reason.
    content = resolve_content(response)
    response.headers.set("Content-Encoding", "gzip")
    out = StringIO()
    with gzip_module.GzipFile(fileobj=out, mode="w") as f:
        f.write(content)
    response.content = out.getvalue()
    response.headers.set("Content-Length", len(response.content))
    return response
| mpl-2.0 |
tdaede/can-websockets | pycrc/crc_opt.py | 1 | 17560 | # -*- coding: Latin-1 -*-
# pycrc -- parametrisable CRC calculation utility and C source code generator
#
# Copyright (c) 2006-2012 Thomas Pircher <tehpeh@gmx.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Option parsing library for pycrc.
use as follows:
from crc_opt import Options
opt = Options()
opt.parse(sys.argv[1:])
"""
from optparse import OptionParser, Option, OptionValueError
from copy import copy
import sys
from crc_models import CrcModels
# Class Options
###############################################################################
class Options(object):
    """
    The options parsing and validating class.

    Collects the CRC model parameters (width, polynomial, reflection and
    xor values), the algorithm selection and the requested action from the
    command line.  On any error a message is written to stderr and the
    process exits with status 1.
    """

    # Program details
    ProgramName = "pycrc"
    Version = "0.7.10"
    VersionStr = "%s v%s" % (ProgramName, Version)
    WebAddress = "http://www.tty1.net/pycrc/"

    # Bitmap of the algorithms
    Algo_None = 0x00
    Algo_Bit_by_Bit = 0x01
    Algo_Bit_by_Bit_Fast = 0x02
    Algo_Table_Driven = 0x04

    # Mutually exclusive actions selected on the command line.
    Action_Check_String = 0x01
    Action_Check_Hex_String = 0x02
    Action_Check_File = 0x03
    Action_Generate_H = 0x04
    Action_Generate_C = 0x05
    Action_Generate_C_Main = 0x06
    Action_Generate_Table = 0x07

    def __init__(self):
        """Initialise all model parameters; most stay undefined until parse()."""
        self.Width = None
        self.Poly = None
        self.ReflectIn = None
        self.XorIn = None
        self.ReflectOut = None
        self.XorOut = None
        self.TableIdxWidth = 8
        self.TableWidth = 1 << self.TableIdxWidth
        self.Verbose = False
        self.CheckString = "123456789"

        self.MSB_Mask = None
        self.Mask = None
        self.Algorithm = self.Algo_None
        self.SymbolPrefix = "crc_"
        self.CrcType = None
        self.IncludeFile = None
        self.OutputFile = None
        self.Action = self.Action_Check_String
        self.CheckFile = None
        self.CStd = None
        self.UndefinedCrcParameters = False

    def parse(self, argv=None):
        """
        Parse and validate the options given as arguments.

        argv -- the argument list to parse (sys.argv[1:] style); None lets
                optparse fall back to sys.argv[1:].
        """
        usage = """\
%prog [OPTIONS]

To generate the checksum of a string or hexadecimal data:
    %prog [model] --check-string "123456789"
    %prog [model] --check-hexstring "313233343536373839"

To generate the checksum of a file:
    %prog [model] --check-file filename

To generate the c-source and write it to filename:
    %prog [model] --generate c -o filename

The model can be defined by the --model switch or by specifying each of the
following parameters:
    --width --poly --reflect-in --xor-in --reflect-out --xor-out"""

        models = CrcModels()
        model_list = ", ".join(models.getList())
        parser = OptionParser(option_class=MyOption, usage=usage, version=self.VersionStr)
        parser.add_option("-v", "--verbose",
                action="store_true", dest="verbose", default=False,
                help="print information about the model")
        parser.add_option("--check-string",
                action="store", type="string", dest="check_string",
                help="calculate the checksum of the given string (default: '123456789')", metavar="STRING")
        parser.add_option("--check-hexstring",
                action="store", type="string", dest="check_hexstring",
                help="calculate the checksum of the given hexadecimal number string", metavar="STRING")
        parser.add_option("--check-file",
                action="store", type="string", dest="check_file",
                help="calculate the checksum of the given file", metavar="FILE")
        parser.add_option("--generate",
                action="store", type="string", dest="generate", default=None,
                help="choose which type of code to generate from {c, h, c-main, table}", metavar="CODE")
        parser.add_option("--std",
                action="store", type="string", dest="c_std", default="C99",
                help="C standard style of the generated code from {C89, ANSI, C99}", metavar="STD")
        parser.add_option("--algorithm",
                action="store", type="string", dest="algorithm", default="all",
                help="choose an algorithm from {bit-by-bit, bit-by-bit-fast, table-driven, all}", metavar="ALGO")
        parser.add_option("--model",
                action="callback", callback=self.model_cb, type="string", dest="model", default=None,
                help="choose a parameter set from {%s}" % model_list, metavar="MODEL")
        parser.add_option("--width",
                action="store", type="hex", dest="width",
                help="use NUM bits in the polynomial", metavar="NUM")
        parser.add_option("--poly",
                action="store", type="hex", dest="poly",
                help="use HEX as Polynom", metavar="HEX")
        parser.add_option("--reflect-in",
                action="store", type="bool", dest="reflect_in",
                help="reflect input bytes", metavar="BOOL")
        parser.add_option("--xor-in",
                action="store", type="hex", dest="xor_in",
                help="use HEX as initial value", metavar="HEX")
        parser.add_option("--reflect-out",
                action="store", type="bool", dest="reflect_out",
                help="reflect output bytes", metavar="BOOL")
        parser.add_option("--xor-out",
                action="store", type="hex", dest="xor_out",
                help="xor the final crc value with HEX", metavar="HEX")
        parser.add_option("--table-idx-width",
                action="store", type="int", dest="table_idx_width",
                help="use NUM bits to index the crc table; NUM must be one of the values {1, 2, 4, 8}", metavar="NUM")
        parser.add_option("--symbol-prefix",
                action="store", type="string", dest="symbol_prefix",
                help="when generating source code, use STRING as prefix to the generated symbols", metavar="STRING")
        parser.add_option("--crc-type",
                action="store", type="string", dest="crc_type",
                help="when generating source code, use STRING as crc_t type", metavar="STRING")
        parser.add_option("--include-file",
                action="store", type="string", dest="include_file",
                help="when generating source code, use FILE as additional include file", metavar="FILE")
        parser.add_option("-o", "--output",
                action="store", type="string", dest="output_file",
                help="write the generated code to file instead to stdout", metavar="FILE")

        (options, args) = parser.parse_args(argv)

        # Track which of the six model parameters are missing, so the error
        # message at the end can name them explicitly.
        undefined_params = []
        if options.width != None:
            self.Width = options.width
        else:
            undefined_params.append("--width")
        if options.poly != None:
            self.Poly = options.poly
        else:
            undefined_params.append("--poly")
        if options.reflect_in != None:
            self.ReflectIn = options.reflect_in
        else:
            undefined_params.append("--reflect-in")
        if options.xor_in != None:
            self.XorIn = options.xor_in
        else:
            undefined_params.append("--xor-in")
        if options.reflect_out != None:
            self.ReflectOut = options.reflect_out
        else:
            undefined_params.append("--reflect-out")
        if options.xor_out != None:
            self.XorOut = options.xor_out
        else:
            undefined_params.append("--xor-out")

        if options.table_idx_width != None:
            if options.table_idx_width == 1 or \
                    options.table_idx_width == 2 or \
                    options.table_idx_width == 4 or \
                    options.table_idx_width == 8:
                self.TableIdxWidth = options.table_idx_width
                self.TableWidth = 1 << options.table_idx_width
            else:
                sys.stderr.write("%s: error: unsupported table-idx-width %d\n" % (sys.argv[0], options.table_idx_width))
                sys.exit(1)

        if self.Poly != None and self.Poly % 2 == 0:
            sys.stderr.write("%s: warning: the polynomial 0x%x is even. A valid CRC polynomial must be odd.\n" % (sys.argv[0], self.Poly))

        if self.Width != None:
            if self.Width <= 0:
                sys.stderr.write("%s: error: Width must be strictly positive\n" % sys.argv[0])
                sys.exit(1)
            self.MSB_Mask = 0x1 << (self.Width - 1)
            self.Mask = ((self.MSB_Mask - 1) << 1) | 1
            # Truncate the given values to the selected width.
            if self.Poly != None:
                self.Poly = self.Poly & self.Mask
            if self.XorIn != None:
                self.XorIn = self.XorIn & self.Mask
            if self.XorOut != None:
                self.XorOut = self.XorOut & self.Mask
        else:
            self.MSB_Mask = None
            self.Mask = None

        if self.Width == None or \
                self.Poly == None or \
                self.ReflectIn == None or \
                self.XorIn == None or \
                self.ReflectOut == None or \
                self.XorOut == None:
            self.UndefinedCrcParameters = True
        else:
            self.UndefinedCrcParameters = False

        if options.algorithm != None:
            alg = options.algorithm.lower()
            if alg == "bit-by-bit" or alg == "all":
                self.Algorithm |= self.Algo_Bit_by_Bit
            if alg == "bit-by-bit-fast" or alg == "all":
                self.Algorithm |= self.Algo_Bit_by_Bit_Fast
            if alg == "table-driven" or alg == "all":
                self.Algorithm |= self.Algo_Table_Driven
            if self.Algorithm == 0:
                sys.stderr.write("%s: error: unknown algorithm %s\n" % (sys.argv[0], options.algorithm))
                sys.exit(1)

        if options.c_std != None:
            std = options.c_std.upper()
            if std == "ANSI" or std == "C89":
                self.CStd = "C89"
            elif std == "C99":
                self.CStd = std
            else:
                sys.stderr.write("%s: error: unknown C standard %s\n" % (sys.argv[0], options.c_std))
                sys.exit(1)

        if options.symbol_prefix != None:
            self.SymbolPrefix = options.symbol_prefix
        if options.include_file != None:
            self.IncludeFile = options.include_file
        if options.crc_type != None:
            self.CrcType = options.crc_type
        if options.output_file != None:
            self.OutputFile = options.output_file

        # Exactly one action may be selected; count how many were given.
        op_count = 0
        if options.check_string != None:
            self.Action = self.Action_Check_String
            self.CheckString = options.check_string
            op_count += 1
        if options.check_hexstring != None:
            self.Action = self.Action_Check_Hex_String
            self.CheckString = options.check_hexstring
            op_count += 1
        if options.check_file != None:
            self.Action = self.Action_Check_File
            self.CheckFile = options.check_file
            op_count += 1
        if options.generate != None:
            arg = options.generate.lower()
            if arg == 'h':
                self.Action = self.Action_Generate_H
            elif arg == 'c':
                self.Action = self.Action_Generate_C
            elif arg == 'c-main':
                self.Action = self.Action_Generate_C_Main
            elif arg == 'table':
                self.Action = self.Action_Generate_Table
            else:
                sys.stderr.write("%s: error: don't know how to generate %s\n" % (sys.argv[0], options.generate))
                sys.exit(1)
            op_count += 1

            if self.Action == self.Action_Generate_Table:
                if self.Algorithm & self.Algo_Table_Driven == 0:
                    sys.stderr.write("%s: error: the --generate table option is incompatible with the --algorithm option\n" % sys.argv[0])
                    sys.exit(1)
                self.Algorithm = self.Algo_Table_Driven
            elif self.Algorithm != self.Algo_Bit_by_Bit and self.Algorithm != self.Algo_Bit_by_Bit_Fast and self.Algorithm != self.Algo_Table_Driven:
                sys.stderr.write("%s: error: select an algorithm to be used in the generated file\n" % sys.argv[0])
                sys.exit(1)
        else:
            if self.TableIdxWidth != 8:
                sys.stderr.write("%s: warning: reverting to Table Index Width = 8 for internal CRC calculation\n" % sys.argv[0])
                self.TableIdxWidth = 8
                # Bug fix: recompute the table width from the reverted index
                # width (it was previously derived from the user-supplied
                # options.table_idx_width, yielding a wrong table size).
                self.TableWidth = 1 << self.TableIdxWidth

        if op_count == 0:
            self.Action = self.Action_Check_String
        if op_count > 1:
            sys.stderr.write("%s: error: too many actions specified\n" % sys.argv[0])
            sys.exit(1)

        if len(args) != 0:
            sys.stderr.write("%s: error: unrecognized argument(s): %s\n" % (sys.argv[0], " ".join(args)))
            sys.exit(1)

        # Actions that actually compute a CRC need a fully defined model.
        if self.UndefinedCrcParameters and self.Action in set((self.Action_Check_String, self.Action_Check_Hex_String, self.Action_Check_File, self.Action_Generate_Table)):
            sys.stderr.write("%s: error: undefined parameters: Add %s or use --model\n" % (sys.argv[0], ", ".join(undefined_params)))
            sys.exit(1)
        self.Verbose = options.verbose

    def model_cb(self, option, opt_str, value, parser):
        """
        optparse callback for --model: copy the named model's parameters
        into the parser values as if given individually on the command line.
        """
        model_name = value.lower()
        models = CrcModels()
        model = models.getParams(model_name)
        if model != None:
            setattr(parser.values, 'width', model['width'])
            setattr(parser.values, 'poly', model['poly'])
            setattr(parser.values, 'reflect_in', model['reflect_in'])
            setattr(parser.values, 'xor_in', model['xor_in'])
            setattr(parser.values, 'reflect_out', model['reflect_out'])
            setattr(parser.values, 'xor_out', model['xor_out'])
        else:
            raise OptionValueError("Error: unsupported model %s" % (value))
# function check_hex
###############################################################################
def check_hex(option, opt, value):
    """
    Convert an option argument given as a decimal integer or as a
    hexadecimal ("0x"-prefixed) string.

    Returns the converted value or raises OptionValueError on error.
    """
    base = 16 if value.lower().startswith("0x") else 10
    try:
        return int(value, base)
    except ValueError:
        raise OptionValueError("option %s: invalid integer or hexadecimal value: %r" % (opt, value))
# function check_bool
###############################################################################
def check_bool(option, opt, value):
    """
    Convert an option argument given as a boolean: either an integer
    (non-zero meaning True) or the words "true"/"false" (case-insensitive).

    Returns the converted value or raises OptionValueError on error.
    """
    lowered = value.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    if value.isdigit():
        return int(value, 10) != 0
    raise OptionValueError("option %s: invalid boolean value: %r" % (opt, value))
# Class MyOption
###############################################################################
class MyOption(Option):
    """
    optparse Option subclass registering the custom "hex" and "bool"
    argument types (parsed by check_hex and check_bool respectively).
    """
    TYPES = Option.TYPES + ("hex", "bool")
    TYPE_CHECKER = dict(Option.TYPE_CHECKER)
    TYPE_CHECKER["hex"] = check_hex
    TYPE_CHECKER["bool"] = check_bool
| gpl-2.0 |
axinging/sky_engine | sky/engine/bindings/scripts/dart_utilities.py | 13 | 5240 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions shared by various parts of the code generator.
Extends IdlType and IdlUnion type with |enum_validation_expression| property.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
################################################################################
# Utility function exposed for Dart CodeGenerator. Only 6 methods are special
# to Dart the rest delegate to the v8_utilities functions.
################################################################################
import v8_types # Required
import v8_utilities
def _scoped_name(interface, definition, base_name):
# partial interfaces are implemented as separate classes, with their members
# implemented as static member functions
partial_interface_implemented_as = definition.extended_attributes.get('PartialInterfaceImplementedAs')
if partial_interface_implemented_as:
return '%s::%s' % (partial_interface_implemented_as, base_name)
if (definition.is_static or
definition.name in ('Constructor', 'NamedConstructor')):
return '%s::%s' % (v8_utilities.cpp_name(interface), base_name)
return 'receiver->%s' % base_name
def _bool_to_cpp(tf):
return "true" if tf else "false"
# [CallWith]
# Maps each recognized [CallWith=...] extended-attribute value to the C++
# expression passed as the corresponding argument to the implementation.
_CALL_WITH_ARGUMENTS = {
    'ScriptState': 'state',
    'ExecutionContext': 'context',
    'ScriptArguments': 'scriptArguments.release()',
    'ActiveWindow': 'DOMDartState::CurrentWindow()',
    'FirstWindow': 'DOMDartState::CurrentWindow()',
    'Document': 'document',
}

# List because key order matters, as we want arguments in deterministic order
_CALL_WITH_VALUES = [
    'ScriptState',
    'ExecutionContext',
    'ScriptArguments',
    'ActiveWindow',
    'FirstWindow',
    'Document',
]
def _call_with_arguments(call_with_values):
if not call_with_values:
return []
return [_CALL_WITH_ARGUMENTS[value]
for value in _CALL_WITH_VALUES
if v8_utilities.extended_attribute_value_contains(call_with_values, value)]
def _generate_native_entry(interface_name, name, kind, is_static, arity):
if kind == 'Getter':
suffix = "_Getter"
elif kind == 'Setter':
suffix = "_Setter"
elif kind == 'Constructor':
name = "constructor"
suffix = "Callback"
elif kind == 'Method':
suffix = "_Callback"
tag = "%s%s" % (name, suffix)
native_entry = "_".join([interface_name, tag])
argument_names = ['__arg_%d' % i for i in range(0, arity)]
return {'blink_entry': name,
'argument_names': argument_names,
'resolver_string': native_entry}
################################################################################
# This is the monkey patched methods most delegate to v8_utilities but some are
# overridden in dart_utilities.
################################################################################
class dart_utilities_monkey():
    """Empty namespace object onto which the DartUtilities helpers are
    monkey-patched below."""

    def __init__(self):
        self.base_class_name = 'dart_utilities'
# Singleton helper namespace: most entries delegate to v8_utilities, while
# the underscore-prefixed ones defined above carry Dart-specific behaviour.
DartUtilities = dart_utilities_monkey()

DartUtilities.bool_to_cpp = _bool_to_cpp
DartUtilities.call_with_arguments = _call_with_arguments
DartUtilities.capitalize = v8_utilities.capitalize
DartUtilities.cpp_name = v8_utilities.cpp_name
DartUtilities.extended_attribute_value_contains = v8_utilities.extended_attribute_value_contains
DartUtilities.generate_native_entry = _generate_native_entry
DartUtilities.has_extended_attribute = v8_utilities.has_extended_attribute
DartUtilities.has_extended_attribute_value = v8_utilities.has_extended_attribute_value
DartUtilities.scoped_name = _scoped_name
DartUtilities.strip_suffix = v8_utilities.strip_suffix
DartUtilities.uncapitalize = v8_utilities.uncapitalize
| bsd-3-clause |
bkrukowski/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/committers.py | 121 | 11526 | # Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors have been moved to contributors.json
class Contributor(object):
    """A single project contributor: full name, e-mail addresses and IRC
    nicknames.

    NOTE(review): this module is Python 2 code — it relies on list-returning
    ``map`` and on ``unicode``; a straight port to Python 3 would change
    behaviour (``self.emails`` would become a one-shot iterator).
    """

    def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
        assert(name)
        assert(email_or_emails)
        self.full_name = name
        # Accept either a single address or a list of addresses.
        if isinstance(email_or_emails, str):
            self.emails = [email_or_emails]
        else:
            self.emails = email_or_emails
        self.emails = map(lambda email: email.lower(), self.emails) # Emails are case-insensitive.
        # Same single-or-list normalization for nicknames (may stay None).
        if isinstance(irc_nickname_or_nicknames, str):
            self.irc_nicknames = [irc_nickname_or_nicknames]
        else:
            self.irc_nicknames = irc_nickname_or_nicknames
        # Capability flags; the Committer and Reviewer subclasses flip these.
        self.can_commit = False
        self.can_review = False

    def bugzilla_email(self):
        # FIXME: We're assuming the first email is a valid bugzilla email,
        # which might not be right.
        return self.emails[0]

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return '"%s" <%s>' % (self.full_name, self.emails[0])

    def contains_string(self, search_string):
        """Case-insensitive substring match against the full name, the IRC
        nicknames and the e-mail addresses."""
        string = search_string.lower()
        if string in self.full_name.lower():
            return True
        if self.irc_nicknames:
            for nickname in self.irc_nicknames:
                if string in nickname.lower():
                    return True
        for email in self.emails:
            if string in email:
                return True
        return False

    def matches_glob(self, glob_string):
        """fnmatch-style glob match against the full name, the IRC nicknames
        and the e-mail addresses (case handling follows fnmatch)."""
        if fnmatch.fnmatch(self.full_name, glob_string):
            return True
        if self.irc_nicknames:
            for nickname in self.irc_nicknames:
                if fnmatch.fnmatch(nickname, glob_string):
                    return True
        for email in self.emails:
            if fnmatch.fnmatch(email, glob_string):
                return True
        return False
class Committer(Contributor):
    """A contributor who additionally holds commit rights."""

    def __init__(self, name, email_or_emails, irc_nickname=None):
        super(Committer, self).__init__(name, email_or_emails, irc_nickname)
        self.can_commit = True
class Reviewer(Committer):
    """A committer who may additionally review patches."""

    def __init__(self, name, email_or_emails, irc_nickname=None):
        super(Reviewer, self).__init__(name, email_or_emails, irc_nickname)
        self.can_review = True
class CommitterList(object):
    """Registry of contributors/committers/reviewers with lookup helpers.

    The canonical data lives in contributors.json next to this package; the
    constructor arguments exist only so tests can inject a fixed population.
    NOTE(review): Python 2 semantics throughout (list-returning ``filter``,
    ``iteritems``, integer division).
    """

    # Committers and reviewers are passed in to allow easy testing
    def __init__(self,
                 committers=[],
                 reviewers=[],
                 contributors=[]):
        # FIXME: These arguments only exist for testing. Clean it up.
        # NOTE(review): mutable default arguments; harmless here because the
        # lists are never mutated, but fragile if that ever changes.
        if not (committers or reviewers or contributors):
            loaded_data = self.load_json()
            contributors = loaded_data['Contributors']
            committers = loaded_data['Committers']
            reviewers = loaded_data['Reviewers']
        # Reviewers are committers, and committers are contributors.
        self._contributors = contributors + committers + reviewers
        self._committers = committers + reviewers
        self._reviewers = reviewers
        # Lazily-populated lookup caches (built on first use below).
        self._contributors_by_name = {}
        self._accounts_by_email = {}
        self._accounts_by_login = {}

    @staticmethod
    @memoized
    def load_json():
        """Read contributors.json once and build typed account objects."""
        filesystem = FileSystem()
        json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
        contributors = json.loads(filesystem.read_text_file(json_path))
        return {
            'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
            'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
            'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
        }

    def contributors(self):
        return self._contributors

    def committers(self):
        return self._committers

    def reviewers(self):
        return self._reviewers

    def _name_to_contributor_map(self):
        # Build the lowercase-name cache lazily on first access.
        if not len(self._contributors_by_name):
            for contributor in self._contributors:
                assert(contributor.full_name)
                assert(contributor.full_name.lower() not in self._contributors_by_name) # We should never have duplicate names.
                self._contributors_by_name[contributor.full_name.lower()] = contributor
        return self._contributors_by_name

    def _email_to_account_map(self):
        # Build the e-mail cache lazily on first access.
        if not len(self._accounts_by_email):
            for account in self._contributors:
                for email in account.emails:
                    assert(email not in self._accounts_by_email) # We should never have duplicate emails.
                    self._accounts_by_email[email] = account
        return self._accounts_by_email

    def _login_to_account_map(self):
        # Build the bugzilla-login cache lazily on first access.
        if not len(self._accounts_by_login):
            for account in self._contributors:
                if account.emails:
                    login = account.bugzilla_email()
                    assert(login not in self._accounts_by_login) # We should never have duplicate emails.
                    self._accounts_by_login[login] = account
        return self._accounts_by_login

    def _committer_only(self, record):
        # Filter helper: drop records without commit rights.
        if record and not record.can_commit:
            return None
        return record

    def _reviewer_only(self, record):
        # Filter helper: drop records without review rights.
        if record and not record.can_review:
            return None
        return record

    def committer_by_name(self, name):
        return self._committer_only(self.contributor_by_name(name))

    def contributor_by_irc_nickname(self, irc_nickname):
        for contributor in self.contributors():
            # FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
            if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
                return contributor
        return None

    def contributors_by_search_string(self, string):
        """Glob matches take precedence over plain substring matches."""
        glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
        return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())

    def contributors_by_email_username(self, string):
        """Match the part of an e-mail address before the '@'."""
        string = string + '@'
        result = []
        for contributor in self.contributors():
            for email in contributor.emails:
                if email.startswith(string):
                    result.append(contributor)
                    break
        return result

    def _contributor_name_shorthands(self, contributor):
        # e.g. "Dan Bernstein" -> ("Dan", "Bernstein", "DanB", "Dan B").
        if ' ' not in contributor.full_name:
            return []
        split_fullname = contributor.full_name.split()
        first_name = split_fullname[0]
        last_name = split_fullname[-1]
        return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]

    def _tokenize_contributor_name(self, contributor):
        # Tokens used by the fuzzy matcher: full name, its words, and any
        # sufficiently long (>5 chars) IRC nickname.
        full_name_in_lowercase = contributor.full_name.lower()
        tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
        if contributor.irc_nicknames:
            return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
        return tokens

    def contributors_by_fuzzy_match(self, string):
        """Return (candidates, edit_distance) for the best match(es)."""
        string_in_lowercase = string.lower()

        # 1. Exact match for fullname, email and irc_nicknames
        account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
        if account:
            return [account], 0

        # 2. Exact match for email username (before @)
        accounts = self.contributors_by_email_username(string_in_lowercase)
        if accounts and len(accounts) == 1:
            return accounts, 0

        # 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
        accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
        if accounts and len(accounts) == 1:
            return accounts, 0

        # 4. Finally, fuzzy-match using edit-distance
        string = string_in_lowercase
        contributorWithMinDistance = []
        minDistance = len(string) / 2 - 1
        for contributor in self.contributors():
            tokens = self._tokenize_contributor_name(contributor)
            editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
            if not editdistances:
                continue
            distance = min(editdistances)
            if distance == minDistance:
                contributorWithMinDistance.append(contributor)
            elif distance < minDistance:
                contributorWithMinDistance = [contributor]
                minDistance = distance
        if not len(contributorWithMinDistance):
            return [], len(string)
        return contributorWithMinDistance, minDistance

    def contributor_by_email(self, email):
        return self._email_to_account_map().get(email.lower()) if email else None

    def contributor_by_name(self, name):
        return self._name_to_contributor_map().get(name.lower()) if name else None

    def committer_by_email(self, email):
        return self._committer_only(self.contributor_by_email(email))

    def reviewer_by_email(self, email):
        return self._reviewer_only(self.contributor_by_email(email))
| bsd-3-clause |
aronsky/home-assistant | homeassistant/components/switch/netio.py | 7 | 5566 | """
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP, STATE_ON)
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pynetio==0.1.9.1']
_LOGGER = logging.getLogger(__name__)
ATTR_START_DATE = 'start_date'
ATTR_TOTAL_CONSUMPTION_KWH = 'total_energy_kwh'
CONF_OUTLETS = 'outlets'
DEFAULT_PORT = 1234
DEFAULT_USERNAME = 'admin'
DEPENDENCIES = ['http']
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = '/api/netio/{host}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Netio platform."""
    from pynetio import Netio

    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    port = config.get(CONF_PORT)

    if not DEVICES:
        # Register the HTTP push endpoint only once, before the first device
        # is added.
        hass.http.register_view(NetioApiView)

    dev = Netio(host, port, username, password)
    DEVICES[host] = Device(dev, [])

    # Throttle the update for all Netio switches of one Netio
    dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)

    # One NetioSwitch entity per configured outlet (key -> friendly name).
    for key in config[CONF_OUTLETS]:
        switch = NetioSwitch(
            DEVICES[host].netio, key, config[CONF_OUTLETS][key])
        DEVICES[host].entities.append(switch)

    add_entities(DEVICES[host].entities)

    # Close device connections when Home Assistant shuts down.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
    return True
def dispose(event):
    """Close connections to Netio Devices."""
    for device in DEVICES.values():
        device.netio.stop()
class NetioApiView(HomeAssistantView):
    """WSGI handler class.

    Receives state pushed by the Netio device as query parameters and
    mirrors it onto the matching Device entry in DEVICES.
    """

    url = URL_API_NETIO_EP
    name = 'api:netio'

    @callback
    def get(self, request, host):
        """Request handler."""
        hass = request.app['hass']
        data = request.query
        states, consumptions, cumulated_consumptions, start_dates = \
            [], [], [], []

        # The device reports four outlets, named output1..output4 in the
        # query parameters.
        for i in range(1, 5):
            out = 'output%d' % i
            states.append(data.get('%s_state' % out) == STATE_ON)
            consumptions.append(float(data.get('%s_consumption' % out, 0)))
            # Divided by 1000 to match ATTR_TOTAL_CONSUMPTION_KWH —
            # presumably the device reports Wh; TODO confirm.
            cumulated_consumptions.append(
                float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
            start_dates.append(data.get('%s_consumptionStart' % out, ""))

        _LOGGER.debug('%s: %s, %s, %s since %s', host, states,
                      consumptions, cumulated_consumptions, start_dates)

        # Store the fresh readings on the shared device object ...
        ndev = DEVICES[host].netio
        ndev.consumptions = consumptions
        ndev.cumulated_consumptions = cumulated_consumptions
        ndev.states = states
        ndev.start_dates = start_dates

        # ... and push a state refresh to every entity backed by it.
        for dev in DEVICES[host].entities:
            hass.async_create_task(dev.async_update_ha_state())

        return self.json(True)
class NetioSwitch(SwitchDevice):
    """Provide a Netio linked switch."""

    def __init__(self, netio, outlet, name):
        """Initialize the Netio switch."""
        self._name = name
        self.outlet = outlet
        self.netio = netio

    @property
    def name(self):
        """Return the device's name."""
        return self._name

    @property
    def available(self):
        """Return true if entity is available."""
        return not hasattr(self, 'telnet')

    def turn_on(self, **kwargs):
        """Turn the outlet on."""
        self._set(True)

    def turn_off(self, **kwargs):
        """Turn the outlet off."""
        self._set(False)

    def _set(self, value):
        """Send the desired outlet state to the device and mirror it locally."""
        index = int(self.outlet) - 1
        # 'u' leaves an outlet unchanged; set only our own position.
        command = list('uuuu')
        command[index] = '1' if value else '0'
        self.netio.get('port list %s' % ''.join(command))
        self.netio.states[index] = value
        self.schedule_update_ha_state()

    @property
    def is_on(self):
        """Return the switch's status."""
        return self.netio.states[int(self.outlet) - 1]

    def update(self):
        """Update the state (throttled at the shared Netio device level)."""
        self.netio.update()

    @property
    def state_attributes(self):
        """Return optional state attributes."""
        return {
            ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
            ATTR_START_DATE: self.start_date.split('|')[0],
        }

    @property
    def current_power_w(self):
        """Return the current power usage in W."""
        return self.netio.consumptions[int(self.outlet) - 1]

    @property
    def cumulated_consumption_kwh(self):
        """Return the total energy consumption since start_date."""
        return self.netio.cumulated_consumptions[int(self.outlet) - 1]

    @property
    def start_date(self):
        """Point in time when the energy accumulation started."""
        return self.netio.start_dates[int(self.outlet) - 1]
| apache-2.0 |
abdellatifkarroum/odoo | addons/stock_account/wizard/stock_invoice_onshipping.py | 120 | 6111 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_invoice_onshipping(osv.osv_memory):
    """Wizard that creates invoices or refunds from the selected pickings."""

    def _get_journal(self, cr, uid, context=None):
        # Default journal: the first journal whose type matches the
        # journal type derived from the active pickings.
        journal_obj = self.pool.get('account.journal')
        journal_type = self._get_journal_type(cr, uid, context=context)
        journals = journal_obj.search(cr, uid, [('type', '=', journal_type)])
        return journals and journals[0] or False
    def _get_journal_type(self, cr, uid, context=None):
        # Derive the journal type from the first selected picking's
        # direction (incoming/outgoing) and the usage of its source and
        # destination locations.  Falls back to 'sale'.
        if context is None:
            context = {}
        res_ids = context and context.get('active_ids', [])
        pick_obj = self.pool.get('stock.picking')
        pickings = pick_obj.browse(cr, uid, res_ids, context=context)
        vals = []
        pick = pickings and pickings[0]
        if not pick or not pick.move_lines:
            return 'sale'
        src_usage = pick.move_lines[0].location_id.usage
        dest_usage = pick.move_lines[0].location_dest_id.usage
        type = pick.picking_type_id.code
        if type == 'outgoing' and dest_usage == 'supplier':
            # Goods returned to a supplier -> refund the purchase.
            journal_type = 'purchase_refund'
        elif type == 'outgoing' and dest_usage == 'customer':
            journal_type = 'sale'
        elif type == 'incoming' and src_usage == 'supplier':
            journal_type = 'purchase'
        elif type == 'incoming' and src_usage == 'customer':
            # Goods returned by a customer -> refund the sale.
            journal_type = 'sale_refund'
        else:
            journal_type = 'sale'
        return journal_type
    _name = "stock.invoice.onshipping"
    _description = "Stock Invoice Onshipping"
    _columns = {
        'journal_id': fields.many2one('account.journal', 'Destination Journal', required=True),
        'journal_type': fields.selection([('purchase_refund', 'Refund Purchase'), ('purchase', 'Create Supplier Invoice'),
            ('sale_refund', 'Refund Sale'), ('sale', 'Create Customer Invoice')], 'Journal Type', readonly=True),
        'group': fields.boolean("Group by partner"),
        'invoice_date': fields.date('Invoice Date'),
    }
    # NOTE: _defaults references the methods above, so they must stay
    # defined before this assignment.
    _defaults = {
        'journal_type': _get_journal_type,
        'journal_id' : _get_journal,
    }
    def view_init(self, cr, uid, fields_list, context=None):
        # Refuse to open the wizard when none of the selected pickings
        # is still waiting to be invoiced ('2binvoiced').
        if context is None:
            context = {}
        res = super(stock_invoice_onshipping, self).view_init(cr, uid, fields_list, context=context)
        pick_obj = self.pool.get('stock.picking')
        count = 0
        active_ids = context.get('active_ids',[])
        for pick in pick_obj.browse(cr, uid, active_ids, context=context):
            if pick.invoice_state != '2binvoiced':
                count += 1
        if len(active_ids) == count:
            raise osv.except_osv(_('Warning!'), _('None of these picking lists require invoicing.'))
        return res
    def open_invoice(self, cr, uid, ids, context=None):
        # Create the invoices, then return the act_window that lists them.
        if context is None:
            context = {}
        invoice_ids = self.create_invoice(cr, uid, ids, context=context)
        if not invoice_ids:
            raise osv.except_osv(_('Error!'), _('No invoice created!'))
        data = self.browse(cr, uid, ids[0], context=context)
        action_model = False
        action = {}
        journal2type = {'sale':'out_invoice', 'purchase':'in_invoice' , 'sale_refund':'out_refund', 'purchase_refund':'in_refund'}
        inv_type = journal2type.get(data.journal_type) or 'out_invoice'
        data_pool = self.pool.get('ir.model.data')
        # Pick the invoice tree view matching the created invoice type.
        if inv_type == "out_invoice":
            action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree1')
        elif inv_type == "in_invoice":
            action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
        elif inv_type == "out_refund":
            action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree3')
        elif inv_type == "in_refund":
            action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree4')
        if action_id:
            action_pool = self.pool['ir.actions.act_window']
            action = action_pool.read(cr, uid, action_id, context=context)
            # Restrict the view to the invoices created by this wizard.
            action['domain'] = "[('id','in', ["+','.join(map(str,invoice_ids))+"])]"
            return action
        return True
    def create_invoice(self, cr, uid, ids, context=None):
        # Delegate actual invoice creation to stock.picking for the
        # pickings in context['active_ids'].
        context = dict(context or {})
        picking_pool = self.pool.get('stock.picking')
        data = self.browse(cr, uid, ids[0], context=context)
        journal2type = {'sale':'out_invoice', 'purchase':'in_invoice', 'sale_refund':'out_refund', 'purchase_refund':'in_refund'}
        context['date_inv'] = data.invoice_date
        acc_journal = self.pool.get("account.journal")
        inv_type = journal2type.get(data.journal_type) or 'out_invoice'
        context['inv_type'] = inv_type
        active_ids = context.get('active_ids', [])
        res = picking_pool.action_invoice_create(cr, uid, active_ids,
          journal_id = data.journal_id.id,
          group = data.group,
          type = inv_type,
          context=context)
        return res
| agpl-3.0 |
nanonyme/nanobot | app.py | 1 | 12063 | # -*- coding: utf-8 -*-
from twisted.spread import pb
from twisted.internet import endpoints, task, defer
from os import environ
import functools
import treq
import lxml.html
import re
import Levenshtein
from urllib import parse as urlparse
import json
import sqlite3
import codecs
import simple_eval
import ipaddress
from twisted.logger import textFileLogObserver, globalLogPublisher, Logger
# Module-wide twisted logger; a file observer is attached in __main__.
log = Logger()
class AppException(Exception):
    """Application-level error raised for expected failure conditions."""
# Private/loopback networks the URL fetcher must never contact
# (SSRF protection for user-supplied links).
BLOCKLIST = [
    ipaddress.IPv4Network('127.0.0.0/8'),
    ipaddress.IPv4Network('192.168.0.0/16'),
    ipaddress.IPv4Network('10.0.0.0/8'),
    ipaddress.IPv4Network('172.16.0.0/12'),
    ipaddress.IPv6Network('::1'),
    ipaddress.IPv6Network('fe80::/10'),
]

# Global configuration, populated from the CONFIG json file in __main__.
config = {}


def acceptable_netloc(hostname):
    """Return True when *hostname* (a URL netloc) may be fetched.

    Accepts a full netloc such as ``user@host:port`` or ``[::1]:8080``:
    userinfo, the port and IPv6 brackets are stripped before checking,
    so a blocked address can no longer be smuggled past the filter by
    appending a port (the previous implementation compared the raw
    netloc, letting e.g. ``127.0.0.1:8080`` through).  Matching is
    case-insensitive, so ``LOCALHOST`` is refused as well.
    """
    # Drop userinfo, then isolate the host from an optional port.
    host = hostname.rpartition("@")[2]
    if host.startswith("["):
        # Bracketed IPv6 literal, possibly followed by ":port".
        host = host[1:].partition("]")[0]
    else:
        host = host.partition(":")[0]
    host = host.lower()
    try:
        address = ipaddress.ip_address(host)
    except ValueError:
        # Not an IP literal: only the literal name "localhost" is refused.
        return host != "localhost"
    for network in BLOCKLIST:
        if address in network:
            return False
    return True
class UrlHandler(object):
    """Fetch a URL with treq and extract its <title>, enforcing a
    response-size cap and an accepted MIME-type whitelist."""

    # Per-request timeout in seconds, passed to treq.get().
    TIMEOUT = 30
    # NOTE(review): the headers default is a mutable dict shared across
    # instances; safe only as long as callers never mutate it.
    def __init__(self, max_body, parser_class,
                 accepted_mimes=("text/html",),
                 headers={"Accept-Language": "en-US",
                          "User-Agent": ("nanobot title fetching, contacts to"
                                         "http://github.com/nanonyme/nanobot")
                          }):
        self.max_body = max_body          # cap on bytes fed to the parser
        self.bytes = 0                    # bytes consumed so far
        self.parser_class = parser_class  # e.g. lxml.html.HTMLParser
        self.parser = None                # created in handle_response()
        self.accepted_mimes = accepted_mimes
        self.headers = headers
    def feed(self, data):
        # Collector callback: feed at most max_body bytes into the
        # parser, truncating the final chunk when needed.
        if self.bytes < self.max_body:
            if len(data) > self.max_body - self.bytes:
                data = data[:self.max_body - self.bytes]
            data_len = len(data)
            self.bytes += data_len
            self.parser.feed(data)
        else:
            # NOTE(review): self.connection is never assigned in this
            # class, so exceeding max_body raises AttributeError here,
            # which aborts collect() and makes the URL count as a
            # failure — confirm this is the intended cancellation path.
            self.connection.cancel()
    async def handle_response(self, response):
        """Validate an HTTP response and parse its body.

        Raises AppException for non-200 codes, a missing/empty
        Content-Type, or a MIME type outside accepted_mimes; returns
        the parser's close() result.
        """
        if response.code != 200:
            raise AppException(f"Response code {response.code}")
        try:
            headers = response.headers.getRawHeaders("Content-Type")
        except KeyError:
            raise AppException("No Content-Type")
        if not headers:
            raise AppException("Empty Content-Type")
        else:
            header = headers[0]
            log.info(f"Header line {header}")
        # Split "mime; charset=encoding" and validate the charset name.
        mime, _, encoding = header.partition(";")
        if encoding:
            _, _, encoding = encoding.strip().partition("=")
            try:
                codecs.lookup(encoding)
            except LookupError:
                encoding = None
        if mime not in self.accepted_mimes:
            raise AppException(f"Mime {mime} not supported")
        if encoding:
            # NOTE(review): the validated encoding is only logged; it is
            # not passed to parser_class — presumably lxml's own
            # detection is relied upon.  Confirm.
            log.info(f"Using encoding {encoding} to handle response")
        self.parser = self.parser_class()
        await response.collect(self.feed)
        return self.parser.close()
    async def get_url(self, url):
        """Issue the GET request for *url* with the configured headers."""
        return await treq.get(url, timeout=self.TIMEOUT, headers=self.headers)
    async def get_title(self, url):
        """Fetch *url* and return its whitespace-normalised <title> text."""
        response = await self.get_url(url)
        root = await self.handle_response(response)
        title = root.xpath("//title")[0].text
        if not title:
            return ""
        else:
            return " ".join(title.split())
def difference_check(a, s):
    """Return True when the two strings are considered different.

    Short strings (either side under 14 characters) must match exactly;
    longer strings tolerate an edit distance of up to 6 before they
    count as different.
    """
    if len(a) < 14 or len(s) < 14:
        # Unequal lengths trivially differ, so a plain comparison
        # covers both of the original short-string cases.
        return a != s
    return Levenshtein.distance(a, s) >= 7
def dynsearch(l, s):
    """Recursively compare URL path segments against a title string.

    Returns True when every tail of the segment list (and its joined
    form) is sufficiently different from *s* per difference_check.
    """
    head, tail = l[0], l[1:]
    if not tail:
        return difference_check(head, s)
    if not dynsearch(tail, s):
        return False
    return difference_check("".join(tail), s)
def prepare_url(url):
    """Normalise a URL's path for title comparison.

    Strips separator characters, lowercases, drops trailing digits and
    returns the path split into segments.
    """
    path = urlparse.unquote(urlparse.urlparse(url).path)
    for ch in "- +_":
        path = path.replace(ch, "")
    path = path.lower().rstrip("0123456789")
    return path.split("/")
def prepare_title(title):
    """Normalise a page title for comparison against its URL path.

    Strips separator characters, lowercases and keeps only the part
    before the first hyphen/en-dash (typically the site name suffix
    is after it).
    """
    for ch in "+ _":
        title = title.replace(ch, "")
    return re.split("[-–]", title.lower())[0]
class MessageHandler(object):
    """Scan chat messages for http(s) links and announce page titles.

    Successful lookups go to *hits*, failed ones to *misses* (both
    UrlCache instances), so repeated links are answered from cache and
    broken links are not retried for a while.
    """

    # Class attribute so tests/subclasses can substitute the fetcher.
    _URL_HANDLER_CLASS = UrlHandler
    def __init__(self, reactor, hits, misses, callback, max_len):
        self._reactor = reactor
        self._hits = hits          # UrlCache: url -> title
        self._misses = misses      # UrlCache: url -> "miss" marker
        self._max_len = max_len    # truncation limit for titles
        self._callback = callback  # sends a reply line to the channel
    async def success(self, title, url):
        """Send the title when it differs enough from the URL itself."""
        log.info(f"Got title {title}")
        # Only announce when the title adds information over the path.
        if dynsearch(prepare_url(url), prepare_title(title)):
            log.info("Will try to send title as a message")
            await self._callback("title: %s" % title)
            # Throttle: pause two seconds before the next link.
            await task.deferLater(self._reactor, 2, defer.succeed,
                                  None)
    def fail(self, url):
        """Record *url* in the miss cache and log the active failure."""
        self._misses.update(url, "miss")
        # log.failure() is valid here because fail() is only called
        # from the except block in find_links().
        log.failure(f"Adding {url} to temporary block list")
    async def find_links(self, message):
        """Find every http(s) URL in *message* and emit its title."""
        for m in re.finditer("(https?://[^ ]+)", message):
            url = m.group(0)
            # Skip local/private hosts (SSRF guard).
            if not acceptable_netloc(urlparse.urlparse(url).netloc):
                continue
            if self._misses.fetch(url):
                log.info((f"Skipped title check for URL {url} because of "
                          "previous failures"))
                continue
            title = self._hits.fetch(url)
            if title is None:
                log.info(f"Cache miss for URL {url}")
                handler = self._URL_HANDLER_CLASS(
                    max_body=2 * 1024 ** 2, parser_class=lxml.html.HTMLParser)
                try:
                    title = await handler.get_title(url)
                except Exception:
                    self.fail(url)
                else:
                    # Truncate before caching so cache mirrors output.
                    if len(title) > self._max_len:
                        title = title[:self._max_len]
                    if title:
                        self._hits.update(url, title)
                    await self.success(title, url)
            else:
                log.info(f"Cache hit for URL {url}")
                await self.success(title, url)
class UrlCache(object):
    """In-memory cache whose entries expire after *expiration* seconds.

    Timestamps come from the reactor clock and a twisted LoopingCall
    periodically drops stale entries, so the cache can be driven by a
    fake reactor in tests.
    """

    def __init__(self, reactor, expiration=60):
        self._reactor = reactor
        self._expiration = expiration
        self._db = {}
        self._reaper = task.LoopingCall(self._reap)
        self._reaper.clock = reactor

    def fetch(self, key):
        """Return the cached value for *key*, or None when absent."""
        entry = self._db.get(key)
        if entry is None:
            return None
        return entry["value"]

    def update(self, key, value):
        """Store *value* under *key*, stamped with the current time."""
        self._db[key] = {"value": value,
                         "timestamp": self._reactor.seconds()}

    def _valid(self):
        """Yield the (key, entry) pairs that have not yet expired."""
        for key, entry in self._db.items():
            if self._reactor.seconds() - entry["timestamp"] < self._expiration:
                yield key, entry

    def enable(self):
        """Start the periodic reaper (idempotent)."""
        if not self._reaper.running:
            self._reaper.start(self._expiration, False)

    def disable(self):
        """Stop the periodic reaper (idempotent)."""
        if self._reaper.running:
            self._reaper.stop()

    def _reap(self):
        """Drop expired entries."""
        self._db = dict(self._valid())
class API(pb.Referenceable):
    """Perspective-broker object handed to the bot core.

    Receives IRC messages over PB and dispatches them either to "!"
    command handling or to link-title extraction.
    """

    # Messages older than this many seconds are dropped entirely.
    STALENESS_LIMIT = 24*60*60
    def __init__(self, reactor):
        self.reactor = reactor
        # Successful url -> title lookups, kept for an hour.
        self.good_urls = UrlCache(self.reactor, expiration=3600)
        self.good_urls.enable()
        # Failed urls, not retried for a minute.
        self.bad_urls = UrlCache(self.reactor, expiration=60)
        self.bad_urls.enable()
    def _staleness_check(self, timestamp):
        """Return True when *timestamp* is too old to act on."""
        if self.reactor.seconds() - timestamp > self.STALENESS_LIMIT:
            log.info("Message stale, ignoring")
            return True
        else:
            return False
    def remote_handlePublicMessage(self, protocol, user, channel, message,
                                   max_line_length, timestamp):
        """Handle a channel message: run "!" commands or scan for links."""
        if self._staleness_check(timestamp):
            return
        try:
            # Replies go back through the remote protocol to the channel.
            callback = functools.partial(
                protocol.callRemote, "msg", channel)
            roles = resolveRoles(user)
            if "ignored" in roles:
                return
            if message.startswith("!"):
                return handleCommand(protocol, user, roles, channel, message[1:],
                                     max_line_length, callback)
            else:
                handler = MessageHandler(self.reactor, self.good_urls,
                                         self.bad_urls, callback,
                                         max_line_length)
                return defer.ensureDeferred(handler.find_links(message))
        except Exception:
            # Last-resort guard so a handler bug cannot break the PB link.
            log.failure("FIXME, runaway exception")
    def remote_handlePrivateMessage(self, protocol, user, channel, message,
                                    max_line_length, timestamp):
        """Handle a private message by replying to the sender's nick."""
        if self._staleness_check(timestamp):
            return
        # The nick part of "nick!user@host" becomes the reply target.
        channel, _, _ = user.partition("!")
        return self.remote_handlePublicMessage(protocol, user, channel,
                                               message,
                                               max_line_length,
                                               timestamp)
# Role names granted to a user, matched on the user's full IRC mask.
user_query = ("select roles.name from roles where roles.oid in "
              "(select userroles.oid from (users natural join usermask)"
              "natural join userroles where usermask.mask=?);")


def resolveRoles(user):
    """Return the list of role names attached to *user*'s mask.

    Uses fetchall() so every granted role is returned; the previous
    fetchmany() with no argument only returned cursor.arraysize rows
    (one by default), silently dropping all but the first role.
    """
    with sqlite3.connect(config["core"]["db"]) as conn:
        cur = conn.cursor()
        res = cur.execute(user_query, (user,))
        return [role[0] for role in res.fetchall()]
def handleCommand(protocol, user, roles, channel, message, max_line_length,
                  callback):
    """Execute a "!" command issued by *user*.

    *message* is the command text without the leading "!".  *roles*
    gates the privileged commands (reincarnate/join/leave require
    "superadmin").  Replies are sent through *callback*.
    """
    command, _, suffix = message.partition(" ")
    if command == "reincarnate":
        if "superadmin" in roles:
            log.info("Restarting app")
            # NOTE(review): relies on the module-global `reactor` bound
            # in the __main__ block; unavailable if imported as a library.
            reactor.stop()
        else:
            log.info("User {user} tried to do code reload", user=user)
    elif command == "eval":
        # Split on the FIRST ":" only so the expression itself may
        # contain colons; the old split(":") raised ValueError there.
        truth, _, expr = suffix.partition(":")
        truth = [s.strip() for s in truth.split(",")]
        try:
            ret = simple_eval.eval_bool(expr, truth)
        except simple_eval.EvalError as e:
            callback(str(e))
        else:
            callback("Result: %s" % ret)
    elif command == "join":
        channel, _, password = suffix.partition(" ")
        if not password:
            password = None
        if "superadmin" in roles:
            if password:
                log.info(f"Joining {channel} ({password})")
            else:
                log.info(f"Joining {channel}")
            return protocol.callRemote("join", channel, password)
    elif command == "leave":
        channel, _, reason = suffix.partition(" ")
        if not reason:
            reason = None
        if "superadmin" in roles:
            if reason:
                log.info("Leaving {channel} ({reason})",
                         channel=channel, reason=reason)
            else:
                log.info(f"Leaving {channel}")
            return protocol.callRemote("leave", channel, reason)
    else:
        log.info(f"Unrecognized command {command}")
def log_and_exit(ret, reactor):
    """Errback of last resort: log the active failure, stop the reactor."""
    log.failure("Critical failure, terminating application")
    reactor.stop()
def register(root, reactor):
    """Register this app's API object with the remote bot core."""
    log.info("Registering app for bot")
    return root.callRemote("register", API(reactor))
if __name__ == "__main__":
    from twisted.internet import reactor
    # Load configuration into the module-level dict before anything runs.
    with open(environ["CONFIG"]) as f:
        config.update(json.load(f))
    # Deliberately left open: the log observer writes to this handle
    # for the lifetime of the process.
    f = open(config["core"]["log_file"], "a")
    globalLogPublisher.addObserver(textFileLogObserver(f))
    # The bot core talks to this process over perspective broker on stdio.
    endpoint = endpoints.StandardIOEndpoint(reactor)
    factory = pb.PBClientFactory()
    d = endpoint.listen(factory)
    # Decorator form attaches initialize() as a callback on the listen
    # deferred: once the transport is up, register our API object.
    @d.addCallback
    def initialize(_):
        d = factory.getRootObject()
        d.addCallback(register, reactor)
        d.addErrback(log_and_exit, reactor)
        return
    reactor.run()
| isc |
firm1/zmarkdown-editor | src/pygments/formatters/html.py | 49 | 31215 | # -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
import os.path
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
StringIO, string_types, iteritems
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
# Translation table mapping HTML-significant characters to entities.
_escape_html_table = {
    ord(char): entity
    for char, entity in [
        ('&', u'&amp;'),
        ('<', u'&lt;'),
        ('>', u'&gt;'),
        ('"', u'&quot;'),
        ("'", u'&#39;'),
    ]
}


def escape_html(text, table=_escape_html_table):
    """Escape &, <, > as well as single and double quotes for HTML."""
    return text.translate(table)
def get_random_id():
    """Return a random id for javascript fields."""
    from random import random
    from time import time
    try:
        from hashlib import sha1 as sha
    except ImportError:
        import sha
        sha = sha.new
    # hashlib digests operate on bytes: encode for Python 3, where
    # passing a str raised TypeError.  On Python 2 the encoded value
    # is still an acceptable str.
    return sha(('%s|%s' % (random(), time())).encode('utf-8')).hexdigest()
def _get_ttype_class(ttype):
    """Return the CSS class for a token type.

    When the type has no standard class, walk up the token hierarchy
    until one is found and synthesise a '-Sub-Type' suffix from the
    skipped path components.
    """
    cls = STANDARD_TYPES.get(ttype)
    if cls:
        return cls
    parts = []
    while cls is None:
        parts.append(ttype[-1])
        ttype = ttype.parent
        cls = STANDARD_TYPES.get(ttype)
    return cls + ''.join('-' + part for part in reversed(parts))
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`anchorlines` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``, the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_syntax_defs` method given]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
    If the `nowrap` option is set, the generator is then iterated over and the
    resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
    def __init__(self, **options):
        """Read and normalise the HTML-specific options (see class docstring)."""
        Formatter.__init__(self, **options)
        self.title = self._decodeifneeded(self.title)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.noclasses = get_bool_opt(options, 'noclasses', False)
        self.classprefix = options.get('classprefix', '')
        self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
        self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
        self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
        self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
        self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
        self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
        self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
        if self.tagsfile:
            # ctags support is optional; fail fast when the module is absent.
            if not ctags:
                raise RuntimeError('The "ctags" package must to be installed '
                                   'to be able to use the "tagsfile" feature.')
            self._ctags = ctags.CTags(self.tagsfile)
        # linenos is normalised to: 0 = off, 1 = table, 2 = inline.
        linenos = options.get('linenos', False)
        if linenos == 'inline':
            self.linenos = 2
        elif linenos:
            # compatibility with <= 0.7
            self.linenos = 1
        else:
            self.linenos = 0
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.lineseparator = options.get('lineseparator', '\n')
        self.lineanchors = options.get('lineanchors', '')
        self.linespans = options.get('linespans', '')
        self.anchorlinenos = options.get('anchorlinenos', False)
        self.hl_lines = set()
        # Non-integer hl_lines entries are silently ignored.
        for lineno in get_list_opt(options, 'hl_lines', []):
            try:
                self.hl_lines.add(int(lineno))
            except ValueError:
                pass
        self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: #%s; ' % ndef['color']
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: #%s; ' % ndef['bgcolor']
if ndef['border']:
style += 'border: 1px solid #%s; ' % ndef['border']
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
    def get_style_defs(self, arg=None):
        """
        Return CSS style definitions for the classes produced by the current
        highlighting style. ``arg`` can be a string or list of selectors to
        insert before the token type classes.
        """
        if arg is None:
            # Default prefix: the cssclass selector, but only when the
            # user explicitly configured one.
            arg = ('cssclass' in self.options and '.'+self.cssclass or '')
        if isinstance(arg, string_types):
            args = [arg]
        else:
            args = list(arg)
        def prefix(cls):
            # Join every requested selector with this class name.
            if cls:
                cls = '.' + cls
            tmp = []
            for arg in args:
                tmp.append((arg and arg + ' ' or '') + cls)
            return ', '.join(tmp)
        # Sort by hierarchy depth so deeper (more specific) token rules
        # come later, as required by CSS cascading.
        styles = [(level, ttype, cls, style)
                  for cls, (style, ttype, level) in iteritems(self.class2style)
                  if cls and style]
        styles.sort()
        lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
                 for (level, ttype, cls, style) in styles]
        if arg and not self.nobackground and \
           self.style.background_color is not None:
            text_style = ''
            if Text in self.ttype2class:
                text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
            lines.insert(0, '%s { background: %s;%s }' %
                         (prefix(''), self.style.background_color, text_style))
        if self.style.highlight_color is not None:
            lines.insert(0, '%s.hll { background-color: %s }' %
                         (prefix(''), self.style.highlight_color))
        return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
    def _wrap_full(self, inner, outfile):
        """Emit a complete HTML document around *inner*, writing the CSS
        to ``cssfile`` when one is configured."""
        if self.cssfile:
            if os.path.isabs(self.cssfile):
                # it's an absolute filename
                cssfilename = self.cssfile
            else:
                # Resolve the CSS path relative to the output file's
                # directory when one can be determined.
                try:
                    filename = outfile.name
                    if not filename or filename[0] == '<':
                        # pseudo files, e.g. name == '<fdopen>'
                        raise AttributeError
                    cssfilename = os.path.join(os.path.dirname(filename),
                                               self.cssfile)
                except AttributeError:
                    print('Note: Cannot determine output file name, ' \
                          'using current directory as base for the CSS file name',
                          file=sys.stderr)
                    cssfilename = self.cssfile
            # write CSS file only if noclobber_cssfile isn't given as an option.
            try:
                if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
                    cf = open(cssfilename, "w")
                    cf.write(CSSFILE_TEMPLATE %
                             {'styledefs': self.get_style_defs('body')})
                    cf.close()
            except IOError as err:
                err.strerror = 'Error writing CSS file: ' + err.strerror
                raise
            yield 0, (DOC_HEADER_EXTERNALCSS %
                      dict(title = self.title,
                           cssfile = self.cssfile,
                           encoding = self.encoding))
        else:
            # No external file: inline the style definitions in <head>.
            yield 0, (DOC_HEADER %
                      dict(title = self.title,
                           styledefs = self.get_style_defs('body'),
                           encoding = self.encoding))
        for t, line in inner:
            yield t, line
        yield 0, DOC_FOOTER
    def _wrap_tablelinenos(self, inner):
        """Render output as a two-cell table: line numbers left, code
        right (the linenos='table' mode)."""
        # Buffer the code first so the total line count (and thus the
        # number width) is known before emitting anything.
        dummyoutfile = StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)
        fl = self.linenostart
        mw = len(str(lncount + fl - 1))  # width of the widest number
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            # Every sp-th number additionally gets the "special" class;
            # numbers off the linenostep grid render as blank lines.
            lines = []
            for i in range(fl, fl+lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s-%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl+lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
    """Prefix each source line with its line number inside a <span>."""
    # need a list of lines since we need the width of a single number :(
    lines = list(inner)
    sp = self.linenospecial  # every sp-th number is rendered as "special"
    st = self.linenostep     # only every st-th line gets a visible number
    num = self.linenostart
    mw = len(str(len(lines) + num - 1))  # width of the widest number

    if self.noclasses:
        # Inline-style variants (no external stylesheet available).
        if sp:
            for t, line in lines:
                if num%sp == 0:
                    style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
                else:
                    style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
                # num%st truthy means "skip the number" (blank padding).
                yield 1, '<span style="%s">%*s</span> ' % (
                    style, mw, (num%st and ' ' or num)) + line
                num += 1
        else:
            for t, line in lines:
                yield 1, ('<span style="background-color: #f0f0f0; '
                          'padding: 0 5px 0 5px">%*s</span> ' % (
                              mw, (num%st and ' ' or num)) + line)
                num += 1
    elif sp:
        # CSS-class variants: "lineno special" for every sp-th line.
        for t, line in lines:
            yield 1, '<span class="lineno%s">%*s</span> ' % (
                num%sp == 0 and ' special' or '', mw,
                (num%st and ' ' or num)) + line
            num += 1
    else:
        for t, line in lines:
            yield 1, '<span class="lineno">%*s</span> ' % (
                mw, (num%st and ' ' or num)) + line
            num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
i = self.linenostart - 1 # subtract 1 since we have to increment i
# *before* yielding
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
    """
    Just format the tokens, without any wrapping tags.
    Yield individual lines.
    """
    nocls = self.noclasses
    lsep = self.lineseparator
    # for <span style=""> lookup only
    getcls = self.ttype2class.get
    c2s = self.class2style
    escape_table = _escape_html_table
    tagsfile = self.tagsfile

    # lspan: the <span> opening tag currently carried over from the
    # previous token; line: HTML accumulated for the current output line.
    lspan = ''
    line = ''
    for ttype, value in tokensource:
        if nocls:
            # Inline-style mode: walk up the token hierarchy until a
            # style definition is found.
            cclass = getcls(ttype)
            while cclass is None:
                ttype = ttype.parent
                cclass = getcls(ttype)
            cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
        else:
            cls = self._get_css_class(ttype)
            cspan = cls and '<span class="%s">' % cls or ''

        # A token value may span several lines; split after escaping.
        parts = value.translate(escape_table).split('\n')

        if tagsfile and ttype in Token.Name:
            # Turn name tokens into links to their ctags definition.
            filename, linenumber = self._lookup_ctag(value)
            if linenumber:
                base, filename = os.path.split(filename)
                if base:
                    base += '/'
                filename, extension = os.path.splitext(filename)
                url = self.tagurlformat % {'path': base, 'fname': filename,
                                           'fext': extension}
                parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                    (url, self.lineanchors, linenumber, parts[0])
                parts[-1] = parts[-1] + "</a>"

        # for all but the last line
        for part in parts[:-1]:
            if line:
                if lspan != cspan:
                    # Close the carried-over span and open the new one.
                    line += (lspan and '</span>') + cspan + part + \
                        (cspan and '</span>') + lsep
                else: # both are the same
                    line += part + (lspan and '</span>') + lsep
                yield 1, line
                line = ''
            elif part:
                yield 1, cspan + part + (cspan and '</span>') + lsep
            else:
                # Completely empty source line.
                yield 1, lsep

        # for the last line: keep it open; the next token may continue it.
        if line and parts[-1]:
            if lspan != cspan:
                line += (lspan and '</span>') + cspan + parts[-1]
                lspan = cspan
            else:
                line += parts[-1]
        elif parts[-1]:
            line = cspan + parts[-1]
            lspan = cspan
        # else we neither have to open a new span nor set lspan

    if line:
        # Flush the final, still-open line.
        yield 1, line + (lspan and '</span>') + lsep
def _lookup_ctag(self, token):
    """Resolve *token* in the ctags index.

    Returns a ``(filename, line_number)`` pair, or ``(None, None)`` when
    the token has no ctags entry.
    """
    entry = ctags.TagEntry()
    if not self._ctags.find(entry, token, 0):
        return None, None
    return entry['file'], entry['lineNumber']
def _highlight_lines(self, tokensource):
"""
Highlighted the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
    """
    Wrap the ``source``, which is a generator yielding
    individual lines, in custom generators. See docstring
    for `format`. Can be overridden.
    """
    # Default wrapping: a <pre> element inside the outer <div>.
    return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
    """
    The formatting process uses several nested generators; which of
    them are used is determined by the user's options.

    Each generator should take at least one argument, ``inner``,
    and wrap the pieces of text generated by this.

    Always yield 2-tuples: (code, text). If "code" is 1, the text
    is part of the original tokensource being highlighted, if it's
    0, the text is some piece of wrapping. This makes it possible to
    use several different wrappers that process the original source
    linewise, e.g. line number generators.
    """
    source = self._format_lines(tokensource)
    if self.hl_lines:
        source = self._highlight_lines(source)
    if not self.nowrap:
        # linenos == 2 means inline numbers; == 1 means the table layout.
        if self.linenos == 2:
            source = self._wrap_inlinelinenos(source)
        if self.lineanchors:
            source = self._wrap_lineanchors(source)
        if self.linespans:
            source = self._wrap_linespans(source)
        source = self.wrap(source, outfile)
        # Table line numbers wrap *outside* the <div>/<pre> produced by wrap().
        if self.linenos == 1:
            source = self._wrap_tablelinenos(source)
        if self.full:
            source = self._wrap_full(source, outfile)

    for t, piece in source:
        outfile.write(piece)
| mit |
superdachs/OwnVPN | ownvpn/openvpn/models.py | 1 | 5460 | from django.db import models
from django.core import serializers
from network.models import Interface
import subprocess
import os
import uuid
class Tools():
    """Helper utilities for the OpenVPN models."""

    @staticmethod
    def create_key():
        """Generate a new OpenVPN static key and return it as a string.

        Runs ``openvpn --genkey`` into a uniquely named file under /tmp,
        reads the key back and removes the file again.

        Raises:
            Exception: if the openvpn subprocess exits non-zero.
        """
        # @staticmethod is required: this function is used as a model
        # field default callable (``default=Tools.create_key``), which
        # must be callable without an instance.
        tmpkeyname = str(uuid.uuid4())
        keypath = "/tmp/%s.key" % tmpkeyname
        keycmd = "openvpn --genkey --secret /tmp/%s.key" % tmpkeyname
        p = subprocess.Popen(keycmd, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, shell=True)
        if p.wait() != 0:
            raise Exception("could not create temp key")
        try:
            with open(keypath, 'r') as tmpk:
                key = tmpk.readlines()
        finally:
            # Never leave key material lying around in /tmp, even if
            # reading the file fails.
            os.remove(keypath)
        return ''.join(key)
class AddressPort(models.Model):
    """A bindable (interface address, port) pair for a VPN instance."""
    # Interface whose address the VPN binds to.
    interface = models.ForeignKey(Interface)
    # UDP/TCP port number to bind on that address.
    port = models.IntegerField()

    def __str__(self):
        return "%s:%d" % (self.interface.address, self.port)

    class Meta:
        # A given port may be bound on a given interface only once.
        unique_together = ('interface', 'port')
class Openvpn(models.Model):
    """Common base model for OpenVPN client and server instances."""
    name = models.CharField(max_length=255, unique=True)
    description = models.TextField(blank=True, null=True)
    # Address assigned to the local end of the tun device.
    tun_ip = models.GenericIPAddressField()
    # Basename of the deployed config file; set by the subclasses' save().
    config = models.CharField(max_length=255, blank=True, null=True, editable=False)
    start_on_boot = models.BooleanField(default=False)
    # Local address/port this instance binds to.
    bind_to = models.OneToOneField('AddressPort')

    def __str__(self):
        return self.name

    def control(self, command):
        # Delegate start/stop/enable/disable to a privileged helper script.
        cmd = "sudo /usr/local/bin/controlvpn.sh %s %s" % (self.config, command)
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        p.wait()
class OpenvpnClient(Openvpn):
    """OpenVPN client instance authenticated with a static shared key."""
    gateway = models.CharField(max_length=255)
    server_port = models.IntegerField(default=1194)
    server_ip = models.GenericIPAddressField()
    static_key = models.TextField()
    vpn_type = "openvpn_client"

    class Meta:
        # Only one client per (gateway, port) endpoint.
        unique_together = ('gateway', 'server_port')

    def save(self, *args, **kwargs):
        """Write config and key to /tmp, deploy them through the
        privileged helper script, then persist the model.

        Raises:
            Exception: if the deploy helper exits non-zero.
        """
        configfile = "/tmp/client-%s.conf" % self.name
        keyfile = "/tmp/client-%s.key" % self.name
        with open(keyfile, "w") as kf:
            kf.write(self.static_key)
        with open(configfile, "w") as cf:
            cf.write("# autogenerated configuration\n")
            cf.write("# for %s. Do not edit!\n" % self.name)
            cf.write("remote %s\n" % self.gateway)
            cf.write("port %d\n" % self.server_port)
            cf.write("dev tun\n")
            cf.write("lport %d\n" % self.bind_to.port)
            cf.write("ifconfig %s %s\n" % (self.tun_ip, self.server_ip))
            # Trailing newline added: the original left the last config
            # line unterminated, inconsistent with OpenvpnServer.save().
            cf.write("secret /etc/openvpn/client-%s.key\n" % self.name)
        deploycmd = "sudo /usr/local/bin/deployconfig.sh client-%s" % self.name
        p = subprocess.Popen(deploycmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        if p.wait() != 0:
            raise Exception("could not deploy config and/or key file(s)")
        self.config = "client-%s" % self.name
        if self.start_on_boot:
            self.control("enable")
        else:
            self.control("disable")
        super(OpenvpnClient, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """Remove deployed files, best-effort stop/disable the service,
        then delete the database row.

        Raises:
            Exception: if the remove helper exits non-zero.
        """
        rmcmd = "sudo /usr/local/bin/rmconfig.sh client-%s" % self.name
        p = subprocess.Popen(rmcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        if p.wait() != 0:
            raise Exception("could not delete config and/or key file(s)")
        # Best effort: the service may not be running or enabled.
        try:
            self.control("stop")
        except Exception:
            pass
        try:
            self.control("disable")
        except Exception:
            pass
        super(OpenvpnClient, self).delete(*args, **kwargs)
class OpenvpnServer(Openvpn):
    """OpenVPN server instance authenticated with a static shared key."""
    # Address assigned to the remote (client) end of the tunnel.
    client_ip = models.GenericIPAddressField()
    # A fresh static key is generated per new server instance.
    static_key = models.TextField(default=Tools.create_key)
    vpn_type = "openvpn_server"

    def save(self, *args, **kwargs):
        # Write config and key to /tmp, deploy them through the
        # privileged helper script, then persist the model.
        configfile = "/tmp/server-%s.conf" % self.name
        keyfile = "/tmp/server-%s.key" % self.name
        with open(keyfile, "w") as kf:
            kf.write(self.static_key)
        with open(configfile, "w") as cf:
            cf.write("# autogenerated configuration\n")
            cf.write("# for %s. Do not edit!\n" % self.name)
            cf.write("port %d\n" % self.bind_to.port)
            cf.write("dev tun\n")
            cf.write("ifconfig %s %s\n" % (self.tun_ip, self.client_ip))
            cf.write("secret /etc/openvpn/server-%s.key\n" % self.name)
        deploycmd = "sudo /usr/local/bin/deployconfig.sh server-%s" % self.name
        p = subprocess.Popen(deploycmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        if p.wait() != 0:
            raise Exception("could not deploy config and/or key file(s)")
        self.config = "server-%s" % self.name
        if self.start_on_boot:
            self.control("enable")
        else:
            self.control("disable")
        super(OpenvpnServer, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Remove deployed files, then best-effort stop/disable the
        # service before deleting the row.
        rmcmd = "sudo /usr/local/bin/rmconfig.sh server-%s" % self.name
        p = subprocess.Popen(rmcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        if p.wait() != 0:
            raise Exception("could not delete config and/or key file(s)")
        try:
            self.control("stop")
        except:
            pass
        try:
            self.control("disable")
        except:
            pass
        super(OpenvpnServer, self).delete(*args, **kwargs)
| gpl-3.0 |
cristiana214/cristianachavez214-cristianachavez | python/src/Lib/test/test_userstring.py | 56 | 4667 | #!/usr/bin/env python
# UserString is a wrapper around the native builtin string type.
# UserString instances should behave similar to builtin string objects.
import string
from test import test_support, string_tests
from UserString import UserString, MutableString
import warnings
class UserStringTest(
    string_tests.CommonTest,
    string_tests.MixinStrUnicodeUserStringTest,
    string_tests.MixinStrStringUserStringTest,
    string_tests.MixinStrUserStringTest
    ):
    """Run the shared string test mixins against UserString."""

    type2test = UserString

    # Overwrite the three testing methods, because UserString
    # can't cope with arguments propagated to UserString
    # (and we don't test with subclasses)
    def checkequal(self, result, object, methodname, *args):
        result = self.fixtype(result)
        object = self.fixtype(object)
        # we don't fix the arguments, because UserString can't cope with it
        realresult = getattr(object, methodname)(*args)
        self.assertEqual(
            result,
            realresult
        )

    def checkraises(self, exc, object, methodname, *args):
        object = self.fixtype(object)
        # we don't fix the arguments, because UserString can't cope with it
        self.assertRaises(
            exc,
            getattr(object, methodname),
            *args
        )

    def checkcall(self, object, methodname, *args):
        object = self.fixtype(object)
        # we don't fix the arguments, because UserString can't cope with it
        getattr(object, methodname)(*args)
class MutableStringTest(UserStringTest):
    """Re-run the UserString tests against MutableString, plus the
    mutation-specific operations MutableString adds."""
    type2test = MutableString

    # MutableStrings can be hashed => deactivate test
    def test_hash(self):
        pass

    def test_setitem(self):
        # Single-character assignment, including negative indices.
        s = self.type2test("foo")
        self.assertRaises(IndexError, s.__setitem__, -4, "bar")
        self.assertRaises(IndexError, s.__setitem__, 3, "bar")
        s[-1] = "bar"
        self.assertEqual(s, "fobar")
        s[0] = "bar"
        self.assertEqual(s, "barobar")

    def test_delitem(self):
        s = self.type2test("foo")
        self.assertRaises(IndexError, s.__delitem__, -4)
        self.assertRaises(IndexError, s.__delitem__, 3)
        del s[-1]
        self.assertEqual(s, "fo")
        del s[0]
        self.assertEqual(s, "o")
        del s[0]
        self.assertEqual(s, "")

    def test_setslice(self):
        # Slice assignment accepts str, UserString and arbitrary objects.
        s = self.type2test("foo")
        s[:] = "bar"
        self.assertEqual(s, "bar")
        s[1:2] = "foo"
        self.assertEqual(s, "bfoor")
        s[1:-1] = UserString("a")
        self.assertEqual(s, "bar")
        s[0:10] = 42
        self.assertEqual(s, "42")

    def test_delslice(self):
        s = self.type2test("foobar")
        del s[3:10]
        self.assertEqual(s, "foo")
        del s[-1:10]
        self.assertEqual(s, "fo")

    def test_extended_set_del_slice(self):
        # Compare extended-slice behaviour against a plain list of chars.
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        orig = string.ascii_letters + string.digits
        for start in indices:
            for stop in indices:
                # Use indices[1:] when MutableString can handle real
                # extended slices
                for step in (None, 1, -1):
                    s = self.type2test(orig)
                    L = list(orig)
                    # Make sure we have a slice of exactly the right length,
                    # but with (hopefully) different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    s[start:stop:step] = "".join(data)
                    self.assertEquals(s, "".join(L))
                    del L[start:stop:step]
                    del s[start:stop:step]
                    self.assertEquals(s, "".join(L))

    def test_immutable(self):
        # immutable() returns a plain (hashable) UserString copy.
        s = self.type2test("foobar")
        s2 = s.immutable()
        self.assertEqual(s, s2)
        self.assert_(isinstance(s2, UserString))

    def test_iadd(self):
        s = self.type2test("foo")
        s += "bar"
        self.assertEqual(s, "foobar")
        s += UserString("baz")
        self.assertEqual(s, "foobarbaz")
        s += 42
        self.assertEqual(s, "foobarbaz42")

    def test_imul(self):
        s = self.type2test("foo")
        s *= 1
        self.assertEqual(s, "foo")
        s *= 2
        self.assertEqual(s, "foofoo")
        s *= -1
        self.assertEqual(s, "")
def test_main():
    # MutableString is deprecated; silence its DeprecationWarning while
    # the suite runs.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", ".*MutableString",
                                DeprecationWarning)
        test_support.run_unittest(UserStringTest, MutableStringTest)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
cjaymes/pyscap | src/scap/model/oval_5/defs/windows/WmiStateElement.py | 1 | 1213 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.windows.StateType import StateType
logger = logging.getLogger(__name__)
class WmiStateElement(StateType):
    """OVAL 5 model for the Windows ``wmi_state`` element."""
    MODEL_MAP = {
        'tag_name': 'wmi_state',
        'elements': [
            {'tag_name': tag,
             'class': 'scap.model.oval_5.defs.EntityStateType',
             'min': 0}
            for tag in ('namespace', 'wql', 'result')
        ],
    }
| gpl-3.0 |
lipengyu/django-bootstrap | xadmin/plugins/export.py | 1 | 7026 | import StringIO
import datetime
import sys
from django.http import HttpResponse
from django.template import loader
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.xmlutils import SimplerXMLGenerator
from django.db.models import BooleanField, NullBooleanField
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.util import json
# Optional dependency: the xls export format is only offered when xlwt
# is installed.  Catch ImportError specifically — the original bare
# ``except:`` would also swallow SystemExit/KeyboardInterrupt.
try:
    import xlwt
    has_xlwt = True
except ImportError:
    has_xlwt = False
class ExportMenuPlugin(BaseAdminPlugin):
    """Adds the export-format drop-down to the list-view toolbar."""

    list_export = ('xls', 'csv', 'xml', 'json')
    export_names = {'xls': 'Excel', 'csv': 'CSV', 'xml': 'XML', 'json': 'JSON'}

    def init_request(self, *args, **kwargs):
        # Drop the xls option when the optional xlwt dependency is missing.
        self.list_export = [
            f for f in self.list_export if f != 'xls' or has_xlwt]

    def block_top_toolbar(self, context, nodes):
        # Render the export menu only if at least one format is enabled.
        if self.list_export:
            context.update({
                'form_params': self.admin_view.get_form_params({'_do_': 'export'}, ('export_type',)),
                'export_types': [{'type': et, 'name': self.export_names[et]} for et in self.list_export],
            })
            nodes.append(loader.render_to_string('xadmin/blocks/model_list.top_toolbar.exports.html', context_instance=context))
class ExportPlugin(BaseAdminPlugin):
    """List-view plugin that serves the current result list as a file.

    Activated when the request carries ``_do_=export``; the format is
    selected by the ``export_type`` GET parameter (xls/csv/xml/json).
    """

    export_mimes = {'xls': 'application/vnd.ms-excel', 'csv': 'text/csv',
                    'xml': 'application/xhtml+xml', 'json': 'application/json'}

    def init_request(self, *args, **kwargs):
        # The plugin is only active for export requests.
        return self.request.GET.get('_do_') == 'export'

    def get_results(self, context):
        """Flatten the rendered rows into ``{header text: value}`` dicts.

        Only columns flagged as exportable are included.  Boolean fields
        keep their Python value so each format can render them natively;
        everything else is exported as escaped text.
        """
        headers = [c for c in context['result_headers'].cells if c.export]
        rows = context['results']
        new_rows = []
        for r in rows:
            d = {}
            for i, o in enumerate(filter(lambda c: getattr(c, 'export', False), r.cells)):
                if (o.field is None and getattr(o.attr, 'boolean', False)) or \
                        (o.field and isinstance(o.field, (BooleanField, NullBooleanField))):
                    value = o.value
                else:
                    value = escape(str(o.text))
                d[force_unicode(headers[i].text)] = value
            new_rows.append(d)
        return new_rows

    def get_xls_export(self, context):
        """Render the results as an Excel workbook (requires xlwt)."""
        results = self.get_results(context)
        output = StringIO.StringIO()
        export_header = (
            self.request.GET.get('export_xls_header', 'off') == 'on')

        model_name = self.opts.verbose_name
        book = xlwt.Workbook(encoding='utf8')
        sheet = book.add_sheet(
            u"%s %s" % (_(u'Sheet'), force_unicode(model_name)))
        styles = {'datetime': xlwt.easyxf(num_format_str='yyyy-mm-dd hh:mm:ss'),
                  'date': xlwt.easyxf(num_format_str='yyyy-mm-dd'),
                  'time': xlwt.easyxf(num_format_str='hh:mm:ss'),
                  'header': xlwt.easyxf('font: name Times New Roman, color-index red, bold on', num_format_str='#,##0.00'),
                  'default': xlwt.Style.default_style}

        datas = [row.values() for row in results]
        # Guard against an empty result set before touching results[0].
        if export_header and results:
            datas.insert(0, results[0].keys())

        for rowx, row in enumerate(datas):
            for colx, value in enumerate(row):
                if export_header and rowx == 0:
                    cell_style = styles['header']
                else:
                    # Pick the number format matching the value's type.
                    if isinstance(value, datetime.datetime):
                        cell_style = styles['datetime']
                    elif isinstance(value, datetime.date):
                        cell_style = styles['date']
                    elif isinstance(value, datetime.time):
                        cell_style = styles['time']
                    else:
                        cell_style = styles['default']
                sheet.write(rowx, colx, value, style=cell_style)
        book.save(output)
        output.seek(0)
        return output.getvalue()

    def _format_csv_text(self, t):
        """Quote and escape a single CSV cell value.

        Booleans become localized Yes/No; strings are escaped and quoted.
        (Fixed: the original called ``t.replace`` before verifying the
        value was a string, raising AttributeError for any non-string,
        non-bool value.)
        """
        if isinstance(t, bool):
            return _('Yes') if t else _('No')
        if isinstance(t, basestring):
            t = '"%s"' % t.replace('"', '""').replace(',', '\,')
        return t

    def get_csv_export(self, context):
        """Render the results as CSV, one CRLF-terminated line per row."""
        results = self.get_results(context)
        stream = []

        # Guard against an empty result set before touching results[0].
        if self.request.GET.get('export_csv_header', 'off') == 'on' and results:
            stream.append(
                ','.join(map(self._format_csv_text, results[0].keys())))
        for row in results:
            stream.append(','.join(map(self._format_csv_text, row.values())))
        return '\r\n'.join(stream)

    def _to_xml(self, xml, data):
        # Recursively serialize lists as <row> elements and dicts as
        # elements named after their keys; leaves become text nodes.
        if isinstance(data, (list, tuple)):
            for item in data:
                xml.startElement("row", {})
                self._to_xml(xml, item)
                xml.endElement("row")
        elif isinstance(data, dict):
            for key, value in data.iteritems():
                xml.startElement(key, {})
                self._to_xml(xml, value)
                xml.endElement(key)
        else:
            xml.characters(smart_unicode(data))

    def get_xml_export(self, context):
        """Render the results as XML under an <objects> root element."""
        results = self.get_results(context)
        stream = StringIO.StringIO()

        xml = SimplerXMLGenerator(stream, "utf-8")
        xml.startDocument()
        xml.startElement("objects", {})
        self._to_xml(xml, results)
        xml.endElement("objects")
        xml.endDocument()

        # Drop the XML declaration emitted on the first line.
        return stream.getvalue().split('\n')[1]

    def get_json_export(self, context):
        """Render the results as JSON, optionally pretty-printed."""
        results = self.get_results(context)
        return json.dumps({'objects': results}, ensure_ascii=False,
                          indent=(self.request.GET.get('export_json_format', 'off') == 'on') and 4 or None)

    def get_response(self, response, context, *args, **kwargs):
        """Build the file-download response for the requested format."""
        file_type = self.request.GET.get('export_type', 'csv')
        response = HttpResponse(
            mimetype="%s; charset=UTF-8" % self.export_mimes[file_type])

        file_name = self.opts.verbose_name.replace(' ', '_')
        response['Content-Disposition'] = ('attachment; filename=%s.%s' % (
            file_name, file_type)).encode('utf-8')

        # Dispatch to get_<type>_export above.
        response.write(getattr(self, 'get_%s_export' % file_type)(context))
        return response

    # View Methods
    def get_result_list(self, __):
        # "all=on" disables pagination so the whole queryset is exported.
        if self.request.GET.get('all', 'off') == 'on':
            self.admin_view.list_per_page = sys.maxint
        return __()

    def result_header(self, item, field_name, row):
        # Mark which header cells take part in the export.
        item.export = not item.attr or field_name == '__str__' or getattr(item.attr, 'allow_export', True)
        return item

    def result_item(self, item, obj, field_name, row):
        # Mark which data cells take part in the export.
        item.export = item.field or field_name == '__str__' or getattr(item.attr, 'allow_export', True)
        return item
# Hook both export plugins into the model list view.
site.register_plugin(ExportMenuPlugin, ListAdminView)
site.register_plugin(ExportPlugin, ListAdminView)
| bsd-3-clause |
AladdinSonni/youtube-dl | youtube_dl/extractor/vessel.py | 98 | 4801 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
ExtractorError,
parse_iso8601,
)
class VesselIE(InfoExtractor):
    # Extractor for vessel.com videos; supports optional account login
    # (via .netrc machine "vessel") for subscription-only content.
    _VALID_URL = r'https?://(?:www\.)?vessel\.com/videos/(?P<id>[0-9a-zA-Z]+)'
    _API_URL_TEMPLATE = 'https://www.vessel.com/api/view/items/%s'
    _LOGIN_URL = 'https://www.vessel.com/api/account/login'
    _NETRC_MACHINE = 'vessel'
    _TEST = {
        'url': 'https://www.vessel.com/videos/HDN7G5UMs',
        'md5': '455cdf8beb71c6dd797fd2f3818d05c4',
        'info_dict': {
            'id': 'HDN7G5UMs',
            'ext': 'mp4',
            'title': 'Nvidia GeForce GTX Titan X - The Best Video Card on the Market?',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20150317',
            'description': 'Did Nvidia pull out all the stops on the Titan X, or does its performance leave something to be desired?',
            'timestamp': int,
        },
    }

    @staticmethod
    def make_json_request(url, data):
        # Build a POST request with *data* serialized as a JSON body.
        payload = json.dumps(data).encode('utf-8')
        req = compat_urllib_request.Request(url, payload)
        req.add_header('Content-Type', 'application/json; charset=utf-8')
        return req

    @staticmethod
    def find_assets(data, asset_type, asset_id=None):
        # Yield every asset of the given type (optionally matching a
        # specific asset id) from the API response.
        for asset in data.get('assets', []):
            if not asset.get('type') == asset_type:
                continue
            elif asset_id is not None and not asset.get('id') == asset_id:
                continue
            else:
                yield asset

    def _check_access_rights(self, data):
        # Raise a user-friendly error when the API marks the item as
        # restricted (e.g. paid-subscription-only content).
        access_info = data.get('__view', {})
        if not access_info.get('allow_access', True):
            err_code = access_info.get('error_code') or ''
            if err_code == 'ITEM_PAID_ONLY':
                raise ExtractorError(
                    'This video requires subscription.', expected=True)
            else:
                raise ExtractorError(
                    'Access to this content is restricted. (%s said: %s)' % (self.IE_NAME, err_code), expected=True)

    def _login(self):
        # Log in with configured credentials; a missing username means
        # anonymous access, so just return.
        (username, password) = self._get_login_info()
        if username is None:
            return
        self.report_login()
        data = {
            'client_id': 'web',
            'type': 'password',
            'user_key': username,
            'password': password,
        }
        login_request = VesselIE.make_json_request(self._LOGIN_URL, data)
        self._download_webpage(login_request, None, False, 'Wrong login info')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        # The page embeds its bootstrap state as a JSON blob.
        data = self._parse_json(self._search_regex(
            r'App\.bootstrapData\((.*?)\);', webpage, 'data'), video_id)
        asset_id = data['model']['data']['id']

        req = VesselIE.make_json_request(
            self._API_URL_TEMPLATE % asset_id, {'client': 'web'})
        data = self._download_json(req, video_id)
        video_asset_id = data.get('main_video_asset')

        self._check_access_rights(data)

        try:
            # Prefer the asset flagged as the main video, if any.
            video_asset = next(
                VesselIE.find_assets(data, 'video', asset_id=video_asset_id))
        except StopIteration:
            raise ExtractorError('No video assets found')

        formats = []
        for f in video_asset.get('sources', []):
            if f['name'] == 'hls-index':
                # HLS master playlist: expand into its variant formats.
                formats.extend(self._extract_m3u8_formats(
                    f['location'], video_id, ext='mp4', m3u8_id='m3u8'))
            else:
                formats.append({
                    'format_id': f['name'],
                    'tbr': f.get('bitrate'),
                    'height': f.get('height'),
                    'width': f.get('width'),
                    'url': f['location'],
                })
        self._sort_formats(formats)

        thumbnails = []
        for im_asset in VesselIE.find_assets(data, 'image'):
            thumbnails.append({
                'url': im_asset['location'],
                'width': im_asset.get('width', 0),
                'height': im_asset.get('height', 0),
            })

        return {
            'id': video_id,
            'title': data['title'],
            'formats': formats,
            'thumbnails': thumbnails,
            'description': data.get('short_description'),
            'duration': data.get('duration'),
            'comment_count': data.get('comment_count'),
            'like_count': data.get('like_count'),
            'view_count': data.get('view_count'),
            'timestamp': parse_iso8601(data.get('released_at')),
        }
| unlicense |
GladeRom/android_external_chromium_org | third_party/boringssl/update_gypi_and_asm.py | 26 | 7225 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can b
# found in the LICENSE file.
"""Enumerates the BoringSSL source in src/ and generates two gypi files:
boringssl.gypi and boringssl_tests.gypi."""
import os
import subprocess
import sys
# OS_ARCH_COMBOS maps from OS and platform to the OpenSSL assembly "style" for
# that platform and the extension used by asm files.
# Each entry: (os, arch, perlasm style, extra perlasm args, asm extension).
OS_ARCH_COMBOS = [
    ('linux', 'arm', 'elf', [''], 'S'),
    ('linux', 'x86', 'elf', ['-fPIC'], 'S'),
    ('linux', 'x86_64', 'elf', [''], 'S'),
    ('mac', 'x86', 'macosx', ['-fPIC'], 'S'),
    ('mac', 'x86_64', 'macosx', [''], 'S'),
    ('win', 'x86_64', 'masm', [''], 'asm'),
]

# NON_PERL_FILES enumerates assembly files that are not processed by the
# perlasm system.
NON_PERL_FILES = {
    ('linux', 'arm'): [
        'src/crypto/poly1305/poly1305_arm_asm.S',
        'src/crypto/chacha/chacha_vec_arm.S',
    ],
}

# Header emitted at the top of every generated gypi file.
FILE_HEADER = """# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by update_gypi_and_asm.py. Do not edit manually.
"""
def FindCMakeFiles(directory):
    """Returns list of all CMakeLists.txt files recursively in directory."""
    found = []
    for (root, _, names) in os.walk(directory):
        found.extend(os.path.join(root, name)
                     for name in names if name == 'CMakeLists.txt')
    return found
def NoTests(dent, is_dir):
    """Filter function that can be passed to FindCFiles in order to remove
    test sources (and the test/ directory) from the walk."""
    if is_dir:
        return dent != 'test'
    return not ('test.' in dent or dent.startswith('example_'))
def OnlyTests(dent, is_dir):
    """Filter function that can be passed to FindCFiles in order to keep
    only test sources (all directories are traversed)."""
    if is_dir:
        return True
    return '_test.' in dent or dent.startswith('example_')
def FindCFiles(directory, filter_func):
    """Recurses through directory and returns a list of paths to all the C
    source files that pass filter_func.

    filter_func is called as filter_func(name, is_dir); directories for
    which it returns a falsy value are pruned from the walk.
    """
    cfiles = []
    for (path, dirnames, filenames) in os.walk(directory):
        for filename in filenames:
            if filename.endswith('.c') and filter_func(filename, False):
                cfiles.append(os.path.join(path, filename))
        # Prune rejected directories in place so os.walk skips them.
        # (The original deleted entries while enumerating the same list,
        # which silently skipped the element following each deletion, so
        # consecutive filtered directories were not all pruned.)
        dirnames[:] = [d for d in dirnames if filter_func(d, True)]
    return cfiles
def ExtractPerlAsmFromCMakeFile(cmakefile):
    """Parses the contents of the CMakeLists.txt file passed as an argument
    and returns a list of all the perlasm() directives found in the file."""
    directives = []
    base = os.path.dirname(cmakefile)
    with open(cmakefile) as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped.startswith('perlasm('):
                continue
            if not stripped.endswith(')'):
                raise ValueError('Bad perlasm line in %s' % cmakefile)
            # Strip the "perlasm(" prefix and the trailing ")".
            params = stripped[8:-1].split()
            if len(params) < 2:
                raise ValueError('Bad perlasm line in %s' % cmakefile)
            directives.append({
                'extra_args': params[2:],
                'input': os.path.join(base, params[1]),
                'output': os.path.join(base, params[0]),
            })
    return directives
def ReadPerlAsmOperations():
    """Returns a list of all perlasm() directives found in CMake config
    files in src/."""
    ops = []
    for cmakefile in FindCMakeFiles('src'):
        ops.extend(ExtractPerlAsmFromCMakeFile(cmakefile))
    return ops
def PerlAsm(output_filename, input_filename, perlasm_style, extra_args):
    """Run the given perlasm script and write its output to
    output_filename, creating the output directory if needed."""
    out_dir = os.path.dirname(output_filename)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    generated = subprocess.check_output(
        ['perl', input_filename, perlasm_style] + extra_args)
    with open(output_filename, 'w+') as out_file:
        out_file.write(generated)
def ArchForAsmFilename(filename):
    """Infer the target architecture of an asm file from substrings in
    its name; raises ValueError for unrecognized names."""
    if 'x86_64' in filename or 'avx2' in filename:
        return 'x86_64'
    # x86_64 names were already handled above, so a plain 'x86' (or the
    # legacy '586') marker means 32-bit x86 here.
    if 'x86' in filename or '586' in filename:
        return 'x86'
    if 'arm' in filename:
        return 'arm'
    raise ValueError('Unknown arch for asm filename: ' + filename)
def WriteAsmFiles(perlasms):
    """Generates asm files from perlasm directives for each supported OS x
    platform combination.

    Returns a dict mapping (os, arch) to the list of generated (plus
    hand-written) asm file paths."""
    asmfiles = {}

    for osarch in OS_ARCH_COMBOS:
        (osname, arch, perlasm_style, extra_args, asm_ext) = osarch
        key = (osname, arch)
        outDir = '%s-%s' % key

        for perlasm in perlasms:
            filename = os.path.basename(perlasm['input'])
            output = perlasm['output']
            if not output.startswith('src'):
                raise ValueError('output missing src: %s' % output)
            # Redirect the output under the per-platform directory,
            # stripping the leading 'src/'.
            output = os.path.join(outDir, output[4:])
            output = output.replace('${ASM_EXT}', asm_ext)

            # Only run scripts whose filename matches this architecture.
            if arch == ArchForAsmFilename(filename):
                PerlAsm(output, perlasm['input'], perlasm_style,
                        perlasm['extra_args'] + extra_args)
                asmfiles.setdefault(key, []).append(output)

    # Add the hand-written asm files that perlasm does not generate.
    for (key, non_perl_asm_files) in NON_PERL_FILES.iteritems():
        asmfiles.setdefault(key, []).extend(non_perl_asm_files)

    return asmfiles
def PrintVariableSection(out, name, files):
    """Write a sorted gyp variable list named *name* to the *out* stream."""
    out.write(' \'%s\': [\n' % name)
    for path in sorted(files):
        out.write(' \'%s\',\n' % path)
    out.write(' ],\n')
def main():
    # Collect the non-test C sources for the main library.
    crypto_c_files = FindCFiles(os.path.join('src', 'crypto'), NoTests)
    ssl_c_files = FindCFiles(os.path.join('src', 'ssl'), NoTests)

    # Emit boringssl.gypi: library sources plus per-platform asm lists.
    with open('boringssl.gypi', 'w+') as gypi:
        gypi.write(FILE_HEADER + '{\n \'variables\': {\n')
        PrintVariableSection(
            gypi, 'boringssl_lib_sources', crypto_c_files + ssl_c_files)
        perlasms = ReadPerlAsmOperations()
        for ((osname, arch), asm_files) in sorted(
            WriteAsmFiles(perlasms).iteritems()):
            PrintVariableSection(gypi, 'boringssl_%s_%s_sources' %
                                 (osname, arch), asm_files)
        gypi.write(' }\n}\n')

    # Collect the test sources and emit one executable target per test.
    test_c_files = FindCFiles(os.path.join('src', 'crypto'), OnlyTests)
    test_c_files += FindCFiles(os.path.join('src', 'ssl'), OnlyTests)

    with open('boringssl_tests.gypi', 'w+') as test_gypi:
        test_gypi.write(FILE_HEADER + '{\n \'targets\': [\n')
        test_names = []
        for test in sorted(test_c_files):
            test_name = 'boringssl_%s' % os.path.splitext(os.path.basename(test))[0]
            test_gypi.write(""" {
'target_name': '%s',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'%s',
],
},\n""" % (test_name, test))
            test_names.append(test_name)
        test_names.sort()
        test_gypi.write(""" ],
'variables': {
'boringssl_test_targets': [\n""")
        for test in test_names:
            test_gypi.write(""" '%s',\n""" % test)
        test_gypi.write(' ],\n }\n}\n')

    return 0

if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
gdkar/pyglet | pyglet/image/codecs/gdkpixbuf2.py | 31 | 6835 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
from pyglet.image.codecs import gif
import pyglet.lib
import pyglet.window
# Load the GTK 2.x shared libraries via pyglet's cross-platform loader.
gdk = pyglet.lib.load_library('gdk-x11-2.0')
gdkpixbuf = pyglet.lib.load_library('gdk_pixbuf-2.0')

# Opaque C pointers; we never dereference these from Python.
GdkPixbufLoader = c_void_p
GdkPixbuf = c_void_p

# Declare return types for the C functions we call.  Without an explicit
# restype ctypes assumes int, which truncates pointers on 64-bit platforms.
gdkpixbuf.gdk_pixbuf_loader_new.restype = GdkPixbufLoader
gdkpixbuf.gdk_pixbuf_loader_get_pixbuf.restype = GdkPixbuf
gdkpixbuf.gdk_pixbuf_get_pixels.restype = c_void_p
gdkpixbuf.gdk_pixbuf_loader_get_animation.restype = c_void_p
gdkpixbuf.gdk_pixbuf_animation_get_iter.restype = c_void_p
gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf.restype = GdkPixbuf
class GTimeVal(Structure):
    # ctypes mirror of GLib's GTimeVal (seconds + microseconds since epoch).
    # Field order and types must match the C struct layout exactly.
    _fields_ = [
        ('tv_sec', c_long),
        ('tv_usec', c_long)
    ]
class GdkPixbuf2ImageDecoder(ImageDecoder):
    """Decode still images and GIF animations through the GDK pixbuf library.

    All pixel data is copied out of the C-side pixbuf into Python-owned
    ctypes buffers, and the pixbuf is unreferenced immediately afterwards.
    """

    def get_file_extensions(self):
        return ['.png', '.xpm', '.jpg', '.jpeg', '.tif', '.tiff', '.pnm',
                '.ras', '.bmp', '.gif']

    def get_animation_file_extensions(self):
        return ['.gif', '.ani']

    def _load(self, file, filename, load_func):
        """Push the file's bytes through a GdkPixbufLoader and return whatever
        `load_func` extracts from the loader (a pixbuf or an animation).

        Raises ImageDecodeException if the loader rejects the data or the
        extraction yields nothing.
        """
        data = file.read()
        loader = gdkpixbuf.gdk_pixbuf_loader_new()
        gdkpixbuf.gdk_pixbuf_loader_write(loader, data, len(data), None)
        if not gdkpixbuf.gdk_pixbuf_loader_close(loader, None):
            raise ImageDecodeException(filename)
        result = load_func(loader)
        if not result:
            raise ImageDecodeException('Unable to load: %s' % filename)
        return result

    def _pixbuf_to_image(self, pixbuf):
        """Copy a GdkPixbuf's pixel data into a pyglet ImageData, releasing
        the pixbuf before returning."""
        # Get format and dimensions
        width = gdkpixbuf.gdk_pixbuf_get_width(pixbuf)
        height = gdkpixbuf.gdk_pixbuf_get_height(pixbuf)
        channels = gdkpixbuf.gdk_pixbuf_get_n_channels(pixbuf)
        rowstride = gdkpixbuf.gdk_pixbuf_get_rowstride(pixbuf)
        #has_alpha = gdkpixbuf.gdk_pixbuf_get_has_alpha(pixbuf)
        pixels = gdkpixbuf.gdk_pixbuf_get_pixels(pixbuf)

        # Copy pixel data.  The final row is only width*channels bytes long
        # (the pixbuf does not own a full rowstride for it), so copy exactly
        # that much to avoid reading past the C allocation.
        buffer = (c_ubyte * (rowstride * height))()
        memmove(buffer, pixels, rowstride * (height - 1) + width * channels)

        # Release pixbuf
        gdk.g_object_unref(pixbuf)

        # Determine appropriate GL type
        if channels == 3:
            format = 'RGB'
        else:
            format = 'RGBA'

        # Negative pitch: gdk stores rows top-to-bottom, pyglet's ImageData
        # default is bottom-to-top.
        return ImageData(width, height, format, buffer, -rowstride)

    def decode(self, file, filename):
        pixbuf = self._load(file, filename,
                            gdkpixbuf.gdk_pixbuf_loader_get_pixbuf)
        return self._pixbuf_to_image(pixbuf)

    def decode_animation(self, file, filename):
        # Extract GIF control data. If it's not a GIF, this method will
        # raise.
        gif_stream = gif.read(file)
        delays = [image.delay for image in gif_stream.images]

        # Get GDK animation iterator
        file.seek(0)
        anim = self._load(file, filename,
                          gdkpixbuf.gdk_pixbuf_loader_get_animation)
        time = GTimeVal(0, 0)
        iter = gdkpixbuf.gdk_pixbuf_animation_get_iter(anim, byref(time))

        frames = []

        # Extract each image
        for control_delay in delays:
            pixbuf = gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf(iter)
            # When attempting to load animated gifs with an alpha channel on
            # linux gdkpixbuf will normally return a null pixbuf for the final
            # frame resulting in a segfault:
            # http://code.google.com/p/pyglet/issues/detail?id=411
            # Since it is unclear why exactly this happens, the workaround
            # below is to start again and extract that frame on its own.
            # (fixed E711: identity comparison -- c_void_p restype yields
            # None for NULL, so `is None` is the correct and equivalent test)
            if pixbuf is None:
                file.seek(0)
                anim = self._load(file, filename,
                                  gdkpixbuf.gdk_pixbuf_loader_get_animation)
                temptime = GTimeVal(0, 0)
                iter = gdkpixbuf.gdk_pixbuf_animation_get_iter(anim, byref(temptime))
                gdkpixbuf.gdk_pixbuf_animation_iter_advance(iter, byref(time))
                pixbuf = gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf(iter)

            image = self._pixbuf_to_image(pixbuf)
            frames.append(AnimationFrame(image, control_delay))

            gdk_delay = gdkpixbuf.gdk_pixbuf_animation_iter_get_delay_time(iter)
            if gdk_delay == -1:
                # -1 means "forever": this is the last frame.
                break

            gdk_delay = gdkpixbuf.gdk_pixbuf_animation_iter_get_delay_time(iter)
            gdk_delay *= 1000 # milliseconds to microseconds

            # Compare gdk_delay to control_delay for interest only.
            #print control_delay, gdk_delay / 1000000.

            # Advance the virtual clock by the frame delay, normalising
            # microseconds into seconds.
            us = time.tv_usec + gdk_delay
            time.tv_sec += us // 1000000
            time.tv_usec = us % 1000000
            gdkpixbuf.gdk_pixbuf_animation_iter_advance(iter, byref(time))

        return Animation(frames)
def get_decoders():
    """Return the decoder instances this codec module provides."""
    # One decoder instance covers every raster format gdk-pixbuf supports.
    decoder = GdkPixbuf2ImageDecoder()
    return [decoder]
def get_encoders():
    """Return the encoder instances this codec module provides (none)."""
    # This codec is decode-only; gdk-pixbuf encoding is not exposed.
    return list()
def init():
    # GLib's type system must be initialised before any GObject/pixbuf call.
    gdk.g_type_init()
# Initialise GLib as soon as this codec module is imported.
init()
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/aio/operations/_keys_operations.py | 1 | 4806 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated code -- manual edits (including these comments)
# are lost when the client is regenerated.
class KeysOperations:
    """KeysOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.automation.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def list_by_automation_account(
        self,
        resource_group_name: str,
        automation_account_name: str,
        **kwargs
    ) -> "_models.KeyListResult":
        """Retrieve the automation keys for an account.

        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: KeyListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.KeyListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KeyListResult"]
        # Map well-known failure codes to typed exceptions; callers may
        # extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"

        # Construct URL
        url = self.list_by_automation_account.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
            'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # listKeys is a POST with an empty body (identifiers travel in the URL).
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 is the only success status this operation returns.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('KeyListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_by_automation_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/listKeys'}  # type: ignore
| mit |
zegra1989/pytree | prototype.py | 1 | 10484 | # -*- coding:utf-8 -*-
# 使用 UTF-8
import sys
# Python 2-only hack: reload() restores the setdefaultencoding attribute that
# site.py deletes at startup, letting the process default codec be forced to
# UTF-8.  This is a NameError on Python 3 (and unnecessary there).
reload(sys)
sys.setdefaultencoding("utf-8")
class Node(object):
    """Base tree node: a key list plus parallel child-pointer list."""

    def __init__(self):
        super(Node, self).__init__()
        # A freshly created node is an empty leaf.
        self.isleaf = True   # no children attached yet
        self.num = 0         # number of keys currently stored
        self.keys = []       # key slots
        self.pnodes = []     # child-node pointers
class BNode(Node):
    """Fixed-capacity B-tree node of minimum degree ``degree``.

    A node holds at most ``2*degree - 1`` keys and ``2*degree`` children.
    All slots are pre-allocated as None so in-place shifts never resize
    the underlying lists.
    """

    def __init__(self, degree):
        super(BNode, self).__init__()
        self.degree = degree
        # ``[None] * n`` replaces the original Python-2-only
        # ``[None for _ in xrange(n)]``: same result, portable to Python 3.
        self.keys = [None] * (degree * 2 - 1)
        self.pnodes = [None] * (degree * 2)

    def __str__(self):
        # Include the default object repr for identity while debugging.
        return "Object:{0} num:{1} keys:{2}".format(
            super(BNode, self).__str__(), self.num, self.keys)
class BTree(object):
    """In-memory B-tree of minimum degree ``degree`` (CLRS-style).

    Nodes are ``BNode`` instances; every node other than the root keeps
    between ``degree - 1`` and ``2*degree - 1`` keys.  The original
    Python-2-only ``xrange`` calls have been replaced with ``range`` (same
    semantics here, portable to Python 3), and the Chinese comments have
    been translated; the algorithm itself is unchanged.
    """

    def __init__(self, degree = 2):
        super(BTree, self).__init__()
        self.degree = degree
        self.threshold = degree*2-1   # maximum number of keys per node
        self.root = BNode(self.degree)

    def search(self, key, node = None):
        """Return ``(node, index)`` locating ``key``, or None if absent."""
        if node is None:
            node = self.root
        ipos = 0
        while ipos < node.num and key > node.keys[ipos]:
            ipos += 1
        if ipos < node.num and key == node.keys[ipos]:
            return node, ipos
        if node.isleaf is True:
            return None
        return self.search(key, node.pnodes[ipos])

    def split(self, parent, ipos, node):
        """Split the full child ``node`` (== parent.pnodes[ipos]) in two,
        promoting its median key into ``parent``."""
        new_node = BNode(self.degree)
        new_node.isleaf = node.isleaf
        for i in range(0, self.degree-1):
            new_node.keys[i] = node.keys[i+self.degree]
        new_node.num = node.num = self.degree-1
        if node.isleaf is False:
            for i in range(0, self.degree):
                new_node.pnodes[i] = node.pnodes[i+self.degree]
        for i in range(parent.num-1, ipos-1, -1):
            # No overflow here: insert() guarantees the parent is not full.
            parent.keys[i+1] = parent.keys[i]
        parent.keys[ipos] = node.keys[self.degree-1]
        parent.num += 1
        for i in range(parent.num-1, ipos, -1):
            parent.pnodes[i+1] = parent.pnodes[i]
        parent.pnodes[ipos+1] = new_node

    def insert_key(self, node, key):
        """Insert ``key`` into the subtree rooted at the non-full ``node``."""
        ipos = node.num-1
        if node.isleaf is True:
            while ipos >= 0 and key < node.keys[ipos]:
                # No overflow here: insert() guarantees this node is not full.
                node.keys[ipos+1] = node.keys[ipos]
                ipos -= 1
            node.keys[ipos+1] = key
            node.num += 1
            return None
        # insert to non-leaf node, search child
        while ipos >= 0 and key < node.keys[ipos]:
            ipos -= 1
        ipos += 1
        child = node.pnodes[ipos]
        if child.num == self.threshold:
            self.split(node, ipos, child)
            if key > node.keys[ipos]:
                # The key belongs in the newly split right sibling.
                ipos += 1
        self.insert_key(node.pnodes[ipos], key)

    def insert(self, key):
        """Insert ``key`` into the tree, growing a new root if needed."""
        if self.root.num != self.threshold:
            return self.insert_key(self.root, key)
        old_root = self.root
        new_root = BNode(self.degree)
        new_root.isleaf = False
        new_root.pnodes[0] = old_root
        self.root = new_root
        self.split(new_root, 0, old_root)
        self.insert_key(new_root, key)

    def merge(self, node, ipos):
        """
        Merge the two children flanking a key into the left child.
        ``ipos`` is the key's index in node.keys, and also the index of
        that key's LEFT child in node.pnodes.
        """
        lchild = node.pnodes[ipos]
        lchild.keys[lchild.num] = node.keys[ipos]
        lchild.num += 1
        rchild = node.pnodes[ipos+1]
        irpos = 0
        while irpos < rchild.num:
            lchild.keys[lchild.num+irpos] = rchild.keys[irpos]
            lchild.pnodes[lchild.num+irpos] = rchild.pnodes[irpos]
            irpos += 1
        lchild.num += rchild.num
        lchild.pnodes[lchild.num] = rchild.pnodes[rchild.num]
        # Free rchild
        while ipos < node.num-1:
            node.keys[ipos] = node.keys[ipos+1]
            node.pnodes[ipos+1] = node.pnodes[ipos+2]
            ipos += 1
        node.num -= 1

    def pop(self, node=None):
        """
        Remove and return the LARGEST key of the subtree rooted at ``node``
        (the whole tree when node is None); None when the subtree is empty.
        """
        if node is None:
            node = self.root
        if node.num == 0:
            return None
        if node.isleaf is True:
            key = node.keys[node.num-1]
            self.remove_key(node, key)
            self.shrink()
            return key
        while True:
            # Descend along the rightmost edge, topping up each child so the
            # eventual removal cannot underflow.
            ipos = self.guarantee(node, node.num)
            if node.pnodes[ipos].isleaf is True:
                break
            node = node.pnodes[ipos]
        child = node.pnodes[node.num]
        key = child.keys[child.num-1]
        self.remove_key(node, key)
        self.shrink()
        return key

    def shift(self, node=None):
        """
        Remove and return the SMALLEST key of the subtree rooted at ``node``
        (the whole tree when node is None); None when the subtree is empty.
        """
        if node is None:
            node = self.root
        if node.num == 0:
            return None
        if node.isleaf is True:
            key = node.keys[0]
            self.remove_key(node, key)
            self.shrink()
            return key
        while True:
            # Descend along the leftmost edge, topping up each child.
            self.guarantee(node, 0)
            if node.pnodes[0].isleaf is True:
                break
            node = node.pnodes[0]
        key = node.pnodes[0].keys[0]
        self.remove_key(node, key)
        self.shrink()
        return key

    def shrink(self):
        """Drop an empty root, promoting its only child."""
        if self.root.num == 0 and self.root.pnodes[0] is not None:
            # Free root
            self.root = self.root.pnodes[0]

    def guarantee(self, node, ipos):
        """
        Ensure node.pnodes[ipos] holds at least ``degree`` keys, borrowing
        from a sibling or merging with one as needed.  Returns the (possibly
        left-shifted) child index to descend into.
        """
        # Condition: 3
        child = node.pnodes[ipos]
        if child.num >= self.degree:
            return ipos
        # Condition: 3a
        # When ipos == 0 the child has no left sibling.
        if ipos > 0 and node.pnodes[ipos-1].num >= self.degree:
            lbrother = node.pnodes[ipos-1]
            child.pnodes[child.num+1] = child.pnodes[child.num]
            icpos = child.num-1
            while icpos >= 0:
                child.keys[icpos+1] = child.keys[icpos]
                child.pnodes[icpos+1] = child.pnodes[icpos]
                icpos -= 1
            child.keys[0] = node.keys[ipos-1]
            child.pnodes[0] = lbrother.pnodes[lbrother.num]
            child.num += 1
            node.keys[ipos-1] = lbrother.keys[lbrother.num-1]
            lbrother.num -= 1
            return ipos
        # When ipos == node.num the child has no right sibling.
        if ipos < node.num and node.pnodes[ipos+1].num >= self.degree:
            rbrother = node.pnodes[ipos+1]
            child.keys[child.num] = node.keys[ipos]
            child.pnodes[child.num+1] = rbrother.pnodes[0]
            child.num += 1
            node.keys[ipos] = rbrother.keys[0]
            irpos = 0
            while irpos < rbrother.num-1:
                rbrother.keys[irpos] = rbrother.keys[irpos+1]
                rbrother.pnodes[irpos] = rbrother.pnodes[irpos+1]
                irpos += 1
            rbrother.pnodes[irpos] = rbrother.pnodes[irpos+1]
            rbrother.num -= 1
            return ipos
        # Condition: 3b
        # When ipos == node.num there is no key node.keys[ipos]:
        # node.pnodes[ipos] is the RIGHT child of node.keys[ipos-1].
        # Shift ipos left one slot so the right child is uniformly at
        # ipos+1 for merge().
        if ipos == node.num:
            ipos -= 1
        self.merge(node, ipos)
        return ipos

    def remove_key(self, node, key):
        """Remove ``key`` from the subtree rooted at ``node``.

        Returns 0 on success, -1 if the key is not in the tree.
        """
        # Locate the child slot for `key`:
        #   node.pnodes[ipos] points at the target child;
        #   if ipos < node.num it is the LEFT child of node.keys[ipos],
        #   if ipos == node.num it is the RIGHT child of node.keys[ipos-1].
        #
        # To decide whether `key` sits in node.keys itself:
        #   ipos == 0 means it cannot be present;
        #   ipos > 0 means compare key == node.keys[ipos-1].
        ipos = node.num-1
        while ipos >= 0 and key < node.keys[ipos]:
            ipos -= 1
        ipos += 1
        if ipos > 0 and key == node.keys[ipos-1]:
            # After this shift node.keys[ipos] is the matched key.
            ipos = ipos-1
            if node.isleaf is True:
                # Condition: 1
                while ipos < node.num-1:
                    node.keys[ipos] = node.keys[ipos+1]
                    ipos += 1
                node.num -= 1
                return 0
            else:
                # Condition: 2
                lchild = node.pnodes[ipos]
                if lchild.num >= self.degree:
                    # Condition: 2a -- replace with the predecessor key.
                    node.keys[ipos] = self.pop(lchild)
                    assert node.keys[ipos] is not None
                    return 0
                # A matched key in an internal node always has a right child.
                rchild = node.pnodes[ipos+1]
                if rchild.num >= self.degree:
                    # Condition: 2b -- replace with the successor key.
                    node.keys[ipos] = self.shift(rchild)
                    assert node.keys[ipos] is not None
                    return 0
                # Condition: 2c -- both children minimal: merge, then recurse.
                self.merge(node, ipos)
                return self.remove_key(node.pnodes[ipos], key)
        else:
            if node.isleaf is True:
                # Key not found anywhere in the tree.
                return -1
            else:
                # Condition: 3 -- top up the child before descending.
                ipos = self.guarantee(node, ipos)
                return self.remove_key(node.pnodes[ipos], key)

    def remove(self, key):
        """Remove ``key`` from the tree (no-op if absent)."""
        self.remove_key(self.root, key)
        self.shrink()

    def print_node(self, node, strings, depth=0):
        """Append an indented textual dump of ``node`` and its subtree."""
        if node is None:
            return
        strings.append(">"*depth + str(node.keys[:node.num])+" "+str(node.num))
        if node.isleaf is False:
            strings.append("")
            for ipos in range(node.num+1):
                self.print_node(node.pnodes[ipos], strings, depth+1)
            strings.append("")

    def __str__(self):
        strings = []
        self.print_node(self.root, strings)
        return "\n".join(strings)
| mit |
mgedmin/ansible | lib/ansible/module_utils/shell.py | 13 | 8606 | #
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import socket
import time
import signal
try:
import paramiko
from paramiko.ssh_exception import AuthenticationException
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkError
from ansible.module_utils.six import BytesIO
from ansible.module_utils._text import to_native
# Terminal escape sequences stripped from device output before prompt
# matching (see Shell.strip):
ANSI_RE = [
    re.compile(r'(\x1b\[\?1h\x1b=)'),   # keypad/application-mode switch
    re.compile(r'\x08'),                # backspace
    re.compile(r'\x1b[^m]*m')           # ANSI SGR (colour/attribute) codes
]
def to_list(val):
    """Coerce `val` to a list: copy sequences, wrap scalars, [] for None."""
    if isinstance(val, (list, tuple)):
        return list(val)
    if val is None:
        return list()
    return [val]
class ShellError(Exception):
    """Raised on shell transport failures.

    The optional `command` attribute records the command that was being
    executed when the failure occurred (None when not applicable).
    """

    def __init__(self, msg, command=None):
        super(ShellError, self).__init__(msg)
        self.command = command
class Shell(object):
    """Interactive SSH shell session to a network device.

    Commands are written to a paramiko channel and output is accumulated
    until one of the configured prompt regexes matches (or an error regex
    fires, which raises ShellError).
    """

    def __init__(self, prompts_re=None, errors_re=None, kickstart=True, timeout=10):
        self.ssh = None
        self.shell = None
        self.kickstart = kickstart
        self._matched_prompt = None
        self.prompts = prompts_re or list()
        self.errors = errors_re or list()
        self._timeout = timeout
        self._history = list()
        # SIGALRM bounds how long a single send() may block (see send()).
        # NOTE(review): signal handlers can only be installed from the main
        # thread -- confirm callers never construct Shell elsewhere.
        signal.signal(signal.SIGALRM, self.alarm_handler)

    def open(self, host, port=22, username=None, password=None,
             key_filename=None, pkey=None, look_for_keys=None,
             allow_agent=False, key_policy="loose"):
        """Open the SSH connection and an interactive shell channel.

        key_policy: "strict" rejects unknown host keys, "loose" auto-adds
        them, "ignore" skips loading known-hosts files entirely.
        """
        self.ssh = paramiko.SSHClient()

        if key_policy != "ignore":
            self.ssh.load_system_host_keys()
            try:
                self.ssh.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
            except IOError:
                # No user known_hosts file; not fatal.
                pass

        if key_policy == "strict":
            self.ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
        else:
            self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # unless explicitly set, disable look for keys if a password is
        # present. this changes the default search order paramiko implements
        if not look_for_keys:
            look_for_keys = password is None

        try:
            self.ssh.connect(
                host, port=port, username=username, password=password,
                timeout=self._timeout, look_for_keys=look_for_keys, pkey=pkey,
                key_filename=key_filename, allow_agent=allow_agent,
            )

            self.shell = self.ssh.invoke_shell()
            self.shell.settimeout(self._timeout)
        except socket.gaierror:
            raise ShellError("unable to resolve host name")
        except AuthenticationException:
            raise ShellError('Unable to authenticate to remote device')
        except socket.timeout:
            raise ShellError("timeout trying to connect to remote device")
        except socket.error:
            exc = get_exception()
            # errno 60 is ETIMEDOUT on BSD/macOS -- presumably the platform
            # this branch targeted; other socket errors are re-raised as-is.
            if exc.errno == 60:
                raise ShellError('timeout trying to connect to host')
            raise

        if self.kickstart:
            # Wake the remote side so it emits its first prompt.
            self.shell.sendall("\n")
            self.receive()

    def strip(self, data):
        """Remove terminal escape sequences (ANSI_RE) from `data`."""
        for regex in ANSI_RE:
            data = regex.sub('', data)
        return data

    def alarm_handler(self, signum, frame):
        # SIGALRM fired: the in-flight command exceeded the timeout.
        self.shell.close()
        raise ShellError('timeout trying to send command: %s' % self._history[-1])

    def receive(self, cmd=None):
        """Read from the channel until a prompt regex matches; return the
        sanitized output for `cmd`."""
        recv = BytesIO()
        handled = False

        while True:
            data = self.shell.recv(200)

            recv.write(data)
            # Re-read only the newly received chunk as the match window.
            recv.seek(recv.tell() - len(data))
            window = self.strip(recv.read().decode('utf8'))

            # Answer interactive sub-prompts (e.g. confirmation questions)
            # at most once per command.
            if hasattr(cmd, 'prompt') and not handled:
                handled = self.handle_prompt(window, cmd)

            try:
                if self.find_prompt(window):
                    resp = self.strip(recv.getvalue().decode('utf8'))
                    return self.sanitize(cmd, resp)
            except ShellError:
                # Attach the offending command before propagating.
                exc = get_exception()
                exc.command = cmd
                raise

    def send(self, commands):
        """Send each command and collect its output; returns the list of
        responses (or None when timeout is 0, i.e. fire-and-forget)."""
        responses = list()
        try:
            for command in to_list(commands):
                # Arm the SIGALRM watchdog for this command.
                signal.alarm(self._timeout)
                self._history.append(str(command))
                cmd = '%s\r' % str(command)
                self.shell.sendall(cmd)
                if self._timeout == 0:
                    # alarm(0) above cancelled the watchdog; do not wait for
                    # any output.
                    return
                responses.append(self.receive(command))
        except socket.timeout:
            raise ShellError("timeout trying to send command: %s" % cmd)
        except socket.error:
            exc = get_exception()
            raise ShellError("problem sending command to host: %s" % to_native(exc))
        return responses

    def close(self):
        self.shell.close()

    def handle_prompt(self, resp, cmd):
        """If any of cmd's sub-prompts appear in `resp`, send the paired
        answer; returns True once a prompt was answered."""
        prompt = to_list(cmd.prompt)
        response = to_list(cmd.response)

        for pr, ans in zip(prompt, response):
            match = pr.search(resp)
            if match:
                answer = '%s\r' % ans
                self.shell.sendall(answer)
                return True

    def sanitize(self, cmd, resp):
        """Strip the echoed command and prompt lines from the output."""
        cleaned = []
        for line in resp.splitlines():
            if line.lstrip().startswith(str(cmd)) or self.find_prompt(line):
                continue
            cleaned.append(line)
        return "\n".join(cleaned)

    def find_prompt(self, response):
        """Return True when a prompt regex matches `response`; raise
        ShellError when an error regex matches first."""
        for regex in self.errors:
            if regex.search(response):
                raise ShellError('matched error in response: %s' % response)

        for regex in self.prompts:
            match = regex.search(response)
            if match:
                self._matched_prompt = match.group()
                return True
class CliBase(object):
    """Basic paramiko-based ssh transport any NetworkModule can use."""

    def __init__(self):
        if not HAS_PARAMIKO:
            raise NetworkError(
                msg='paramiko is required but does not appear to be installed. '
                'It can be installed using `pip install paramiko`'
            )

        self.shell = None
        self._connected = False
        self.default_output = 'text'

    def connect(self, params, kickstart=True):
        """Open a Shell to the device described by `params`.

        Subclasses must define CLI_PROMPTS_RE and CLI_ERRORS_RE (regex
        lists) -- they are referenced here but not defined on this base.
        """
        host = params['host']
        port = params.get('port') or 22

        username = params['username']
        password = params.get('password')
        key_file = params.get('ssh_keyfile')
        timeout = params['timeout']

        try:
            self.shell = Shell(
                kickstart=kickstart,
                prompts_re=self.CLI_PROMPTS_RE,
                errors_re=self.CLI_ERRORS_RE,
                timeout=timeout
            )
            self.shell.open(host, port=port, username=username,
                            password=password, key_filename=key_file)
        except ShellError:
            # Re-wrap transport failures as NetworkError for module callers.
            exc = get_exception()
            raise NetworkError(msg='failed to connect to %s:%s' % (host, port),
                               exc=to_native(exc))

        self._connected = True

    def disconnect(self):
        self.shell.close()
        self._connected = False

    def authorize(self, params, **kwargs):
        # No-op by default; subclasses implement privilege escalation.
        pass

    def execute(self, commands):
        """Run commands on the device, translating ShellError to
        NetworkError with the command list attached."""
        try:
            return self.shell.send(commands)
        except ShellError:
            exc = get_exception()
            commands = [str(c) for c in commands]
            raise NetworkError(to_native(exc), commands=commands)

    def run_commands(self, commands):
        return self.execute(to_list(commands))

    # The following are device-family specific and must be provided by
    # concrete transports.
    def configure(self, commands):
        raise NotImplementedError

    def get_config(self, **kwargs):
        raise NotImplementedError

    def load_config(self, commands, **kwargs):
        raise NotImplementedError

    def save_config(self):
        raise NotImplementedError
| gpl-3.0 |
TimYi/django | tests/model_package/tests.py | 380 | 2668 | from __future__ import unicode_literals
from django.db import connection, models
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.article import Article, Site
from .models.publication import Publication
class Advertisement(models.Model):
    # Defined in the test module itself (not the models subpackage) to
    # exercise #12245: models living in the test package must still work,
    # including M2M references by "app_label.ModelName" string.
    customer = models.CharField(max_length=100)
    publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
    """Regression tests for models split across a models/ subpackage."""

    def test_m2m_tables_in_subpackage_models(self):
        """
        Regression for #12168: models split into subpackages still get M2M
        tables.
        """
        p = Publication.objects.create(title="FooBar")
        site = Site.objects.create(name="example.com")

        a = Article.objects.create(headline="a foo headline")
        a.publications.add(p)
        a.sites.add(site)
        # Re-fetch from the database to prove the M2M rows were persisted.
        a = Article.objects.get(id=a.pk)
        self.assertEqual(a.id, a.pk)
        self.assertEqual(a.sites.count(), 1)

    def test_models_in_the_test_package(self):
        """
        Regression for #12245 - Models can exist in the test package, too.
        """
        p = Publication.objects.create(title="FooBar")
        ad = Advertisement.objects.create(customer="Lawrence Journal-World")
        ad.publications.add(p)

        # Re-fetch to confirm the M2M table for the test-package model exists.
        ad = Advertisement.objects.get(id=ad.pk)
        self.assertEqual(ad.publications.count(), 1)

    def test_automatic_m2m_column_names(self):
        """
        Regression for #12386 - field names on the autogenerated intermediate
        class that are specified as dotted strings don't retain any path
        component for the field or column name.
        """
        self.assertEqual(
            Article.publications.through._meta.fields[1].name, 'article'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[1].get_attname_column(),
            ('article_id', 'article_id')
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].name, 'publication'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].get_attname_column(),
            ('publication_id', 'publication_id')
        )

        self.assertEqual(
            Article._meta.get_field('publications').m2m_db_table(),
            truncate_name('model_package_article_publications', connection.ops.max_name_length()),
        )

        self.assertEqual(
            Article._meta.get_field('publications').m2m_column_name(), 'article_id'
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_reverse_name(),
            'publication_id'
        )
| bsd-3-clause |
epuzanov/ZenPacks.community.PgSQLMon | setup.py | 1 | 2669 | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.community.PgSQLMon"
VERSION = "2.6"
AUTHOR = "Egor Puzanov"
LICENSE = "GPLv2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.community']
PACKAGES = ['ZenPacks', 'ZenPacks.community', 'ZenPacks.community.PgSQLMon']
INSTALL_REQUIRES = ['ZenPacks.community.RDBMS>=2.3', 'ZenPacks.community.SQLDataSource>=2.0']
COMPAT_ZENOSS_VERS = ">=2.5"
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
# Standard Zenoss ZenPack setup() invocation: all metadata values come from
# the Zenoss-managed variable block above.
setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page.  Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name = NAME,
    version = VERSION,
    author = AUTHOR,
    license = LICENSE,

    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers = COMPAT_ZENOSS_VERS,

    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed.  If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName = PREV_ZENPACK_NAME,

    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages = NAMESPACE_PACKAGES,

    # Tell setuptools what packages this zenpack provides.
    packages = find_packages(),

    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data = True,

    # The MANIFEST.in file is the recommended way of including additional files
    # in your ZenPack. package_data is another.
    #package_data = {}

    # Indicate dependencies on other python modules or ZenPacks.  This line
    # is modified by zenoss when the ZenPack edit page is submitted.  Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end.  Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires = INSTALL_REQUIRES,

    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points = {
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },

    # All ZenPack eggs must be installed in unzipped form.
    zip_safe = False,
)
| gpl-2.0 |
asiersarasua/QGIS | python/plugins/processing/algs/qgis/FieldPyculator.py | 5 | 8323 | # -*- coding: utf-8 -*-
"""
***************************************************************************
FieldPyculator.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya & NextGIS'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya & NextGIS'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsProcessingException,
QgsField,
QgsFeatureSink,
QgsProcessing,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterString,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class FieldsPyculator(QgisAlgorithm):
INPUT = 'INPUT'
FIELD_NAME = 'FIELD_NAME'
FIELD_TYPE = 'FIELD_TYPE'
FIELD_LENGTH = 'FIELD_LENGTH'
FIELD_PRECISION = 'FIELD_PRECISION'
GLOBAL = 'GLOBAL'
FORMULA = 'FORMULA'
OUTPUT = 'OUTPUT'
RESULT_VAR_NAME = 'value'
TYPES = [QVariant.LongLong, QVariant.Double, QVariant.String]
def group(self):
return self.tr('Vector table')
def groupId(self):
return 'vectortable'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.type_names = [self.tr('Integer'),
self.tr('Float'),
self.tr('String')]
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT, self.tr('Input layer'),
types=[QgsProcessing.TypeVector]))
self.addParameter(QgsProcessingParameterString(self.FIELD_NAME,
self.tr('Result field name'), defaultValue='NewField'))
self.addParameter(QgsProcessingParameterEnum(self.FIELD_TYPE,
self.tr('Field type'), options=self.type_names))
self.addParameter(QgsProcessingParameterNumber(self.FIELD_LENGTH,
self.tr('Field length'), minValue=0,
defaultValue=10))
self.addParameter(QgsProcessingParameterNumber(self.FIELD_PRECISION,
self.tr('Field precision'), minValue=0, maxValue=15,
defaultValue=3))
self.addParameter(QgsProcessingParameterString(self.GLOBAL,
self.tr('Global expression'), multiLine=True, optional=True))
self.addParameter(QgsProcessingParameterString(self.FORMULA,
self.tr('Formula'), defaultValue='value = ', multiLine=True))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
self.tr('Calculated')))
def name(self):
return 'advancedpythonfieldcalculator'
def displayName(self):
return self.tr('Advanced Python field calculator')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
field_name = self.parameterAsString(parameters, self.FIELD_NAME, context)
field_type = self.TYPES[self.parameterAsEnum(parameters, self.FIELD_TYPE, context)]
width = self.parameterAsInt(parameters, self.FIELD_LENGTH, context)
precision = self.parameterAsInt(parameters, self.FIELD_PRECISION, context)
code = self.parameterAsString(parameters, self.FORMULA, context)
globalExpression = self.parameterAsString(parameters, self.GLOBAL, context)
fields = source.fields()
field = QgsField(field_name, field_type, '', width, precision)
fields.append(field)
new_ns = {}
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, source.wkbType(), source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
# Run global code
if globalExpression.strip() != '':
try:
bytecode = compile(globalExpression, '<string>', 'exec')
exec(bytecode, new_ns)
except:
raise QgsProcessingException(
self.tr("FieldPyculator code execute error.Global code block can't be executed!\n{0}\n{1}").format(
str(sys.exc_info()[0].__name__), str(sys.exc_info()[1])))
# Replace all fields tags
fields = source.fields()
num = 0
for field in fields:
field_name = str(field.name())
replval = '__attr[' + str(num) + ']'
code = code.replace('<' + field_name + '>', replval)
num += 1
# Replace all special vars
code = code.replace('$id', '__id')
code = code.replace('$geom', '__geom')
need_id = code.find('__id') != -1
need_geom = code.find('__geom') != -1
need_attrs = code.find('__attr') != -1
# Compile
try:
bytecode = compile(code, '<string>', 'exec')
except:
raise QgsProcessingException(
self.tr("FieldPyculator code execute error. Field code block can't be executed!\n{0}\n{1}").format(
str(sys.exc_info()[0].__name__), str(sys.exc_info()[1])))
# Run
features = source.getFeatures()
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, feat in enumerate(features):
if feedback.isCanceled():
break
feedback.setProgress(int(current * total))
attrs = feat.attributes()
feat_id = feat.id()
# Add needed vars
if need_id:
new_ns['__id'] = feat_id
if need_geom:
geom = feat.geometry()
new_ns['__geom'] = geom
if need_attrs:
pyattrs = [a for a in attrs]
new_ns['__attr'] = pyattrs
# Clear old result
if self.RESULT_VAR_NAME in new_ns:
del new_ns[self.RESULT_VAR_NAME]
# Exec
exec(bytecode, new_ns)
# Check result
if self.RESULT_VAR_NAME not in new_ns:
raise QgsProcessingException(
self.tr("FieldPyculator code execute error\n"
"Field code block does not return '{0}' variable! "
"Please declare this variable in your code!").format(self.RESULT_VAR_NAME))
# Write feature
attrs.append(new_ns[self.RESULT_VAR_NAME])
feat.setAttributes(attrs)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
return {self.OUTPUT: dest_id}
def checkParameterValues(self, parameters, context):
    """Pre-flight validation of the algorithm's parameters.

    Currently defers entirely to the base-class check.
    """
    # TODO check that formula is correct and fields exist
    return super(FieldsPyculator, self).checkParameterValues(parameters, context)
| gpl-2.0 |
texastribune/tx_salaries | tx_salaries/utils/transformers/texas_tech_university.py | 1 | 2223 | from datetime import date
from . import base
from . import mixins
class TransformedRecord(mixins.GenericCompensationMixin,
                        mixins.GenericDepartmentMixin,
                        mixins.GenericJobTitleMixin,
                        mixins.MembershipMixin,
                        mixins.OrganizationMixin,
                        mixins.PostMixin,
                        mixins.RaceMixin,
                        mixins.LinkMixin,
                        base.BaseTransformedRecord):
    """Transformer for the Texas Tech University salary spreadsheet.

    Maps the raw spreadsheet columns onto the canonical record fields
    consumed by the mixins above.
    """

    # Canonical field name -> spreadsheet column header.
    MAP = {
        'department': 'Department',
        'name': 'Name',
        'job_title': 'Title',
        'last_name': 'Last Name',
        'hire_date': 'Hire Date',
        'pay_status': 'FT or PT Status',
        'compensation': 'Salary',
        'race': 'Race',
        'gender': 'Gender',
    }

    # Normalize source gender strings to one-letter codes; 'Femail' covers a
    # known typo in the supplied data.
    gender_map = {'Female': 'F', 'Femail': 'F', 'Male': 'M'}

    NAME_FIELDS = ('name', )

    ORGANIZATION_NAME = 'Texas Tech University'

    ORGANIZATION_CLASSIFICATION = 'University'

    # The data we get for Texas Tech System is always valid
    is_valid = True

    # All employees are full-time right now
    compensation_type = 'Full Time'

    DATE_PROVIDED = date(2013, 7, 31)

    URL = 'http://raw.texastribune.org.s3.amazonaws.com/texas_tech_university/salaries/2013-07/Faculty%20Open%20Records%20-%20Tribune.xlsx'

    @property
    def identifier(self):
        """
        Identifier by Texas Tech Systems

        Ignore everything but name/gender. We have not found any
        duplicate name gender records (yet), and should not as TT
        includes middle initials.
        """
        excluded = [self.race_key, self.department_key, self.job_title_key,
                    self.hire_date_key, self.compensation_key]
        return {
            'scheme': 'tx_salaries_hash',
            'identifier': base.create_hash_for_record(self.data,
                                                      exclude=excluded)
        }

    @property
    def person(self):
        # Parse the combined "Name" column into the shape the importer
        # expects.  NOTE: uses unicode(), so this module is Python 2 only.
        name = self.get_name()
        return {
            'family_name': name.last,
            'given_name': name.first,
            'additional_name': name.middle,
            'name': unicode(name),
            'gender': self.gender_map[self.gender]
        }
# Module-level transform entry point built from the record class above.
transform = base.transform_factory(record_class=TransformedRecord,
                                   transform_func=base.generic_merge_cell_transform)
| apache-2.0 |
gco/rietveld | codereview/patiencediff.py | 28 | 3535 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
class PseudoPatienceSequenceMatcher(difflib.SequenceMatcher):
  """Provides a SequenceMatcher that prefers longer "first" matches to longer
  "second" matches.
  """

  def get_matching_blocks(self):
    """Returns list of triples describing matching subsequences.

    Each triple is of the form (i, j, n), and means that a[i:i+n] == b[j:j+n].
    The triples are monotonically increasing in i and j.

    The last triple is a dummy, and has the value (len(a), len(b), 0). It is the
    only triple with n == 0. If (i, j, n) and (i', j', n') are adjacent triples
    in the list, and the second is not the last triple in the list, then
    i+n != i' or j+n != j'; in other words, adjacent triples always describe
    non-adjacent equal blocks.
    """
    matches = difflib.SequenceMatcher.get_matching_blocks(self)
    # Make sure all elements are of type difflib.Match.
    # NOTE: xrange was replaced with range for Python 3 compatibility; when
    # only iterated, range behaves identically on Python 2.
    for index in range(len(matches)):
      if not isinstance(matches[index], difflib.Match):
        matches[index] = difflib.Match(matches[index][0],
                                       matches[index][1],
                                       matches[index][2])
    # Check if there's a match at the beginning of the current region, and
    # insert a new Match object at the beginning of |matches| if necessary.
    if matches[0].a != matches[0].b:
      match_length = 0
      is_a = matches[0].a < matches[0].b
      index = matches[0].a if is_a else matches[0].b
      while (index + match_length < len(self.a) and
             index + match_length < len(self.b) and
             self.a[index + match_length] == self.b[index + match_length]):
        match_length += 1
      if match_length:
        # Shrink the original first match by the common-prefix length and
        # record that prefix as its own Match at the front of the list.
        matches[0] = difflib.Match(
            (index if is_a else matches[0].a) + match_length,
            (matches[0].b if is_a else index) + match_length,
            matches[0].size - match_length)
        if matches[0].size == 0:
          matches[0] = difflib.Match(index, index, match_length)
        else:
          matches.insert(0, difflib.Match(index, index, match_length))
    if len(matches) < 2:
      return matches
    # For all pairs of Match objects, prefer a longer |first| Match if the end
    # of the first match is the same as the beginning of the second match.
    for index in range(len(matches) - 2):
      first = matches[index]
      second = matches[index + 1]
      while True:
        if (first.a + first.size < len(self.a) and
            first.b + first.size < len(self.b) and
            second.a < len(self.a) and second.b < len(self.b) and
            self.a[first.a + first.size] == self.b[first.b + first.size] and
            self.a[second.a] == self.b[second.b]):
          first = difflib.Match(first.a, first.b, first.size + 1)
          second = difflib.Match(second.a + 1, second.b + 1, second.size - 1)
        else:
          break
      matches[index] = first
      matches[index + 1] = second
    return matches
| apache-2.0 |
grilo/ansible-1 | lib/ansible/modules/cloud/pubnub/pubnub_blocks.py | 84 | 23806 | #!/usr/bin/python
#
# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
# Frameworks
# Copyright (C) 2016 PubNub Inc.
# http://www.pubnub.com/
# http://www.pubnub.com/terms
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pubnub_blocks
version_added: '2.2'
short_description: PubNub blocks management module.
description:
- "This module allows Ansible to interface with the PubNub BLOCKS
infrastructure by providing the following operations: create / remove,
start / stop and rename for blocks and create / modify / remove for event
handlers"
author:
- PubNub <support@pubnub.com> (@pubnub)
- Sergey Mamontov <sergey@pubnub.com> (@parfeon)
requirements:
- "python >= 2.7"
- "pubnub_blocks_client >= 1.0"
options:
email:
description:
- Email from account for which new session should be started.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
password:
description:
- Password which match to account to which specified C(email) belong.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
cache:
description: >
In case if single play use blocks management module few times it is
preferred to enabled 'caching' by making previous module to share
gathered artifacts and pass them to this parameter.
required: false
default: {}
account:
description:
- "Name of PubNub account for from which C(application) will be used to
manage blocks."
- "User\'s account will be used if value not set or empty."
required: false
version_added: '2.4'
application:
description:
- "Name of target PubNub application for which blocks configuration on
specific C(keyset) will be done."
required: true
keyset:
description:
- Name of application's keys set which is bound to managed blocks.
required: true
state:
description:
- "Intended block state after event handlers creation / update process
will be completed."
required: false
default: 'started'
choices: ['started', 'stopped', 'present', 'absent']
name:
description:
- Name of managed block which will be later visible on admin.pubnub.com.
required: true
description:
description:
- "Short block description which will be later visible on
admin.pubnub.com. Used only if block doesn\'t exists and won\'t change
description for existing block."
required: false
default: 'New block'
event_handlers:
description:
- "List of event handlers which should be updated for specified block
C(name)."
- "Each entry for new event handler should contain: C(name), C(src),
C(channels), C(event). C(name) used as event handler name which can be
used later to make changes to it."
- C(src) is full path to file with event handler code.
- "C(channels) is name of channel from which event handler is waiting
for events."
- "C(event) is type of event which is able to trigger event handler:
I(js-before-publish), I(js-after-publish), I(js-after-presence)."
- "Each entry for existing handlers should contain C(name) (so target
handler can be identified). Rest parameters (C(src), C(channels) and
C(event)) can be added if changes required for them."
- "It is possible to rename event handler by adding C(changes) key to
event handler payload and pass dictionary, which will contain single key
C(name), where new name should be passed."
- "To remove particular event handler it is possible to set C(state) for
it to C(absent) and it will be removed."
required: false
default: []
changes:
description:
- "List of fields which should be changed by block itself (doesn't
affect any event handlers)."
- "Possible options for change is: C(name)."
required: false
default: {}
validate_certs:
description:
- "This key allow to try skip certificates check when performing REST API
calls. Sometimes host may have issues with certificates on it and this
will cause problems to call PubNub REST API."
- If check should be ignored C(False) should be passed to this parameter.
required: false
default: true
'''
EXAMPLES = '''
# Event handler create example.
- name: Create single event handler
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
src: '{{ path_to_handler_source }}'
name: '{{ handler_name }}'
event: 'js-before-publish'
channels: '{{ handler_channel }}'
# Change event handler trigger event type.
- name: Change event handler 'event'
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
name: '{{ handler_name }}'
event: 'js-after-publish'
# Stop block and event handlers.
- name: Stopping block
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
    state: stopped
# Multiple module calls with cached result passing
- name: Create '{{ block_name }}' block
register: module_cache
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_1_source }}'
name: '{{ event_handler_1_name }}'
channels: '{{ event_handler_1_channel }}'
event: 'js-before-publish'
- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_2_source }}'
name: '{{ event_handler_2_name }}'
channels: '{{ event_handler_2_channel }}'
event: 'js-before-publish'
- name: Start '{{ block_name }}' block
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: started
'''
RETURN = '''
module_cache:
description: "Cached account information. In case if with single play module
used few times it is better to pass cached data to next module calls to speed
up process."
type: dict
returned: always
'''
import copy
import os
try:
# Import PubNub BLOCKS client.
from pubnub_blocks_client import User, Account, Owner, Application, Keyset
from pubnub_blocks_client import Block, EventHandler
import pubnub_blocks_client.exceptions as exceptions
HAS_PUBNUB_BLOCKS_CLIENT = True
except ImportError:
HAS_PUBNUB_BLOCKS_CLIENT = False
User = None
Account = None
Owner = None
Application = None
Keyset = None
Block = None
EventHandler = None
exceptions = None
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
def pubnub_user(module):
    """Build the authorized ``User`` model for this module run.

    The user is restored from a previous module call's cached data when
    available; otherwise a fresh session is started from the provided
    email/password pair.  Missing both causes the module to fail.

    :type module:  AnsibleModule
    :param module: Reference on module which contain module launch
                   information and status report methods.
    :rtype:  User
    :return: Ready to use user model (or 'None' after 'fail_json').
    """
    params = module.params
    cached = params.get('cache') or {}
    module_cache = cached.get('module_cache')
    if module_cache:
        # Re-use the session captured by an earlier task in the same play.
        user = User()
        user.restore(cache=copy.deepcopy(module_cache['pnm_user']))
        return user
    if params.get('email') and params.get('password'):
        return User(email=params.get('email'), password=params.get('password'))
    err_msg = 'It looks like not account credentials has been passed or ' \
              '\'cache\' field doesn\'t have result of previous module ' \
              'call.'
    module.fail_json(msg='Missing account credentials.',
                     description=err_msg, changed=False)
    return None
def pubnub_account(module, user):
    """Select the PubNub account to operate on.

    Uses the account named by the 'account' parameter when provided,
    otherwise falls back to the user's first account.  Fails the module
    when the requested (or any) account is unavailable.

    :type module:  AnsibleModule
    :param module: Reference on module which contain module launch
                   information and status report methods.
    :type user:  User
    :param user: Reference on authorized user for which one of accounts
                 should be used during manipulations with block.
    :rtype:  Account
    :return: Ready to use account model.
    """
    params = module.params
    if params.get('account'):
        account_name = params.get('account')
        account = user.account(name=account_name)
        if account is None:
            err_frmt = 'It looks like there is no \'{0}\' account for ' \
                       'authorized user. Please make sure what correct ' \
                       'name has been passed during module configuration.'
            module.fail_json(msg='Missing account.',
                             description=err_frmt.format(account_name),
                             changed=False)
    else:
        accounts = user.accounts()
        # BUGFIX: 'user.accounts()[0]' previously raised an unhandled
        # IndexError when the authorized user had no accounts at all;
        # fail cleanly through Ansible instead.
        if not accounts:
            module.fail_json(msg='Missing account.',
                             description='There is no accounts for '
                                         'authorized user.',
                             changed=False)
        account = accounts[0]

    return account
def pubnub_application(module, account):
    """Retrieve reference on target application from account model.

    NOTE: In case if account authorization will fail or there is no
    application with specified name, module will exit with error.
    :type module:  AnsibleModule
    :param module: Reference on module which contain module launch
                   information and status report methods.
    :type account:  Account
    :param account: Reference on PubNub account model from which reference
                    on application should be fetched.
    :rtype:  Application
    :return: Reference on initialized and ready to use application model.
    """
    application = None
    params = module.params
    try:
        application = account.application(params['application'])
    except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
        # Surface REST/API failures through Ansible while exporting the
        # cached account state for possible reuse by later tasks.
        exc_msg = _failure_title_from_exception(exc)
        exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
        module.fail_json(msg=exc_msg, description=exc_descr,
                         changed=account.changed,
                         module_cache=dict(account))

    if application is None:
        # No exception, but the account simply has no such application.
        err_fmt = 'There is no \'{0}\' application for {1}. Make sure what ' \
                  'correct application name has been passed. If application ' \
                  'doesn\'t exist you can create it on admin.pubnub.com.'
        email = account.owner.email
        module.fail_json(msg=err_fmt.format(params['application'], email),
                         changed=account.changed, module_cache=dict(account))

    return application
def pubnub_keyset(module, account, application):
    """Look up the keyset named in the module parameters.

    Fails the module (exporting cached account data) when the application
    has no keyset with that name.

    :type module:  AnsibleModule
    :param module: Reference on module which contain module launch
                   information and status report methods.
    :type account:  Account
    :param account: Account model used to export cached data on failure.
    :type application:  Application
    :param application: Application whose keysets are searched.
    :rtype:  Keyset
    :return: Ready to use keyset model.
    """
    keyset_name = module.params['keyset']
    keyset = application.keyset(keyset_name)
    if keyset is not None:
        return keyset
    err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \
              'sure what correct keyset name has been passed. If keyset ' \
              'doesn\'t exist you can create it on admin.pubnub.com.'
    module.fail_json(msg=err_fmt.format(keyset_name, application.name),
                     changed=account.changed, module_cache=dict(account))
    return keyset
def pubnub_block(module, account, keyset):
    """Retrieve reference on target keyset from application model.

    NOTE: In case if there is no block with specified name and module
    configured to start/stop it, module will exit with error.
    :type module:  AnsibleModule
    :param module: Reference on module which contain module launch
                   information and status report methods.
    :type account:  Account
    :param account: Reference on PubNub account model which will be used in
                    case of error to export cached data.
    :type keyset:  Keyset
    :param keyset: Reference on keyset model from which reference on block
                   should be fetched.
    :rtype:  Block
    :return: Reference on initialized and ready to use keyset model.
    """
    block = None
    params = module.params
    try:
        block = keyset.block(params['name'])
    except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
        exc_msg = _failure_title_from_exception(exc)
        exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
        module.fail_json(msg=exc_msg, description=exc_descr,
                         changed=account.changed, module_cache=dict(account))

    # Report error because block doesn't exists and at the same time
    # requested to start/stop.
    if block is None and params['state'] in ['started', 'stopped']:
        block_name = params.get('name')
        module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
                         changed=account.changed, module_cache=dict(account))

    # Create a local (not yet saved) block model when asked to ensure
    # presence of a missing block.
    if block is None and params['state'] == 'present':
        block = Block(name=params.get('name'),
                      description=params.get('description'))
        keyset.add_block(block)

    if block:
        # Update block information if required.
        if params.get('changes') and params['changes'].get('name'):
            block.name = params['changes']['name']
        if params.get('description'):
            block.description = params.get('description')

    return block
def pubnub_event_handler(block, data):
    """Retrieve reference on target event handler from application model.

    :type block:  Block
    :param block: Reference on block model from which reference on event
                  handlers should be fetched.
    :type data:  dict
    :param data: Reference on dictionary which contain information about
                 event handler and whether it should be created or not.
    :rtype:  EventHandler
    :return: Reference on initialized and ready to use event handler model.
             'None' will be returned in case if there is no handler with
             specified name and no request to create it.
    """
    event_handler = block.event_handler(data['name'])

    # Prepare payload for event handler update.
    # BUGFIX: read the rename request without mutating the caller's dict
    # (the previous implementation popped 'changes' out of 'data') and
    # without crashing when 'changes' is present but None.
    changes = data.get('changes') or {}
    changed_name = changes.get('name')
    # NOTE(review): 'data.get("name")' wins over 'changed_name', so a rename
    # only takes effect when 'name' is absent/empty -- preserved as-is,
    # confirm against pubnub_blocks_client semantics before changing.
    name = data.get('name') or changed_name
    channels = data.get('channels')
    event = data.get('event')
    code = _content_of_file_at_path(data.get('src'))
    state = data.get('state') or 'present'

    # Create event handler if required.
    if event_handler is None and state == 'present':
        event_handler = EventHandler(name=name, channels=channels, event=event,
                                     code=code)
        block.add_event_handler(event_handler)

    # Update event handler if required.
    if event_handler is not None and state == 'present':
        if name is not None:
            event_handler.name = name
        if channels is not None:
            event_handler.channels = channels
        if event is not None:
            event_handler.event = event
        if code is not None:
            event_handler.code = code

    return event_handler
def _failure_title_from_exception(exception):
    """Compose human-readable title for module error title.

    Title will be based on status codes if they has been provided.
    :type exception:  exceptions.GeneralPubNubError
    :param exception: Reference on exception for which title should be
                      composed.
    :rtype:  str
    :return: Reference on error title which should be shown on module
             failure.
    """
    title = 'General REST API access error.'
    if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
        title = 'Authorization error: missing credentials.'
    elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
        title = 'Authorization error: wrong credentials.'
    elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
        title = 'API access error: insufficient access rights.'
    elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
        title = 'API access error: time token expired.'
    elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
        # BUGFIX: removed stray ')' from the end of this message.
        title = 'Block create did fail: block with same name already exists.'
    elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
        title = 'Unable fetch list of blocks for keyset.'
    elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
        title = 'Block creation did fail.'
    elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
        title = 'Block update did fail.'
    elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
        title = 'Block removal did fail.'
    elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
        title = 'Block start/stop did fail.'
    elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
        title = 'Event handler creation did fail: missing fields.'
    elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
        # BUGFIX: this branch previously duplicated the 'missing fields'
        # message from the branch above (copy/paste error).
        title = 'Event handler creation did fail: event handler with same ' \
                'name already exists.'
    elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
        title = 'Event handler creation did fail.'
    elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
        title = 'Event handler update did fail.'
    elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
        title = 'Event handler removal did fail.'
    return title
def _content_of_file_at_path(path):
    """Read file content.

    Try read content of file at specified path.
    :type path:  str
    :param path: Full path to location of file which should be read'ed.
    :rtype:  content
    :return: File content or 'None'
    """
    if not path or not os.path.exists(path):
        return None
    with open(path, mode="rt") as handle:
        raw = handle.read()
    try:
        return to_text(raw, errors='surrogate_or_strict')
    except UnicodeError:
        # Undecodable content is treated the same as a missing file.
        return None
def main():
    """Module entry point: parse arguments, apply block/handler changes,
    persist them and report results (with cache) back to Ansible."""
    fields = dict(
        email=dict(default='', required=False, type='str'),
        password=dict(default='', required=False, type='str', no_log=True),
        account=dict(default='', required=False, type='str'),
        application=dict(required=True, type='str'),
        keyset=dict(required=True, type='str'),
        state=dict(default='present', type='str',
                   choices=['started', 'stopped', 'present', 'absent']),
        name=dict(required=True, type='str'), description=dict(type='str'),
        event_handlers=dict(default=list(), type='list'),
        changes=dict(default=dict(), type='dict'),
        cache=dict(default=dict(), type='dict'),
        validate_certs=dict(default=True, type='bool'))
    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)

    if not HAS_PUBNUB_BLOCKS_CLIENT:
        module.fail_json(msg='pubnub_blocks_client required for this module.')

    params = module.params

    # Authorize user.
    user = pubnub_user(module)
    # Initialize PubNub account instance.
    account = pubnub_account(module, user=user)
    # Try fetch application with which module should work.
    application = pubnub_application(module, account=account)
    # Try fetch keyset with which module should work.
    keyset = pubnub_keyset(module, account=account, application=application)
    # Try fetch block with which module should work.
    block = pubnub_block(module, account=account, keyset=keyset)
    # A locally created, not-yet-saved block reports uid == -1.
    is_new_block = block is not None and block.uid == -1

    # Check whether block should be removed or not.
    if block is not None and params['state'] == 'absent':
        keyset.remove_block(block)
        block = None

    if block is not None:
        # Update block information if required.
        if params.get('changes') and params['changes'].get('name'):
            block.name = params['changes']['name']

        # Process event changes to event handlers.
        for event_handler_data in params.get('event_handlers') or list():
            state = event_handler_data.get('state') or 'present'
            event_handler = pubnub_event_handler(data=event_handler_data,
                                                 block=block)
            if state == 'absent' and event_handler:
                block.delete_event_handler(event_handler)

    # Update block operation state if required.  Newly created (unsaved)
    # blocks are skipped here.
    if block and not is_new_block:
        if params['state'] == 'started':
            block.start()
        elif params['state'] == 'stopped':
            block.stop()

    # Save current account state.
    if not module.check_mode:
        try:
            account.save()
        except (exceptions.APIAccessError, exceptions.KeysetError,
                exceptions.BlockError, exceptions.EventHandlerError,
                exceptions.GeneralPubNubError) as exc:
            # Export whatever state was gathered so later tasks can reuse it.
            module_cache = dict(account)
            module_cache.update(dict(pnm_user=dict(user)))
            exc_msg = _failure_title_from_exception(exc)
            exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
            module.fail_json(msg=exc_msg, description=exc_descr,
                             changed=account.changed,
                             module_cache=module_cache)

    # Report module execution results.
    module_cache = dict(account)
    module_cache.update(dict(pnm_user=dict(user)))
    changed_will_change = account.changed or account.will_change
    module.exit_json(changed=changed_will_change, module_cache=module_cache)
# Ansible executes modules as scripts; invoke the entry point directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
selboo/starl-mangle | ajaxterm2/ajaxterm2.py | 1 | 21163 | #!/usr/bin/env python
#
# ajaxterm2 - web-based terminal
#
# Copyright (C) 2013 Akiri Solutions, Inc. : http://www.akirisolutions.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ajaxterm: Copyright (C) Antony Lesuisse: lesuisse AT gmail.com
#
import array
import cgi
import fcntl
import os
import pty
import re
import signal
import select
import sys
import threading
import time
import termios
import struct
import httplib2
import logging
from ConfigParser import SafeConfigParser
import webob
class Terminal:
def __init__(self,width=80,height=24):
    """Create a virtual terminal screen of width x height character cells."""
    self.width=width
    self.height=height
    self.init()   # build the static escape-sequence dispatch tables
    self.reset()  # initialize the mutable screen state
def init(self):
    """Build the static dispatch and translation tables.

    Called once from __init__; nothing built here changes afterwards.
    NOTE: this module is Python 2 only (dict.has_key is used below).
    """
    # Single-byte controls and fixed ESC sequences.  A value of None marks
    # sequences that are recognized but deliberately ignored (replaced by
    # esc_ignore in the loop below).
    self.esc_seq={
        "\x00": None,
        "\x05": self.esc_da,
        "\x07": None,
        "\x08": self.esc_0x08,
        "\x09": self.esc_0x09,
        "\x0a": self.esc_0x0a,
        "\x0b": self.esc_0x0a,
        "\x0c": self.esc_0x0a,
        "\x0d": self.esc_0x0d,
        "\x0e": None,
        "\x0f": None,
        "\x1b#8": None,
        "\x1b=": None,
        "\x1b>": None,
        "\x1b(0": None,
        "\x1b(A": None,
        "\x1b(B": None,
        "\x1b[c": self.esc_da,
        "\x1b[0c": self.esc_da,
        "\x1b]R": None,
        "\x1b7": self.esc_save,
        "\x1b8": self.esc_restore,
        "\x1bD": None,
        "\x1bE": None,
        "\x1bH": None,
        "\x1bM": self.esc_ri,
        "\x1bN": None,
        "\x1bO": None,
        "\x1bZ": self.esc_da,
        "\x1ba": None,
        "\x1bc": self.reset,
        "\x1bn": None,
        "\x1bo": None,
    }
    for k,v in self.esc_seq.items():
        if v==None:
            self.esc_seq[k]=self.esc_ignore
    # regex
    # Parameterized sequences: CSI with numeric arguments, and a
    # BEL-terminated string sequence (ignored).
    d={
        r'\[\??([0-9;]*)([@ABCDEFGHJKLMPXacdefghlmnqrstu`])' : self.csi_dispatch,
        r'\]([^\x07]+)\x07' : self.esc_ignore,
    }
    self.esc_re=[]
    for k,v in d.items():
        self.esc_re.append((re.compile('\x1b'+k),v))
    # define csi sequences
    # Handlers whose missing-argument default is not [1].
    self.csi_seq={
        '@': (self.csi_at,[1]),
        '`': (self.csi_G,[1]),
        'J': (self.csi_J,[0]),
        'K': (self.csi_K,[0]),
    }
    # Auto-register every single-letter csi_X method via reflection,
    # defaulting its argument list to [1].
    for i in [i[4] for i in dir(self) if i.startswith('csi_') and len(i)==5]:
        if not self.csi_seq.has_key(i):
            self.csi_seq[i]=(getattr(self,'csi_'+i),[1])
    # Init 0-256 to latin1 and html translation table
    self.trl1=""
    for i in range(256):
        if i<32:
            self.trl1+=" "
        elif i<127 or i>160:
            self.trl1+=chr(i)
        else:
            self.trl1+="?"
    self.trhtml=""
    for i in range(256):
        if i==0x0a or (i>32 and i<127) or i>160:
            self.trhtml+=chr(i)
        elif i<=32:
            # self.trhtml+="\xa0" <- latin1
            self.trhtml += ' ' # utf8
        else:
            self.trhtml+="?"
def reset(self,s=""):
self.scr = array.array('i',[0x000700]*(self.width*self.height))
self.st = 0
self.sb = self.height-1
self.cx_bak = self.cx = 0
self.cy_bak = self.cy = 0
self.cl = 0
self.sgr = 0x000700
self.buf = ""
self.outbuf = ""
self.last_html = ""
def peek(self,y1,x1,y2,x2):
    """Return the flat slice of screen cells from (y1,x1) up to (y2,x2)."""
    begin = y1 * self.width + x1
    end = y2 * self.width + x2
    return self.scr[begin:end]
def poke(self,y,x,s):
    """Overwrite screen cells starting at (y,x) with the cell values in s."""
    offset = y * self.width + x
    self.scr[offset:offset + len(s)] = s
def zero(self,y1,x1,y2,x2):
    """Blank cells from (y1,x1) through (y2,x2) inclusive with default
    attributes (0x000700)."""
    w=self.width*(y2-y1)+x2-x1+1
    z=array.array('i',[0x000700]*w)
    self.scr[self.width*y1+x1:self.width*y2+x2+1]=z
def scroll_up(self,y1,y2):
    """Scroll rows y1..y2 up by one line; the freed bottom row is blanked."""
    self.poke(y1,0,self.peek(y1+1,0,y2,self.width))
    self.zero(y2,0,y2,self.width-1)
def scroll_down(self,y1,y2):
    """Scroll rows y1..y2 down by one line; the freed top row is blanked."""
    self.poke(y1+1,0,self.peek(y1,0,y2-1,self.width))
    self.zero(y1,0,y1,self.width-1)
def scroll_right(self,y,x):
    """Shift row y one cell to the right from column x (insert-character)."""
    self.poke(y,x+1,self.peek(y,x,y,self.width))
    self.zero(y,x,y,x)
def cursor_down(self):
    """Move the cursor down one row, scrolling the region at the bottom."""
    if self.cy>=self.st and self.cy<=self.sb:
        self.cl=0  # moving the cursor cancels any deferred wrap
        q,r=divmod(self.cy+1,self.sb+1)
        if q:
            # Already on the last row of the scroll region: scroll instead.
            self.scroll_up(self.st,self.sb)
            self.cy=self.sb
        else:
            self.cy=r
def cursor_right(self):
    """Advance the cursor one column; at the right margin, set the wrap
    flag instead (the wrap is performed by the next echo)."""
    q,r=divmod(self.cx+1,self.width)
    if q:
        self.cl=1
    else:
        self.cx=r
def echo(self,c):
    """Write printable character c at the cursor with current attributes."""
    if self.cl:
        # A wrap was deferred at the right margin: perform it now.
        self.cursor_down()
        self.cx=0
    self.scr[(self.cy*self.width)+self.cx]=self.sgr|ord(c)
    self.cursor_right()
def esc_0x08(self,s):
self.cx=max(0,self.cx-1)
def esc_0x09(self,s):
x=self.cx+8
q,r=divmod(x,8)
self.cx=(q*8)%self.width
def esc_0x0a(self,s):
self.cursor_down()
def esc_0x0d(self,s):
self.cl=0
self.cx=0
def esc_save(self,s):
self.cx_bak=self.cx
self.cy_bak=self.cy
def esc_restore(self,s):
self.cx=self.cx_bak
self.cy=self.cy_bak
self.cl=0
def esc_da(self,s):
self.outbuf="\x1b[?6c"
def esc_ri(self,s):
self.cy=max(self.st,self.cy-1)
if self.cy==self.st:
self.scroll_down(self.st,self.sb)
def esc_ignore(self,*s):
pass
# print "term:ignore: %s"%repr(s)
def csi_dispatch(self,seq,mo):
# CSI sequences
s=mo.group(1)
c=mo.group(2)
f=self.csi_seq.get(c,None)
if f:
try:
l=[min(int(i),1024) for i in s.split(';') if len(i)<4]
except ValueError:
l=[]
if len(l)==0:
l=f[1]
f[0](l)
# else:
# print 'csi ignore',c,l
def csi_at(self,l):
for i in range(l[0]):
self.scroll_right(self.cy,self.cx)
def csi_A(self,l):
self.cy=max(self.st,self.cy-l[0])
def csi_B(self,l):
self.cy=min(self.sb,self.cy+l[0])
def csi_C(self,l):
self.cx=min(self.width-1,self.cx+l[0])
self.cl=0
def csi_D(self,l):
self.cx=max(0,self.cx-l[0])
self.cl=0
def csi_E(self,l):
self.csi_B(l)
self.cx=0
self.cl=0
def csi_F(self,l):
self.csi_A(l)
self.cx=0
self.cl=0
def csi_G(self,l):
self.cx=min(self.width,l[0])-1
def csi_H(self,l):
if len(l)<2: l=[1,1]
self.cx=min(self.width,l[1])-1
self.cy=min(self.height,l[0])-1
self.cl=0
def csi_J(self,l):
if l[0]==0:
self.zero(self.cy,self.cx,self.height-1,self.width-1)
elif l[0]==1:
self.zero(0,0,self.cy,self.cx)
elif l[0]==2:
self.zero(0,0,self.height-1,self.width-1)
def csi_K(self,l):
if l[0]==0:
self.zero(self.cy,self.cx,self.cy,self.width-1)
elif l[0]==1:
self.zero(self.cy,0,self.cy,self.cx)
elif l[0]==2:
self.zero(self.cy,0,self.cy,self.width-1)
def csi_L(self,l):
for i in range(l[0]):
if self.cy<self.sb:
self.scroll_down(self.cy,self.sb)
def csi_M(self,l):
if self.cy>=self.st and self.cy<=self.sb:
for i in range(l[0]):
self.scroll_up(self.cy,self.sb)
def csi_P(self,l):
w,cx,cy=self.width,self.cx,self.cy
end=self.peek(cy,cx,cy,w)
self.csi_K([0])
self.poke(cy,cx,end[l[0]:])
def csi_X(self,l):
self.zero(self.cy,self.cx,self.cy,self.cx+l[0])
def csi_a(self,l):
    """HPR (horizontal position relative): same as cursor forward."""
    self.csi_C(l)
def csi_c(self,l):
    """DA query variant; intentionally not answered here."""
    #'\x1b[?0c' 0-8 cursor size
    pass
def csi_d(self,l):
    """VPA: move the cursor to absolute row l[0] (1-based)."""
    self.cy=min(self.height,l[0])-1
def csi_e(self,l):
    """VPR (vertical position relative): same as cursor down."""
    self.csi_B(l)
def csi_f(self,l):
    """HVP: horizontal/vertical position — alias for CUP (csi_H)."""
    self.csi_H(l)
def csi_h(self,l):
    """SM (set mode): insert mode (4) is recognised but not implemented."""
    if l[0]==4:
        pass
        # print "insert on"
def csi_l(self,l):
    """RM (reset mode): insert mode (4) is recognised but not implemented."""
    if l[0]==4:
        pass
        # print "insert off"
def csi_m(self, l):
    """SGR (Select Graphic Rendition): update self.sgr, which packs the
    cell attributes as 0xBBAAFF-style bytes (background, bold bit, foreground)."""
    for attr in l:
        if attr in (0, 27, 39, 49):
            # Reset / not-reversed / default fg / default bg:
            # back to plain white-on-black.
            self.sgr = 0x000700
        elif attr == 1:
            self.sgr |= 0x000800                      # set the bold bit
        elif attr == 7:
            self.sgr = 0x070000                       # reverse video
        elif 30 <= attr <= 37:
            # Foreground colour in the second byte; keep bg + bold bit.
            self.sgr = (self.sgr & 0xff08ff) | ((attr - 30) << 8)
        elif 40 <= attr <= 47:
            # Background colour in the third byte.
            self.sgr = (self.sgr & 0x00ffff) | ((attr - 40) << 16)
    # else:
    #     print "CSI sgr ignore",l,i
    # print 'sgr: %r %x'%(l,self.sgr)
def csi_r(self,l):
    """DECSTBM: set the scroll region from row l[0] to row l[1] (1-based)."""
    if len(l)<2: l=[0,self.height]
    self.st=min(self.height-1,l[0]-1)
    self.sb=min(self.height-1,l[1]-1)
    # Keep the bottom margin at or below the top margin.
    self.sb=max(self.st,self.sb)
def csi_s(self,l):
    """Save the cursor position (ANSI.SYS style) — delegates to ESC save."""
    self.esc_save(0)
def csi_u(self,l):
    """Restore the saved cursor position — delegates to ESC restore."""
    self.esc_restore(0)
def escape(self):
    """Try to interpret the accumulated escape-sequence buffer.

    The buffer is consumed when it matches a fixed sequence in
    self.esc_seq or one of the regex-based sequences in self.esc_re;
    it is discarded once it grows past 32 characters (malformed input);
    otherwise it keeps accumulating across write() calls.
    """
    e=self.buf
    if len(e)>32:
        # Runaway/garbled sequence: drop it rather than buffer forever.
        # print "error %r"%e
        self.buf=""
    elif e in self.esc_seq:
        self.esc_seq[e](e)
        self.buf=""
    else:
        for r,f in self.esc_re:
            mo=r.match(e)
            if mo:
                f(e,mo)
                self.buf=""
                break
    # if self.buf=='': print "ESC %r\n"%e
def write(self, s):
    """Feed host output into the emulator, one character at a time.

    Printable characters go straight to echo(); ESC opens an escape
    buffer, and while that buffer is non-empty (or the character is a
    one-byte sequence) everything is accumulated and re-interpreted via
    escape().
    """
    for ch in s:
        if self.buf or (ch in self.esc_seq):
            self.buf += ch
            self.escape()
        elif ch == '\x1b':
            self.buf += ch        # start of a multi-byte escape sequence
        else:
            self.echo(ch)         # ordinary printable character
def read(self):
    """Return the pending host-bound output (e.g. DA replies) and clear it."""
    pending, self.outbuf = self.outbuf, ""
    return pending
def dump(self):
    """Return the screen contents as raw characters.

    Each cell packs attributes in the high bytes and the character in the
    low byte; only the low byte is emitted.
    """
    return ''.join(chr(cell & 255) for cell in self.scr)
def dumplatin1(self):
    """Latin-1 screen dump; not provided by this build of the emulator."""
    raise NotImplementedError('dumplatin1')
def dumphtml(self, color=1):
    """Render the screen as an XHTML <pre> fragment.

    Consecutive cells sharing the same foreground/background attributes
    are merged into one <span class="fN bM">; the cursor cell is forced
    to a distinct colour pair so it stands out.  When the rendered screen
    is identical to the previous call, a tiny <idem/> document is
    returned instead so the client can skip redrawing.
    """
    h = self.height
    w = self.width
    r = ""
    span = ""
    span_bg,span_fg = -1,-1
    for i in range(h*w):
        # Each cell packs (bg, fg, char) into a single integer.
        q,c = divmod(self.scr[i], 256)
        if color:
            bg,fg = divmod(q, 256)
        else:
            bg,fg = 0,7   # monochrome mode: everything white-on-black
        if i == self.cy*w + self.cx:
            bg,fg = 1,7   # highlight the cursor cell
        if (bg != span_bg or fg != span_fg or i == h*w-1):
            # Attribute change (or last cell): flush the accumulated span.
            # NOTE(review): the very last cell's character is appended to
            # `span` *after* this flush and never emitted — confirm whether
            # that is intentional.
            if len(span):
                r += '<span class="f%d b%d">%s</span>' % \
                    (span_fg, span_bg, cgi.escape(span.translate(self.trhtml)))
            # r += '<span class="f%d b%d">%s</span>' % \
            #     (span_fg, span_bg, cgi.escape(span))
            span = ""
            span_bg,span_fg = bg,fg
        span += chr(c)
        if i%w == w-1:
            span += '\n'   # end of a screen row
    r = '<?xml version="1.0" encoding="ISO-8859-1"?><pre class="term">%s</pre>'%r
    if self.last_html == r:
        return '<?xml version="1.0"?><idem></idem>'
    else:
        self.last_html = r
        # print self
        return r
def __repr__(self):
    """Render the screen as a human-readable grid, one "|...|" row per line.

    Fix: removed the stray debug statement (``print '__repr__'``) that
    wrote to stdout every time a repr was taken — a side effect no
    __repr__ should have.
    """
    d = self.dumplatin1()
    r = ""
    for i in range(self.height):
        r += "|%s|\n" % d[self.width * i:self.width * (i + 1)]
    return r
class SynchronizedMethod:
    """Wrap a callable so every invocation runs while holding a shared lock.

    Used by Multiplex to serialize access to its process table between the
    background reader thread and the request handlers.
    """
    def __init__(self, lock, orig):
        self.lock = lock   # lock shared by all synchronized methods
        self.orig = orig   # the original, unsynchronized callable
    def __call__(self, *l):
        # Fix: the original acquire()/release() pair had no try/finally, so
        # an exception in orig() left the lock held forever and deadlocked
        # every other synchronized method.  `with` always releases.
        with self.lock:
            return self.orig(*l)
class Multiplex:
    """Manage a collection of pty-backed terminal sessions.

    One child process is forked per session (ssh by default, or the fixed
    command given to __init__); its output is pumped through a Terminal
    emulator instance, and sessions idle for more than two minutes are
    reaped.  All public methods are wrapped in SynchronizedMethod so the
    background reader thread and the web handlers never race on self.proc.
    """
    def __init__(self,cmd=None):
        # Ignore SIGCHLD so finished children are reaped automatically.
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        self.cmd=cmd
        self.proc={}   # master fd -> {'pid','term','buf','time'}
        self.lock=threading.RLock()
        self.thread=threading.Thread(target=self.loop)
        self.alive=1
        # synchronize methods
        for name in ['create','fds','proc_read','proc_write','dump','die','run']:
            orig=getattr(self,name)
            setattr(self,name,SynchronizedMethod(self.lock,orig))
        self.thread.start()
    def create(self, w=80, h=25, ipad='', user='', port=''):
        """Fork a new session on a pty and return the master fd (session key)."""
        pid,fd=pty.fork()
        if pid==0:
            # Child: close every inherited fd above stdio before exec.
            try:
                fdl=[int(i) for i in os.listdir('/proc/self/fd')]
            except OSError:
                fdl=range(256)
            for i in [i for i in fdl if i>2]:
                try:
                    os.close(i)
                except OSError:
                    pass
            if self.cmd:
                cmd=['/bin/sh','-c',self.cmd]
            elif os.getuid()==1:
                # NOTE(review): uid 1 looks odd — /bin/login is usually
                # reserved for root (uid 0); confirm this is intentional.
                cmd=['/bin/login']
            else:
                #sys.stdout.write("Login: ")
                #login=sys.stdin.readline().strip()
                port='-p'+port
                # Only accept simple user names, preventing option/argument
                # injection into the ssh command line.
                if re.match('^[0-9A-Za-z-_. ]+$',user):
                    cmd=['ssh']
                    cmd+=['-oPreferredAuthentications=keyboard-interactive,password']
                    cmd+=['-oNoHostAuthenticationForLocalhost=yes']
                    cmd+=['-oLogLevel=FATAL']
                    cmd+=[port]
                    cmd+=['-F/dev/null','-l',user,ipad]
                else:
                    os._exit(0)
            # Minimal, controlled environment for the child process.
            env={}
            env["COLUMNS"]=str(w)
            env["LINES"]=str(h)
            env["TERM"]="linux"
            env["PATH"]=os.environ['PATH']
            os.execvpe(cmd[0],cmd,env)
        else:
            # Parent: non-blocking master side, then set the pty window size.
            fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
            # python bug http://python.org/sf/1112949 on amd64
            fcntl.ioctl(fd, struct.unpack('i',struct.pack('I',termios.TIOCSWINSZ))[0], struct.pack("HHHH",h,w,0,0))
            self.proc[fd]={'pid':pid,'term':Terminal(w,h),'buf':'','time':time.time()}
            return fd
    def die(self):
        """Ask the background loop to stop."""
        self.alive = 0
    def run(self):
        """True while the background loop should keep polling."""
        return self.alive
    def fds(self):
        """List the master fds of all live sessions."""
        return self.proc.keys()
    def proc_kill(self, fd):
        """Mark fd as expired, then reap every session idle for >120 s."""
        if fd in self.proc:
            self.proc[fd]['time']=0
        t=time.time()
        # .keys() returns a list under Python 2, so deleting entries while
        # iterating is safe here (it would raise under Python 3).
        for i in self.proc.keys():
            t0=self.proc[i]['time']
            if (t-t0)>120:
                try:
                    os.close(i)
                    os.kill(self.proc[i]['pid'],signal.SIGTERM)
                except (IOError,OSError):
                    pass
                del self.proc[i]
    def proc_read(self, fd):
        """Pump pending child output through the emulator; echo replies back."""
        try:
            t=self.proc[fd]['term']
            t.write(os.read(fd,65536))
            reply=t.read()
            if reply:
                os.write(fd,reply)
            self.proc[fd]['time']=time.time()
        except (KeyError,IOError,OSError):
            # pty gone or session unknown: schedule reaping.
            self.proc_kill(fd)
    def proc_write(self, fd, s):
        """Send keystrokes to the child; reap the session if the pty is gone."""
        try:
            os.write(fd, s)
        except (IOError,OSError):
            self.proc_kill(fd)
    def dump(self, fd, color=1):
        """HTML screen dump for a session, or False if it no longer exists."""
        try:
            return self.proc[fd]['term'].dumphtml(color)
        except KeyError:
            return False
    def loop(self):
        """Reader thread: poll all ptys; terminate children on shutdown."""
        while self.run():
            fds=self.fds()
            i,o,e=select.select(fds, [], [], 1.0)
            for fd in i:
                self.proc_read(fd)
            if len(i):
                # Brief pause so bursts of output coalesce into one dump.
                time.sleep(0.002)
        for i in self.proc.keys():
            try:
                os.close(i)
                os.kill(self.proc[i]['pid'],signal.SIGTERM)
            except (IOError,OSError):
                pass
class AjaxTerm:
    """WSGI application bridging the browser-side ajaxterm client to Multiplex.

    Request parameters (POST or GET): 'i' = user@host, 'p' = ssh port,
    'k' = keystrokes to deliver, 'w'/'h' = terminal size, 'c' = colour flag.
    Sessions are keyed by host+user+port plus the caller's session cookie.
    """
    def __init__(self, cmd=None):
        self.multi = Multiplex(cmd)
        self.session = {}   # session key -> pty master fd
    def __call__(self, environ, start_response):
        req = webob.Request(environ)
        res = webob.Response()
        ipad = req.POST['i'].split('@')[1]
        user = req.POST['i'].split('@')[0]
        port = req.POST['p']
        #print time.time()
        #print req.POST
        if not environ['HTTP_COOKIE']:
            print 'No Session...'
            exit()
        # NOTE(review): module-level mutable state (Status_Ok, session_key)
        # is shared across requests and threads, so this validity counter is
        # racy and only behaves for a single concurrent client.  exit() also
        # kills the whole server process on a bad session — confirm intended.
        try:
            Status_Ok
        except NameError:
            global Status_Ok,session_key
            Status_Ok = 1
        if Status_Ok < 2:
            # Re-validate the session cookie against the external session
            # service whenever the counter runs down.
            session_txt = environ['HTTP_COOKIE']
            session_res = re.compile('sessionid=(\w{32})')
            session_key = session_res.findall( session_txt )[0]
            http = httplib2.Http()
            url = "http://mangle.starl.com.cn/api/session/%s" %session_key
            response,content = http.request(url,'GET')
            if content == 'ok':
                Status_Ok = 600   # allow ~600 requests before re-checking
            else:
                exit()
        if req.method == 'POST':
            #s = req.POST["s"]
            s = ipad+user+port+'.'+session_key
            k = req.POST["k"]
            color = 'c' in req.POST and req.POST["c"] or 0
            w = int(req.POST["w"])
            h = int(req.POST["h"])
            Status_Ok = Status_Ok - 1
        else:
            #s = req.GET["s"]
            s = ipad+user+port+'.'+session_key
            k = req.GET["k"]
            color = 'c' in req.GET and req.GET["c"] or 0
            w = int(req.GET["w"])
            h = int(req.GET["h"])
            Status_Ok = Status_Ok - 1
        #print s
        #print self.session
        if s in self.session:
            term = self.session[s]
        else:
            # New session: clamp the requested size to sane bounds.
            if not (w>2 and w<256 and h>2 and h<100):
                w,h=80,25
            term=self.session[s]=self.multi.create(w,h,ipad,user,port)
        if k:
            self.multi.proc_write(term,k)
            time.sleep(0.002)   # give the child a moment to echo
        dump = self.multi.dump(term, color)
        res.content_type = 'text/xml'
        if isinstance(dump, str):
            res.body_file.write(dump)
            #req.write(dump)
            #req.response_gzencode=1
        else:
            # The session died: forget it and tell the client nothing changed.
            del self.session[s]
            res.body_file.write('<?xml version="1.0"?><idem></idem>')
        # print "sessions %r"%self.session
        return res(environ, start_response)
def make_app(global_conf, cmd=None):
    """Paste application factory: build the AjaxTerm WSGI app.

    `cmd`, when given, forces every session to run that shell command
    instead of ssh; `global_conf` is accepted for the Paste factory
    signature but unused.
    """
    return AjaxTerm(cmd)
def make_server(global_conf, port, host='', use_reloader=False):
    """Paste server factory: return a callable that serves a WSGI app.

    Validates the port up front; optionally installs the Paste source
    reloader. Heavy Paste imports are deferred until actually needed.
    """
    port_number = int(port)
    if use_reloader:
        from paste import reloader
        reloader.install()

    def serve(app):
        from paste import httpserver
        httpserver.serve(app, host=host, port=port_number)

    return serve
def make_logger(name, config):
    """Configure one logger from a ``[logger:<name>]`` config section.

    Reads level/filename/format/datefmt from the section; a filename of
    'stderr' (or none) selects a plain StreamHandler, anything else a
    RotatingFileHandler sized by max_bytes/backup_count.
    """
    # Accept either the bare logger name or the full section name.
    section = name if name.startswith('logger:') else 'logger:' + name
    _, name = section.split(':', 1)

    level = getattr(logging, config.get(section, 'level'))
    filename = config.get(section, 'filename')
    if filename and filename.lower() == 'stderr':
        filename = None   # 'stderr' means: log to the console, not a file
    fmt = config.get(section, 'format', raw=True)
    datefmt = config.get(section, 'datefmt', raw=True)

    logger = logging.getLogger() if name == 'root' else logging.getLogger(name)

    if filename:
        from logging.handlers import RotatingFileHandler
        handler = RotatingFileHandler(
            filename, "a",
            config.getint(section, 'max_bytes'),
            config.getint(section, 'backup_count'))
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    logger.addHandler(handler)
    logger.setLevel(level)
def make_loggers(path):
    """Configure every logger declared by a [logger:*] section of an ini file.

    Missing options fall back to `defaults`: INFO level, console output,
    a timestamped format, and a 20 MB x 5 rotation policy.
    """
    defaults = { 'level': 'INFO',
                 'filename': None,
                 'format': '[%(asctime)s] %(levelname)s %(message)s',
                 'datefmt': None,
                 'max_bytes': 20*1024*1024,
                 'backup_count': 5 }
    # NOTE(review): non-string defaults rely on Python 2 SafeConfigParser
    # leniency; Python 3's configparser requires string values.
    config = SafeConfigParser(defaults)
    config.read([path])
    for section in config.sections():
        if section.startswith('logger:'):
            make_logger(section, config)
def main():
    """Entry point: locate the ini file, configure logging, serve via Paste.

    Returns 0 on normal shutdown (passed to sys.exit by the __main__ guard).
    """
    from argparse import ArgumentParser
    parser = ArgumentParser(description='Akiri WSGI Server')
    parser.add_argument('configpath', nargs='?', default='ajaxterm2.ini')
    args = parser.parse_args()
    path = os.path.abspath(args.configpath)
    if not os.path.isfile(path):
        # Fall back to the config file bundled with the installed package.
        import pkg_resources
        dist = pkg_resources.get_distribution('ajaxterm2')
        path = os.path.join(dist.location, args.configpath)
    url = 'config:'+path
    make_loggers(path)
    # Paste resolves both the app and the server from the same ini URL.
    import paste.deploy
    app = paste.deploy.loadapp(url)
    serve = paste.deploy.loadserver(url)
    serve(app)
    return 0
if __name__ == '__main__':
    # Run as a script: propagate main()'s status code to the shell.
    sys.exit(main())
| apache-2.0 |
emgee/formal | formal/__init__.py | 1 | 3033 | """A package (for Nevow) for defining the schema, validation and rendering of
HTML forms.
"""
version_info = (0, 12, 0)
version = '.'.join([str(i) for i in version_info])
from nevow import static
from formal.types import *
from formal.validation import *
from formal.widget import *
from formal.widgets.restwidget import *
from formal.widgets.multiselect import *
from formal.widgets.richtextarea import *
from formal.form import Form, Field, Group, ResourceMixin, renderForm
from formal import iformal
def widgetFactory(widgetClass, *a, **k):
    """Return a one-argument factory that builds `widgetClass` instances.

    The returned callable takes the adapted original object and forwards
    it to the widget class together with the extra positional and keyword
    arguments captured here.
    """
    def factory(original):
        return widgetClass(original, *a, **k)
    return factory
try:
    import pkg_resources
except ImportError:
    # Running from a plain checkout: serve the static assets straight from
    # the package directory on disk.
    import os.path
    defaultCSS = static.File(os.path.join(os.path.split(__file__)[0], 'formal.css'))
    formsJS = static.File(os.path.join(os.path.split(__file__)[0], 'js'))
else:
    # Installed (possibly zipped) package: resolve the assets lazily via
    # pkg_resources so extraction only happens on first access.
    from formal.util import LazyResource
    defaultCSS = LazyResource(lambda: static.File(pkg_resources.resource_filename('formal', 'formal.css')))
    formsJS = LazyResource(lambda: static.File(pkg_resources.resource_filename('formal', 'js')))
    del LazyResource
# Register standard adapters
from twisted.python.components import registerAdapter
from formal import converters
from formal.util import SequenceKeyLabelAdapter

# Default widget used to render each schema type.
registerAdapter(TextInput, String, iformal.IWidget)
registerAdapter(TextInput, Integer, iformal.IWidget)
registerAdapter(TextInput, Float, iformal.IWidget)
registerAdapter(Checkbox, Boolean, iformal.IWidget)
registerAdapter(DatePartsInput, Date, iformal.IWidget)
registerAdapter(TextInput, Time, iformal.IWidget)
registerAdapter(FileUploadRaw, File, iformal.IWidget)
registerAdapter(TextAreaList, Sequence, iformal.IWidget)

# Tuples act as (key, label) pairs for choice-style widgets.
registerAdapter(SequenceKeyLabelAdapter, tuple, iformal.IKey)
registerAdapter(SequenceKeyLabelAdapter, tuple, iformal.ILabel)

# Converters between schema values and their form (string/tuple) encodings.
registerAdapter(converters.NullConverter, String, iformal.IStringConvertible)
registerAdapter(converters.DateToDateTupleConverter, Date, iformal.IDateTupleConvertible)
registerAdapter(converters.BooleanToStringConverter, Boolean, iformal.IBooleanConvertible)
registerAdapter(converters.BooleanToStringConverter, Boolean, iformal.IStringConvertible)
registerAdapter(converters.IntegerToStringConverter, Integer, iformal.IStringConvertible)
registerAdapter(converters.FloatToStringConverter, Float, iformal.IStringConvertible)
registerAdapter(converters.DateToStringConverter, Date, iformal.IStringConvertible)
registerAdapter(converters.TimeToStringConverter, Time, iformal.IStringConvertible)
registerAdapter(converters.NullConverter, File, iformal.IFileConvertible)
registerAdapter(converters.NullConverter, Sequence, iformal.ISequenceConvertible)
registerAdapter(converters.SequenceToStringConverter, Sequence, iformal.IStringConvertible)
try:
    Decimal
except NameError:
    # Decimal support is unavailable in this environment: skip its adapters.
    pass
else:
    registerAdapter(TextInput, Decimal, iformal.IWidget)
    registerAdapter(converters.DecimalToStringConverter, Decimal, iformal.IStringConvertible)

# Registration is done: keep the module namespace clean.
del SequenceKeyLabelAdapter
del registerAdapter
| mit |
KSG-IT/ksg-nett | economy/urls.py | 1 | 1520 | from django.urls import path
from . import views
# URL routes for the economy app. Every route is named so templates and
# views can reverse() it.
urlpatterns = [
    # Overview / dashboard.
    path('', views.economy_home, name='economy_home'),
    # Deposits.
    path('deposit/', views.deposits, name='economy_deposit'),
    path('deposit/<int:deposit_id>', views.deposit_detail, name='economy_deposit_detail'),
    path('deposit/<int:deposit_id>/approve', views.deposit_approve, name='economy_approve_deposit'),
    path('deposit/<int:deposit_id>/invalidate', views.deposit_invalidate, name='economy_invalidate_deposit'),
    path('deposit/<int:deposit_id>/edit', views.deposit_edit, name='economy_edit_deposit'),
    # Soci sessions.
    path("soci-sessions", views.soci_sessions, name="economy_soci_sessions"),
    #path("soci-sessions-closed", views.soci_sessions_closed, name="economy_soci_sessions_closed"),
    #path("soci-sessions-open", views.soci_sessions_open, name="economy_soci_sessions_open"),
    path("soci-sessions/create", views.soci_session_create, name="economy_soci_session_create"),
    path("soci-sessions/<int:soci_session_id>/delete", views.soci_session_delete, name="economy_soci_session_delete"),
    path("soci-sessions/<int:soci_session_id>/close", views.soci_session_close, name="economy_soci_session_close"),
    path("soci-sessions/<int:soci_session_id>", views.soci_session_detail, name="economy_soci_session_detail"),
    # Product orders.
    # Fix: this route was the only one registered without a name, so it could
    # not be reversed; give it a name consistent with the rest of the module.
    path("soci-sessions/<int:soci_session_id>/product-order/add", views.product_order_add,
         name="economy_product_order_add"),
    path("product-orders/<int:product_order_id>/delete", views.product_order_delete,
         name="economy_product_orders_delete"),
]
| gpl-3.0 |
GREO/gnuradio-git | gr-trellis/src/examples/test_tcm_combined.py | 8 | 3535 | #!/usr/bin/env python
from gnuradio import gr
from gnuradio import audio
from gnuradio import trellis
from gnuradio import eng_notation
import math
import sys
import fsm_utils
def run_test (f,Kb,bitspersymbol,K,dimensionality,constellation,N0,seed):
    """Run one trellis-coded-modulation simulation over an AWGN channel.

    Builds the flowgraph LFSR source -> TCM encoder -> constellation mapper
    -> Gaussian noise adder -> combined Viterbi decoder -> LFSR checker,
    runs it to completion, and returns (total_shorts, shorts_in_error).
    """
    tb = gr.top_block ()

    # TX
    src = gr.lfsr_32k_source_s()
    src_head = gr.head (gr.sizeof_short,Kb/16) # packet size in shorts
    s2fsmi = gr.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the FSM input cardinality
    enc = trellis.encoder_ss(f,0) # initial state = 0
    mod = gr.chunks_to_symbols_sf(constellation,dimensionality)

    # CHANNEL
    add = gr.add_ff()
    noise = gr.noise_source_f(gr.GR_GAUSSIAN,math.sqrt(N0/2),seed)

    # RX
    va = trellis.viterbi_combined_fs(f,K,0,-1,dimensionality,constellation,trellis.TRELLIS_EUCLIDEAN) # Put -1 if the Initial/Final states are not set.
    fsmi2s = gr.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts
    dst = gr.check_lfsr_32k_s();

    tb.connect (src,src_head,s2fsmi,enc,mod)
    tb.connect (mod,(add,0))
    tb.connect (noise,(add,1))
    tb.connect (add,va,fsmi2s,dst)

    tb.run()

    # A bit of cheating: run the program once and print the
    # final encoder state..
    # Then put it as the last argument in the viterbi block
    #print "final state = " , enc.ST()

    ntotal = dst.ntotal ()
    nright = dst.nright ()
    runlength = dst.runlength ()  # NOTE(review): unused; kept for parity with sibling examples
    return (ntotal,ntotal-nright)
def main(args):
    """Parse CLI arguments (fsm file, Es/N0 in dB, repetitions) and run the
    Monte-Carlo simulation, printing running error-rate estimates."""
    nargs = len (args)
    if nargs == 3:
        fname=args[0]
        esn0_db=float(args[1]) # Es/No in dB
        rep=int(args[2]) # number of times the experiment is run to collect enough errors
    else:
        sys.stderr.write ('usage: test_tcm_combined.py fsm_fname Es/No_db repetitions\n')
        sys.exit (1)

    # system parameters
    f=trellis.fsm(fname) # get the FSM specification from a file (will hopefully be automated in the future...)
    Kb=1024*16  # packet size in bits (make it multiple of 16)
    bitspersymbol = int(round(math.log(f.I())/math.log(2))) # bits per FSM input symbol
    K=Kb/bitspersymbol # packet size in trellis steps
    modulation = fsm_utils.psk4 # see fsm_utils.py for available predefined modulations
    dimensionality = modulation[0]
    constellation = modulation[1]
    if len(constellation)/dimensionality != f.O():
        sys.stderr.write ('Incompatible FSM output cardinality and modulation size.\n')
        sys.exit (1)

    # calculate average symbol energy
    Es = 0
    for i in range(len(constellation)):
        Es = Es + constellation[i]**2
    Es = Es / (len(constellation)/dimensionality)
    N0=Es/pow(10.0,esn0_db/10.0); # noise variance

    tot_s=0 # total number of transmitted shorts
    terr_s=0 # total number of shorts in error
    terr_p=0 # total number of packets in error
    for i in range(rep):
        # Different seed per run so each repetition sees a fresh noise realization.
        (s,e)=run_test(f,Kb,bitspersymbol,K,dimensionality,constellation,N0,-long(666+i)) # run experiment with different seed to get different noise realizations
        tot_s=tot_s+s
        terr_s=terr_s+e
        terr_p=terr_p+(terr_s!=0)
        if ((i+1)%100==0) : # display progress
            print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
    # estimate of the (short or bit) error rate
    print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
if __name__ == '__main__':
    # Script entry point: forward everything after the program name.
    main (sys.argv[1:])
| gpl-3.0 |
grilo/ansible-1 | lib/ansible/utils/module_docs_fragments/avi.py | 74 | 1719 | #
# Created on December 12, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Avi Version: 16.3.4
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
    """Shared Ansible documentation fragment for Avi Networks modules.

    Modules reference this via ``extends_documentation_fragment``; Ansible
    splices DOCUMENTATION into each module's own documentation block.
    """
    # Avi common documentation fragment
    DOCUMENTATION = """
options:
    controller:
        description:
            - IP address or hostname of the controller. The default value is the environment variable C(AVI_CONTROLLER).
    username:
        description:
            - Username used for accessing Avi controller. The default value is the environment variable C(AVI_USERNAME).
    password:
        description:
            - Password of Avi user in Avi controller. The default value is the environment variable C(AVI_PASSWORD).
    tenant:
        description:
            - Name of tenant used for all Avi API calls and context of object.
        default: admin
    tenant_uuid:
        description:
            - UUID of tenant used for all Avi API calls and context of object.
        default: ''
    api_version:
        description:
            - Avi API version of to use for Avi API and objects.
"""
| gpl-3.0 |
jniediek/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 14 | 1313 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()

###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)

# Tag detected heartbeats with this (arbitrary, unused elsewhere) event id.
event_id = 999

# Locate the QRS complexes on a single MEG channel.
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
                                                     ch_name='MEG 1531')

# Read epochs: +/-100 ms around each detected heartbeat, on that channel only.
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
                       include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
                    proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))

###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
york-2015/yowsup | yowsup/layers/axolotl/protocolentities/notification_encrypt.py | 48 | 1181 | from yowsup.layers.protocol_notifications.protocolentities import NotificationProtocolEntity
from yowsup.structs import ProtocolTreeNode
class EncryptNotification(NotificationProtocolEntity):
    """An "encrypt" notification from s.whatsapp.net reporting how many
    pre-keys remain on the server, e.g.:

    <notification t="1419824928" id="2451228097" from="s.whatsapp.net" type="encrypt">
        <count value="9">
        </count>
    </notification>
    """

    def __init__(self, count, timestamp, _id = None, notify = None, offline = None):
        super(EncryptNotification, self).__init__(
            "encrypt", _id, "s.whatsapp.net", timestamp, notify, offline)
        self.setProps(count)

    def setProps(self, count):
        # The server transmits the value as a string attribute; keep it as int.
        self.count = int(count)

    def getCount(self):
        return self.count

    def toProtocolTreeNode(self):
        node = super(EncryptNotification, self).toProtocolTreeNode()
        node.addChild(ProtocolTreeNode("count", {"value": str(self.count)}))
        return node

    @staticmethod
    def fromProtocolTreeNode(node):
        # Parse as a generic notification first, then specialize in place.
        entity = NotificationProtocolEntity.fromProtocolTreeNode(node)
        entity.__class__ = EncryptNotification
        entity.setProps(node.getChild("count")["value"])
        return entity
joernhees/scikit-learn | sklearn/externals/joblib/_memory_helpers.py | 52 | 3606 | try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
argment, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
| bsd-3-clause |
charris/numpy | numpy/tests/test_numpy_version.py | 5 | 1575 | """
Check the numpy version is valid.
Note that a development version is marked by the presence of 'dev0' or '+'
in the version string, all else is treated as a release. The version string
itself is set from the output of ``git describe`` which relies on tags.
Examples
--------
Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2
Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0
Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a
Note that a release is determined by the version string, which in turn
is controlled by the result of the ``git describe`` command.
"""
import re
import numpy as np
from numpy.testing import assert_
def test_valid_numpy_version():
    # Verify that the numpy version is a valid one (no .post suffix or other
    # nonsense). See gh-6431 for an issue caused by an invalid version.
    release_re = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9]|)"
    dev_re = r"(\.dev0|)(\+[0-9]*\.g[0-9a-f]+|)"
    # Development builds additionally allow a .dev0 / +commit suffix.
    pattern = release_re if np.version.release else release_re + dev_re
    res = re.match(pattern + '$', np.__version__)
    assert_(res is not None, np.__version__)
def test_short_version():
    # Check numpy.short_version actually exists and agrees with __version__
    # (development builds drop the local "+..." part).
    if not np.version.release:
        assert_(np.__version__.split("+")[0] == np.version.short_version,
                "short_version mismatch in development version")
    else:
        assert_(np.__version__ == np.version.short_version,
                "short_version mismatch in release version")
| bsd-3-clause |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/nn/python/ops/sampling_ops.py | 16 | 15000 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops related to candidate sampling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
def _rank_resample(weights, biases, inputs, sampled_values, num_resampled,
                   resampling_temperature, partition_strategy):
  """A helper function for rank_sampled_softmax_loss.

  This computes, for each i in `sampled_values`,

      log(sum_j exp((w_i * x_j + b_i) / resampling_temperature))

  where w_i, b_i are the weight and bias of the i-th class, respectively,
  and j ranges over the rows of `inputs`. For efficiency, we rearrange the
  computation to

      log(sum_j exp(w_i * (x_j / resampling_temperature))) +
          b_i / resampling_temperature.

  This translates to the following batched computation using tensorflow ops:

      reduce_logsumexp(matmul(embeddings,
                       transpose(inputs / resampling_temperature))) +
          biases / resampling_temperature

  The computation of the first term is colocated with the embeddings using
  `transform_fn` in `embedding_ops._embedding_lookup_and_transform`. The second
  term, not the bottleneck, is computed at the worker.

  Args:
    weights: From `rank_sampled_softmax_loss`.
    biases: From `rank_sampled_softmax_loss`.
    inputs: From `rank_sampled_softmax_loss`.
    sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
    num_resampled: An `int`. This many values are selected from
        `sampled_values` using the adaptive resampling algorithm. The caller
        must ensure that `num_resampled` is less than the size of
        `sampled_values`.
    resampling_temperature: A scalar `Tensor` with the temperature parameter
        for the adaptive resampling algorithm.
    partition_strategy: From `rank_sampled_softmax_loss`.

  Returns:
    A tuple of (`resampled_candidates`, `true_expected_count`,
        `resampled_expected_count`), similar to `sampled_values` but sampled
        down to `num_resampled` values.
  """
  # This code supports passing a Tensor for num_resampled, but since it is only
  # called with an int, that's what we specify in the arg list. If this
  # function is ever externalized, we should change the doc to support Tensor.

  sampled, true_expected_count, sampled_expected_count = sampled_values

  # The sampler outputs are constants w.r.t. training: block gradients.
  sampled = math_ops.cast(array_ops.stop_gradient(sampled), dtypes.int64)
  true_expected_count = array_ops.stop_gradient(true_expected_count)
  sampled_expected_count = array_ops.stop_gradient(sampled_expected_count)

  reweighted_inputs = inputs / resampling_temperature

  def logsumexp_logit(embeddings):
    # Per sampled class: log-sum-exp of its (temperature-scaled) logits
    # over the whole batch.
    return math_ops.reduce_logsumexp(
        math_ops.matmul(embeddings, reweighted_inputs, transpose_b=True),
        axis=1,
        keep_dims=False)

  # Calling this protected form of embedding_lookup allows co-locating
  # the logsumexp computation with the partitioned weights, which yields
  # a large speedup in practice.
  sampled_logits = embedding_ops._embedding_lookup_and_transform(  # pylint: disable=protected-access
      weights, sampled, partition_strategy, transform_fn=logsumexp_logit)
  sampled_b = array_ops.reshape(
      embedding_ops.embedding_lookup(biases, sampled, partition_strategy), [-1])
  sampled_logits += sampled_b / resampling_temperature

  # Keep the num_resampled classes with the highest adaptive score.
  _, resampled_indices = nn.top_k(sampled_logits, k=num_resampled, sorted=False)
  resampled = array_ops.gather(sampled, indices=resampled_indices)
  resampled_expected_count = array_ops.gather(
      sampled_expected_count, indices=resampled_indices)

  return resampled, true_expected_count, resampled_expected_count
def rank_sampled_softmax_loss(weights,
                              biases,
                              labels,
                              inputs,
                              num_sampled,
                              num_resampled,
                              num_classes,
                              num_true,
                              sampled_values,
                              resampling_temperature,
                              remove_accidental_hits,
                              partition_strategy,
                              name=None):
  """Computes softmax loss using rank-based adaptive resampling.

  This has been shown to improve rank loss after training compared to
  @{tf.nn.sampled_softmax_loss}. For a description of the algorithm and some
  experimental results, please see: [TAPAS: Two-pass Approximate Adaptive
  Sampling for Softmax](https://arxiv.org/abs/1707.03073).

  Sampling follows two phases:
  * In the first phase, `num_sampled` classes are selected using
    @{tf.nn.learned_unigram_candidate_sampler} or supplied `sampled_values`.
    The logits are calculated on those sampled classes. This phase is
    similar to @{tf.nn.sampled_softmax_loss}.
  * In the second phase, the `num_resampled` classes with highest predicted
    probability are kept. Probabilities are
    `LogSumExp(logits / resampling_temperature)`, where the sum is over
    `inputs`.

  The `resampling_temperature` parameter controls the "adaptiveness" of the
  resampling. At lower temperatures, resampling is more adaptive because it
  picks more candidates close to the predicted classes. A common strategy is
  to decrease the temperature as training proceeds.

  See @{tf.nn.sampled_softmax_loss} for more documentation on sampling and
  for typical default values for some of the parameters.

  This operation is for training only. It is generally an underestimate of
  the full softmax loss.

  A common use case is to use this method for training, and calculate the full
  softmax loss for evaluation or inference. In this case, you must set
  `partition_strategy="div"` for the two losses to be consistent, as in the
  following example:

  ```python
  if mode == "train":
    loss = rank_sampled_softmax_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot,
        logits=logits)
  ```

  Args:
    weights: A `Tensor` or `PartitionedVariable` of shape `[num_classes, dim]`,
        or a list of `Tensor` objects whose concatenation along dimension 0
        has shape [num_classes, dim].  The (possibly-sharded) class embeddings.
    biases: A `Tensor` or `PartitionedVariable` of shape `[num_classes]`.
        The (possibly-sharded) class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`. The target classes.  Note that this format differs from
        the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_resampled: An `int`.  The number of classes to select from the
        `num_sampled` classes using the adaptive resampling algorithm.  Must be
        less than `num_sampled`.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        If None, default to `nn.learned_unigram_candidate_sampler`.
    resampling_temperature: A scalar `Tensor` with the temperature parameter
        for the adaptive resampling algorithm.
    remove_accidental_hits: A `bool`.  Whether to remove "accidental hits"
        where a sampled class equals one of the target classes.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        See @{tf.nn.embedding_lookup} for more details.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.

  Raises:
    ValueError: If `num_sampled <= num_resampled`, if
      `num_sampled > num_classes`, or if `partition_strategy` is not one of
      `"div"` or `"mod"`.
  """
  # Validate before creating any graph nodes; these are programming errors,
  # not data-dependent conditions, so they raise eagerly.
  if num_sampled > num_classes:
    raise ValueError("num_sampled ({}) cannot be greater than num_classes ({})".
                     format(num_sampled, num_classes))
  if num_sampled <= num_resampled:
    raise ValueError("num_resampled ({}) must be less than num_sampled ({})".
                     format(num_resampled, num_sampled))
  if partition_strategy not in ("div", "mod"):
    raise ValueError(
        "unsupported partition_strategy ({})".format(partition_strategy))
  with ops.name_scope(name, "rank_sampled_softmax_loss", [
      weights, biases, labels, inputs, sampled_values, resampling_temperature
  ]) as name:
    if not sampled_values:
      sampled_values = nn.learned_unigram_candidate_sampler(
          true_classes=labels,
          num_true=num_true,
          num_sampled=num_sampled,
          unique=True,
          range_max=num_classes)
    # From sampled_values, select the top num_resampled values using the
    # adaptive rank resampling strategy.
    resampled_values = _rank_resample(weights, biases, inputs, sampled_values,
                                      num_resampled, resampling_temperature,
                                      partition_strategy)
    return nn.sampled_softmax_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        num_sampled=num_resampled,
        num_classes=num_classes,
        num_true=num_true,
        sampled_values=resampled_values,
        remove_accidental_hits=remove_accidental_hits,
        partition_strategy=partition_strategy,
        name=name)
def sampled_sparse_softmax_loss(weights,
                                biases,
                                labels,
                                inputs,
                                num_sampled,
                                num_classes,
                                sampled_values=None,
                                remove_accidental_hits=True,
                                partition_strategy="mod",
                                name="sampled_sparse_softmax_loss"):
  """Computes and returns the sampled sparse softmax training loss.

  This is a faster way to train a softmax classifier over a huge number of
  classes.

  This operation is for training only.  It is generally an underestimate of
  the full softmax loss.

  A common use case is to use this method for training, and calculate the full
  softmax loss for evaluation or inference. In this case, you must set
  `partition_strategy="div"` for the two losses to be consistent, as in the
  following example:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_sparse_softmax_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.squeeze(labels),
        logits=logits)
  ```

  See our [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf)

  Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        [num_classes, dim].  The (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, 1]`.
        The index of the single target class for each row of logits.  Note that
        this format differs from the `labels` argument of
        `nn.sparse_softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits:  A `bool`.  whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  Default is
        True.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.
  """
  sampled_logits, _ = nn_impl._compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=1,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  # _compute_sampled_logits places the single true class at column 0 of the
  # returned logits, so every example's target index is simply zero.
  batch_size = array_ops.shape(sampled_logits)[0]
  zero_labels = array_ops.zeros([batch_size, 1], dtype=dtypes.int64)
  return nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=array_ops.squeeze(zero_labels), logits=sampled_logits)
| mit |
damdam-s/hr | __unported__/hr_payroll_period/wizard/payroll_period_end.py | 22 | 46063 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import logging
import math
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
from openerp import netsvc
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as OEDATETIME_FORMAT
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OEDATE_FORMAT
from openerp.tools.translate import _
from openerp.osv import fields, orm
_logger = logging.getLogger(__name__)
class payroll_period_end_1(orm.TransientModel):
    """First screen of the end-of-payroll-period wizard.

    Presents a read-only snapshot of the pay period passed in
    ``context['active_id']``: schedule alerts and payslip exceptions by
    severity, public holidays, workflow state flags, pay slip amendments,
    and the exact-change cash breakdown needed to pay everyone.
    """

    _name = 'hr.payroll.period.end.1'
    _description = 'End of Payroll Period Wizard Step 1'

    # Cached cash-denomination breakdown (Ethiopian Birr notes and coins);
    # 'done' marks the cache as valid.  Populated by _get_change().
    # NOTE(review): this is mutable state on the *class*, shared across all
    # sessions using this transient wizard — concurrent users could observe
    # each other's totals.  Verify whether per-call computation is needed.
    _change_res = {
        'br100': 0,
        'br50': 0,
        'br10': 0,
        'br5': 0,
        'br1': 0,
        'cent50': 0,
        'cent25': 0,
        'cent10': 0,
        'cent05': 0,
        'cent01': 0,
        'done': False,
    }

    _columns = {
        # The pay period being closed (plain integer id, not a many2one).
        'period_id': fields.integer(
            'Period ID',
        ),
        'is_ended': fields.boolean(
            'Past End Day?',
        ),
        'public_holiday_ids': fields.many2many(
            'hr.holidays.public.line',
            'hr_holidays_pay_period_rel',
            'holiday_id',
            'period_id',
            'Public Holidays',
            readonly=True,
        ),
        # Counts of schedule alerts in the period, by severity.
        'alert_critical': fields.integer(
            'Critical Severity',
            readonly=True,
        ),
        'alert_high': fields.integer(
            'High Severity',
            readonly=True,
        ),
        'alert_medium': fields.integer(
            'Medium Severity',
            readonly=True,
        ),
        'alert_low': fields.integer(
            'Low Severity',
            readonly=True
        ),
        # Counts of payslip exceptions in the period, by severity.
        'pex_critical': fields.integer(
            'Critical',
            readonly=True,
        ),
        'pex_high': fields.integer(
            'High',
            readonly=True,
        ),
        'pex_medium': fields.integer(
            'Medium',
            readonly=True,
        ),
        'pex_low': fields.integer(
            'Low',
            readonly=True,
        ),
        # Snapshot of the period's workflow state.
        'locked': fields.boolean(
            'Is Period Locked?',
            readonly=True,
        ),
        'can_unlock': fields.boolean(
            'Can Unlock Period?',
            readonly=True,
        ),
        'payslips': fields.boolean(
            'Have Pay Slips Been Generated?',
            readonly=True,
        ),
        'ps_generated': fields.boolean(
            'Pay Slip Generated?',
            readonly=True,
        ),
        'payment_started': fields.boolean(
            'Payment Started?',
            readonly=True,
        ),
        'closed': fields.boolean(
            'Pay Period Closed?',
            readonly=True,
        ),
        # Exact-change denomination counts (see _change_res above).
        'br100': fields.integer(
            '100 Birr',
            readonly=True,
        ),
        'br50': fields.integer(
            '50 Birr',
            readonly=True,
        ),
        'br10': fields.integer(
            '10 Birr',
            readonly=True,
        ),
        'br5': fields.integer(
            '5 Birr',
            readonly=True,
        ),
        'br1': fields.integer(
            '1 Birr',
            readonly=True,
        ),
        'cent50': fields.integer(
            '50 Cents',
            readonly=True,
        ),
        'cent25': fields.integer(
            '25 Cents',
            readonly=True,
        ),
        'cent10': fields.integer(
            '10 Cents',
            readonly=True,
        ),
        'cent05': fields.integer(
            '5 Cents',
            readonly=True,
        ),
        'cent01': fields.integer(
            '1 Cent',
            readonly=True,
        ),
        'exact_change': fields.char(
            'Exact Change Total',
            size=32,
            readonly=True,
        ),
        # NOTE(review): both amendment fields use the *same* relation table
        # ('hr_payslip_pay_period_rel'), so they would store/display the same
        # link records; confirm whether a second table was intended.
        'ps_amendments_conf': fields.many2many(
            'hr.payslip.amendment',
            'hr_payslip_pay_period_rel',
            'amendment_id',
            'period_id',
            'Confirmed Amendments',
            readonly=True,
        ),
        'ps_amendments_draft': fields.many2many(
            'hr.payslip.amendment',
            'hr_payslip_pay_period_rel',
            'amendment_id',
            'period_id',
            'Draft Amendments',
            readonly=True,
        ),
    }
def _get_period_id(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('active_id', False)
def _get_is_ended(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
flag = self.pool.get('hr.payroll.period').is_ended(
cr, uid, period_id, context=context)
return flag
def _alerts_count(self, cr, uid, severity, context=None):
alert_obj = self.pool.get('hr.schedule.alert')
if context is None:
context = {}
period_id = context.get('active_id', False)
alert_ids = []
if period_id:
period = self.pool.get('hr.payroll.period').browse(
cr, uid, period_id, context=context)
if period:
employee_ids = []
[employee_ids.append(c.employee_id.id)
for c in period.schedule_id.contract_ids]
alert_ids = alert_obj.search(
cr, uid, ['&', ('name', '>=', period.date_start),
('name', '<=', period.date_end),
('severity', '=', severity),
('employee_id', 'in', employee_ids),
],
context=context)
return len(alert_ids)
def _critical_alerts(self, cr, uid, context=None):
return self._alerts_count(cr, uid, 'critical', context)
def _high_alerts(self, cr, uid, context=None):
return self._alerts_count(cr, uid, 'high', context)
def _medium_alerts(self, cr, uid, context=None):
return self._alerts_count(cr, uid, 'medium', context)
def _low_alerts(self, cr, uid, context=None):
return self._alerts_count(cr, uid, 'low', context)
def _pex_count(self, cr, uid, severity, context=None):
ex_obj = self.pool.get('hr.payslip.exception')
run_obj = self.pool.get('hr.payslip.run')
if context is None:
context = {}
period_id = context.get('active_id', False)
ex_ids = []
slip_ids = []
if period_id:
period = self.pool.get('hr.payroll.period').browse(
cr, uid, period_id, context=context)
if period and period.register_id:
for run_id in period.register_id.run_ids:
data = run_obj.read(
cr, uid, run_id.id, ['slip_ids'], context=context)
[slip_ids.append(i) for i in data['slip_ids']]
ex_ids = ex_obj.search(cr, uid, [('severity', '=', severity),
('slip_id', 'in', slip_ids),
],
context=context)
return len(ex_ids)
def _pex_critical(self, cr, uid, context=None):
return self._pex_count(cr, uid, 'critical', context)
def _pex_high(self, cr, uid, context=None):
return self._pex_count(cr, uid, 'high', context)
def _pex_medium(self, cr, uid, context=None):
return self._pex_count(cr, uid, 'medium', context)
def _pex_low(self, cr, uid, context=None):
return self._pex_count(cr, uid, 'low', context)
def _no_missing_punches(self, cr, uid, context=None):
ids = self._missing_punches(cr, uid, context)
res = len(ids)
return res
def _missing_punches(self, cr, uid, context=None):
#
# TODO - Someone who cares about DST should update this code to handle
# it.
#
missing_punch_ids = []
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
period = self.pool.get('hr.payroll.period').browse(
cr, uid, period_id, context=context)
if period:
attendance_obj = self.pool.get('hr.attendance')
utc_tz = timezone('UTC')
dt = datetime.strptime(period.date_start, '%Y-%m-%d %H:%M:%S')
utcDtStart = utc_tz.localize(dt, is_dst=False)
dt = datetime.strptime(period.date_end, '%Y-%m-%d %H:%M:%S')
utcDtEnd = utc_tz.localize(dt, is_dst=False)
for contract in period.schedule_id.contract_ids:
employee = contract.employee_id
punch_ids = attendance_obj.search(cr, uid, [
('employee_id', '=', employee.id),
'&',
('name', '>=', utcDtStart.strftime(
'%Y-%m-%d %H:%M:S')),
('name', '<=', utcDtEnd.strftime(
'%Y-%m-%d %H:%M:S')),
], order='name', context=context)
prevPunch = False
if len(punch_ids) > 0:
punches = attendance_obj.browse(
cr, uid, punch_ids, context=context)
for punch in punches:
if not prevPunch:
# First Punch must be a sign-in
if punch.action != 'sign_in':
missing_punch_ids.append(punch.id)
elif punch.action == 'sign_in':
if prevPunch.action != 'sign_out':
missing_punch_ids.append(prevPunch.id)
elif punch.action == 'sign_out':
if prevPunch.action != 'sign_in':
missing_punch_ids.append(punch.id)
prevPunch = punch
# The last punch should be a sign out
if prevPunch and prevPunch.action != 'sign_out':
missing_punch_ids.append(prevPunch.id)
return missing_punch_ids
def _get_locked(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['state'], context=context)
flag = (data.get('state') in [
'locked', 'generate', 'payment', 'closed'])
return flag
def _get_can_unlock(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['state'], context=context)
flag = (data.get('state') in ['locked', 'generate'])
return flag
def _get_payslips(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['state', 'register_id'], context=context)
flag |= (
data.get('state') in ['generate', 'payment', 'closed']
and data.get('register_id', False)
)
return flag
def _get_ps_generated(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['state'], context=context)
if data.get('state') in ['generate']:
flag = True
return flag
def _get_payment_started(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['state'], context=context)
if data.get('state') in ['payment', 'closed']:
flag = True
return flag
    def _get_change(self, cr, uid, context=None):
        """Compute (and cache in ``self._change_res``) the count of each
        cash denomination needed to pay every employee's NET salary in
        exact change.

        Reads the active period's payroll register, collects the 'NET'
        salary-rule total of every payslip, and greedily decomposes each
        amount into 100/50/10/5/1 birr notes and 50/25/10/5/1 cent coins.

        NOTE(review): the cache lives on the class (shared across sessions)
        and is invalidated only by setting ``_change_res['done'] = False``
        elsewhere — confirm this is acceptable for concurrent users.
        """
        # Serve the cached breakdown if it is still valid.
        if self._change_res['done']:
            return self._change_res
        # Reset all denomination counters before recomputing.
        self._change_res = {
            'br100': 0,
            'br50': 0,
            'br10': 0,
            'br5': 0,
            'br1': 0,
            'cent50': 0,
            'cent25': 0,
            'cent10': 0,
            'cent05': 0,
            'cent01': 0,
            'done': False,
        }
        net_lines = []
        if context is None:
            context = {}
        period_id = context.get('active_id', False)
        if period_id:
            data = self.pool.get('hr.payroll.period').read(
                cr, uid, period_id, ['register_id'], context=context
            )
            if data['register_id']:
                data = self.pool.get(
                    'hr.payroll.register').read(
                        cr, uid, data['register_id'][0], ['run_ids'],
                        context=context
                    )
                if data['run_ids']:
                    ps_runs = self.pool.get(
                        'hr.payslip.run').browse(cr, uid, data['run_ids'],
                                                 context=context)
                    # Gather the NET total of every payslip line in every run.
                    for psr in ps_runs:
                        for slip in psr.slip_ids:
                            for line in slip.line_ids:
                                if line.salary_rule_id.code == 'NET':
                                    net_lines.append(line.total)
        if len(net_lines) > 0:
            for net in net_lines:
                # Split into whole birr and fractional cents.
                cents, birrs = math.modf(net)
                birrs = int(birrs)
                cents = int(round(cents, 2) * 100.0)
                # Greedy decomposition; '/' is integer division here
                # (Python 2 ints).
                if birrs >= 100:
                    self._change_res['br100'] += birrs / 100
                    birrs %= 100
                if birrs >= 50:
                    self._change_res['br50'] += birrs / 50
                    birrs %= 50
                if birrs >= 10:
                    self._change_res['br10'] += birrs / 10
                    birrs %= 10
                if birrs >= 5:
                    self._change_res['br5'] += birrs / 5
                    birrs %= 5
                if birrs >= 1:
                    self._change_res['br1'] += birrs
                if cents >= 50:
                    self._change_res['cent50'] += cents / 50
                    cents %= 50
                if cents >= 25:
                    self._change_res['cent25'] += cents / 25
                    cents %= 25
                if cents >= 10:
                    self._change_res['cent10'] += cents / 10
                    cents %= 10
                if cents >= 5:
                    self._change_res['cent05'] += cents / 5
                    cents %= 5
                if cents >= 1:
                    self._change_res['cent01'] += cents
        self._change_res['done'] = True
        return self._change_res
def _get_br100(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['br100']
def _get_br50(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['br50']
def _get_br10(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['br10']
def _get_br5(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['br5']
def _get_br1(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['br1']
def _get_cent50(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['cent50']
def _get_cent25(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['cent25']
def _get_cent10(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['cent10']
def _get_cent05(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['cent05']
def _get_cent01(self, cr, uid, context=None):
return self._get_change(cr, uid, context=context)['cent01']
def _get_change_total(self, cr, uid, context=None):
birr = self._change_res['br100'] * 100
birr += self._change_res['br50'] * 50
birr += self._change_res['br10'] * 10
birr += self._change_res['br5'] * 5
birr += self._change_res['br1']
cents = self._change_res['cent50'] * 50
cents += self._change_res['cent25'] * 25
cents += self._change_res['cent10'] * 10
cents += self._change_res['cent05'] * 5
cents += self._change_res['cent01']
birr += cents / 100
cents %= 100
return 'Br ' + str(birr) + '.' + str(cents)
def _get_closed(self, cr, uid, context=None):
flag = False
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['state'], context=context)
if data.get('state') in ['closed']:
flag = True
return flag
def _get_public_holidays(self, cr, uid, context=None):
holiday_ids = []
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return holiday_ids
data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id, ['date_start', 'date_end'], context=context
)
start = datetime.strptime(
data['date_start'], OEDATETIME_FORMAT).date().strftime(
OEDATE_FORMAT)
end = datetime.strptime(
data['date_end'], OEDATETIME_FORMAT).date().strftime(OEDATE_FORMAT)
holiday_ids = self.pool.get('hr.holidays.public.line').search(
cr, uid, [
'&',
('date', '>=', start),
('date', '<=', end),
], context=context
)
return holiday_ids
def _get_confirmed_amendments(self, cr, uid, context=None):
psa_ids = []
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return psa_ids
psa_ids = self.pool.get(
'hr.payslip.amendment').search(
cr, uid, [
('pay_period_id', '=', period_id),
('state', 'in', ['validate']),
], context=context
)
return psa_ids
def _get_draft_amendments(self, cr, uid, context=None):
psa_ids = []
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return psa_ids
psa_ids = self.pool.get(
'hr.payslip.amendment').search(
cr, uid, [
('pay_period_id', '=', period_id),
('state', 'in', ['draft']),
], context=context
)
return psa_ids
_defaults = {
'period_id': _get_period_id,
'is_ended': _get_is_ended,
'public_holiday_ids': _get_public_holidays,
'alert_critical': _critical_alerts,
'alert_high': _high_alerts,
'alert_medium': _medium_alerts,
'alert_low': _low_alerts,
'pex_critical': _pex_critical,
'pex_high': _pex_high,
'pex_medium': _pex_medium,
'pex_low': _pex_low,
'locked': _get_locked,
'can_unlock': _get_can_unlock,
'ps_generated': _get_ps_generated,
'payslips': _get_payslips,
'payment_started': _get_payment_started,
'closed': _get_closed,
'br100': _get_br100,
'br50': _get_br50,
'br10': _get_br10,
'br5': _get_br5,
'br1': _get_br1,
'cent50': _get_cent50,
'cent25': _get_cent25,
'cent10': _get_cent10,
'cent05': _get_cent05,
'cent01': _get_cent01,
'exact_change': _get_change_total,
'ps_amendments_conf': _get_confirmed_amendments,
'ps_amendments_draft': _get_draft_amendments,
}
def reload(self, cr, uid, ids, context=None):
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.period.end.1',
'type': 'ir.actions.act_window',
'target': 'current',
'context': context
}
def view_alerts(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
employee_ids = []
if period_id:
period = self.pool.get('hr.payroll.period').browse(
cr, uid, period_id, context=context)
if period:
[employee_ids.append(c.employee_id.id)
for c in period.schedule_id.contract_ids]
else:
return {}
else:
return {}
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.schedule.alert',
'domain': [
('employee_id', 'in', employee_ids),
'&',
('name', '>=', period.date_start),
('name', '<=', period.date_end),
],
'type': 'ir.actions.act_window',
'target': 'current',
'context': context
}
def view_payroll_exceptions(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
ex_obj = self.pool.get('hr.payslip.exception')
run_obj = self.pool.get('hr.payslip.run')
ex_ids = []
slip_ids = []
if period_id:
period = self.pool.get('hr.payroll.period').browse(
cr, uid, period_id, context=context)
if period:
for run_id in period.register_id.run_ids:
data = run_obj.read(
cr, uid, run_id.id, ['slip_ids'], context=context)
[slip_ids.append(i) for i in data['slip_ids']]
ex_ids = ex_obj.search(
cr, uid, [('slip_id', 'in', slip_ids)], context=context)
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.payslip.exception',
'domain': [('id', 'in', ex_ids)],
'type': 'ir.actions.act_window',
'target': 'current',
'context': context
}
def _do_recalc_alerts(self, cr, uid, ids, context=None):
alert_obj = self.pool.get('hr.schedule.alert')
if context is None:
context = {}
period_id = context.get('active_id', False)
if period_id:
period = self.pool.get('hr.payroll.period').browse(
cr, uid, period_id, context=context)
if period:
employee_ids = []
[employee_ids.append(c.employee_id.id)
for c in period.schedule_id.contract_ids]
dtStart = datetime.strptime(
period.date_start, '%Y-%m-%d %H:%M:%S')
dtEnd = datetime.strptime(period.date_end, '%Y-%m-%d %H:%M:%S')
dtNext = dtStart
while dtNext <= dtEnd:
for employee_id in employee_ids:
alert_obj.compute_alerts_by_employee(
cr, uid, employee_id,
dtNext.date().strftime(
'%Y-%m-%d'),
context=context)
dtNext += relativedelta(days=+1)
def recalc_alerts(self, cr, uid, ids, context=None):
self._do_recalc_alerts(cr, uid, ids, context=context)
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.period.end.1',
'type': 'ir.actions.act_window',
'target': 'inline',
'context': context
}
def lock_period(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return
# TODO - should not be necessary any more
# Make sure to re-calculate alerts first. Just in case.
# self._do_recalc_alerts(cr, uid, ids, context=context)
data = self.read(cr, uid, ids[0], ['alert_critical'], context=context)
if data.get('alert_critical') != 0:
raise orm.except_orm(
_('Unable to Lock the Payroll Period'),
_('There are one or more Critical Severity Exceptions. '
'Please correct them before proceeding.')
)
wkf_service = netsvc.LocalService('workflow')
wkf_service.trg_validate(
uid, 'hr.payroll.period', period_id, 'lock_period', cr)
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.period.end.1',
'type': 'ir.actions.act_window',
'target': 'inline',
'context': context
}
def _remove_register(self, cr, uid, register_id, context=None):
reg_obj = self.pool.get('hr.payroll.register')
run_obj = self.pool.get('hr.payslip.run')
slip_obj = self.pool.get('hr.payslip')
reg_data = reg_obj.read(
cr, uid, register_id, ['run_ids'], context=context)
for run_id in reg_data['run_ids']:
run_data = run_obj.read(
cr, uid, run_id, ['slip_ids'], context=context)
slip_obj.unlink(cr, uid, run_data['slip_ids'], context=context)
run_obj.unlink(cr, uid, reg_data['run_ids'], context=context)
reg_obj.unlink(cr, uid, register_id, context=context)
def unlock_period(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return
# Re-wind pay period if we are in payslip generation state
#
p_data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id,
['state', 'register_id'],
context=context)
if p_data['state'] == 'generate':
self._remove_register(
cr, uid, p_data['register_id'][0], context=context)
wkf_service = netsvc.LocalService('workflow')
wkf_service.trg_validate(
uid, 'hr.payroll.period', period_id, 'unlock_period', cr)
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.period.end.1',
'type': 'ir.actions.act_window',
'target': 'inline',
'context': context
}
    def create_payroll_register(self, cr, uid, ids, context=None):
        """Create the payroll register and payslip batches for the active
        pay period, then advance its workflow to the payslip-generation
        stage.

        Requires the period to be in the 'locked' or 'generate' state; any
        pre-existing register (and its runs/payslips) is deleted and
        regenerated from scratch.  Returns an action that redisplays this
        wizard.
        """
        if context is None:
            context = {}
        period_id = context.get('active_id', False)
        if not period_id:
            return
        # Get relevant data from the period object
        period_obj = self.pool.get('hr.payroll.period')
        p_data = period_obj.read(cr, uid, period_id,
                                 ['name', 'date_start', 'date_end',
                                  'schedule_id', 'register_id', 'state'],
                                 context=context)
        if p_data['state'] not in ['locked', 'generate']:
            raise orm.except_orm(_('Invalid Action'), _(
                'You must lock the payroll period first.'))
        # Remove any pre-existing payroll registers
        if p_data['register_id']:
            self._remove_register(cr, uid, p_data['register_id'][0], context)
        # Create the payroll register
        register_obj = self.pool.get('hr.payroll.register')
        register_id = register_obj.create(cr, uid, {
            'name': p_data['name'] + ': Register',
            'date_start': p_data['date_start'],
            'date_end': p_data['date_end'],
        }, context=context)
        # Get list of departments and list of contracts for this period's
        # schedule
        r_data = register_obj.read(
            cr, uid, register_id, ['company_id'], context=context)
        department_ids = self.pool.get('hr.department').search(
            cr, uid,
            [('company_id', '=', r_data['company_id'][0])],
            context=context)
        s_data = self.pool.get(
            'hr.payroll.period.schedule').read(
                cr, uid, p_data['schedule_id'][0],
                ['contract_ids', 'tz'], context=context)
        # Create payslips for employees, in all departments,
        # that have a contract in this
        # pay period's schedule
        self.create_payslip_runs(
            cr, uid, register_id, department_ids, s_data['contract_ids'],
            s_data['tz'], context=context)
        # Attach payroll register to this pay period
        period_obj.write(cr, uid, period_id, {
            'register_id': register_id}, context=context)
        # Trigger re-calculation of exact change
        # (invalidates the class-level _change_res cache)
        self._change_res['done'] = False
        # Mark the pay period as being in the payroll generation stage
        netsvc.LocalService('workflow').trg_validate(
            uid, 'hr.payroll.period', period_id, 'generate_payslips', cr)
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'hr.payroll.period.end.1',
            'type': 'ir.actions.act_window',
            'target': 'inline',
            'context': context
        }
    def create_payslip_runs(
        self, cr, uid, register_id, dept_ids, contract_ids, tz, context=None
    ):
        """Create one payslip batch (hr.payslip.run) per department and one
        payslip per eligible employee, then compute all created payslips.

        :param register_id: id of the hr.payroll.register the runs attach to
        :param dept_ids: hr.department ids to create batches for
        :param contract_ids: hr.contract ids belonging to the pay period's
            schedule; only employees with one of these contracts get a slip
        :param tz: time zone name of the pay period schedule
            (assumed to be a valid pytz zone -- TODO confirm upstream
            validation)
        """
        contract_obj = self.pool.get('hr.contract')
        dept_obj = self.pool.get('hr.department')
        ee_obj = self.pool.get('hr.employee')
        slip_obj = self.pool.get('hr.payslip')
        run_obj = self.pool.get('hr.payslip.run')
        reg_obj = self.pool.get('hr.payroll.register')
        pr = reg_obj.browse(cr, uid, register_id, context=context)
        # DateTime in db is store as naive UTC. Convert it to explicit UTC
        # and then convert
        # that into the time zone of the pay period schedule.
        #
        local_tz = timezone(tz)
        utc_tz = timezone('UTC')
        utcDTStart = utc_tz.localize(
            datetime.strptime(pr.date_start, '%Y-%m-%d %H:%M:%S'))
        loclDTStart = utcDTStart.astimezone(local_tz)
        date_start = loclDTStart.strftime('%Y-%m-%d')
        utcDTEnd = utc_tz.localize(
            datetime.strptime(pr.date_end, '%Y-%m-%d %H:%M:%S'))
        loclDTEnd = utcDTEnd.astimezone(local_tz)
        date_end = loclDTEnd.strftime('%Y-%m-%d')
        # Get Pay Slip Amendments, Employee ID, and the amount of the amendment
        #
        psa_codes = []
        psa_ids = self._get_confirmed_amendments(cr, uid, context)
        for psa in self.pool.get('hr.payslip.amendment').browse(
            cr, uid, psa_ids, context=context
        ):
            psa_codes.append(
                (psa.employee_id.id, psa.input_id.code, psa.amount))
        # Keep track of employees that have already been included
        seen_ee_ids = []
        # Create payslip batch (run) for each department
        #
        for dept in dept_obj.browse(cr, uid, dept_ids, context=context):
            ee_ids = []
            contracts_dict = {}
            # A contract may be tied to the department directly or through
            # the employee's own department ...
            c_ids = contract_obj.search(cr, uid, [
                ('id', 'in', contract_ids),
                '|',
                ('department_id.id', '=', dept.id),
                ('employee_id.department_id.id', '=', dept.id)
            ], context=context)
            # ... or through the (current or final) job position's department.
            c2_ids = contract_obj.search(
                cr, uid, [
                    ('id', 'in', contract_ids),
                    '|',
                    ('job_id.department_id.id', '=', dept.id),
                    ('end_job_id.department_id.id', '=', dept.id),
                ], context=context)
            for i in c2_ids:
                if i not in c_ids:
                    c_ids.append(i)
            # Map employee id -> list of contract ids in this department.
            c_data = contract_obj.read(
                cr, uid, c_ids, ['employee_id'], context=context)
            for data in c_data:
                if contracts_dict.get(data['employee_id'][0], False):
                    contracts_dict[data['employee_id'][0]].append(data['id'])
                else:
                    contracts_dict.update(
                        {data['employee_id'][0]: [data['id']]})
                if data['employee_id'][0] not in ee_ids:
                    ee_ids.append(data['employee_id'][0])
            if len(ee_ids) == 0:
                continue
            # Alphabetize
            ee_ids = ee_obj.search(
                cr, uid, [('id', 'in', ee_ids)], context=context)
            run_res = {
                'name': dept.complete_name,
                'date_start': date_start,
                'date_end': date_end,
                'register_id': register_id,
            }
            run_id = run_obj.create(cr, uid, run_res, context=context)
            # Create a pay slip for each employee in each department that has
            # a contract in the pay period schedule of this pay period
            #
            slip_ids = []
            for ee in ee_obj.browse(cr, uid, ee_ids, context=context):
                if ee.id in seen_ee_ids:
                    continue
                found_contract = False
                for contract in ee.contract_ids:
                    # Does employee have a contract in this pay period?
                    #
                    dContractStart = datetime.strptime(
                        contract.date_start, OEDATE_FORMAT).date()
                    # An open-ended contract is treated as running through
                    # the end of the period.
                    dContractEnd = loclDTEnd.date()
                    if contract.date_end:
                        dContractEnd = datetime.strptime(
                            contract.date_end, OEDATE_FORMAT).date()
                    if (
                        dContractStart > loclDTEnd.date()
                        or dContractEnd < loclDTStart.date()
                    ):
                        continue
                    elif contract.id in contracts_dict[ee.id]:
                        found_contract = contract
                        break
                if not found_contract:
                    continue
                # If the contract doesn't cover the full pay period use
                # the contract
                # dates as start/end dates instead of the full period.
                #
                temp_date_start = date_start
                temp_date_end = date_end
                if dContractStart > datetime.strptime(
                    date_start, OEDATE_FORMAT
                ).date():
                    temp_date_start = dContractStart.strftime(OEDATE_FORMAT)
                if (
                    found_contract.date_end
                    and dContractEnd < datetime.strptime(
                        date_end, OEDATE_FORMAT).date()
                ):
                    temp_date_end = dContractEnd.strftime(OEDATE_FORMAT)
                # Let the payslip onchange compute name/structure/contract
                # and the default input and worked-days lines.
                slip_data = slip_obj.onchange_employee_id(
                    cr, uid, [],
                    temp_date_start, temp_date_end,
                    ee.id, contract_id=False,
                    context=context)
                # Make modifications to rule inputs
                #
                for line in slip_data['value'].get('input_line_ids', False):
                    # Pay Slip Amendment modifications
                    for eid, code, amount in psa_codes:
                        if eid == ee.id and line['code'] == code:
                            line['amount'] = amount
                            break
                res = {
                    'employee_id': ee.id,
                    'name': slip_data['value'].get('name', False),
                    'struct_id': slip_data['value'].get('struct_id', False),
                    'contract_id': slip_data['value'].get(
                        'contract_id', False),
                    'payslip_run_id': run_id,
                    'input_line_ids': [
                        (0, 0, x) for x in slip_data['value'].get(
                            'input_line_ids', False)
                    ],
                    'worked_days_line_ids': [
                        (0, 0, x)
                        for x
                        in slip_data['value'].get(
                            'worked_days_line_ids', False)
                    ],
                    'date_from': date_start,
                    'date_to': date_end
                }
                slip_ids.append(slip_obj.create(cr, uid, res, context=context))
                seen_ee_ids.append(ee.id)
            # Calculate payroll for all the pay slips in this batch (run)
            slip_obj.compute_sheet(cr, uid, slip_ids, context=context)
        return
def view_payroll_register(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
p_data = self.pool.get(
'hr.payroll.period').read(cr, uid, period_id, ['register_id'],
context=context)
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.register',
'res_id': p_data['register_id'][0],
'type': 'ir.actions.act_window',
'target': 'current',
'nodestroy': True,
'context': context,
}
def start_payments(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
# Do not continue if there are still any critical payroll exceptions
#
data = self.read(cr, uid, ids[0], ['pex_critical'], context=context)
if data.get('pex_critical') != 0:
raise orm.except_orm(
_('Unable to Start Payments'),
_('There are one or more Critical Payroll Exceptions. '
'Please correct them before proceeding.'))
p_data = self.pool.get('hr.payroll.period').read(
cr, uid, period_id,
['state', 'register_id'],
context=context)
if p_data['state'] != 'generate':
return {'type': 'ir.actions.act_window_close'}
wkf_service = netsvc.LocalService('workflow')
# Set Pay Slip Amendments to Done
#
psa_ids = self._get_confirmed_amendments(cr, uid, context)
[wkf_service.trg_validate(
uid, 'hr.payslip.amendment', psa_id, 'payslip_done', cr)
for psa_id in psa_ids]
# Verify Pay Slips
#
reg_obj = self.pool.get('hr.payroll.register')
reg_data = reg_obj.read(
cr, uid, p_data['register_id'][0], ['run_ids'], context=context)
for run_id in reg_data['run_ids']:
run_data = self.pool.get('hr.payslip.run').read(
cr, uid, run_id,
['slip_ids'], context=context)
[wkf_service.trg_validate(
uid, 'hr.payslip', slip_id, 'hr_verify_sheet', cr)
for slip_id in run_data['slip_ids']]
wkf_service.trg_validate(
uid, 'hr.payroll.period', period_id, 'start_payments', cr)
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.period.end.1',
'type': 'ir.actions.act_window',
'target': 'inline',
'context': context
}
def print_payslips(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
p_data = self.pool.get(
'hr.payroll.period').read(cr, uid, period_id, ['register_id'],
context=context)
return {
'type': 'ir.actions.report.xml',
'report_name': 'hr.payroll.register.payslips',
'datas': {'ids': [p_data['register_id'][0]]},
}
def print_payroll_summary(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
p_data = self.pool.get(
'hr.payroll.period').read(cr, uid, period_id, ['register_id'],
context=context)
return {
'type': 'ir.actions.report.xml',
'report_name': 'hr.payroll.register.summary',
'datas': {'ids': [p_data['register_id'][0]]},
}
def print_payroll_register(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
p_data = self.pool.get(
'hr.payroll.period').read(cr, uid, period_id, ['register_id'],
context=context)
return {
'type': 'ir.actions.report.xml',
'report_name': 'hr_payroll_register_report',
'datas': {'ids': [p_data['register_id'][0]]},
}
def print_payslip_details(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
p_data = self.pool.get(
'hr.payroll.period').read(cr, uid, period_id, ['register_id'],
context=context)
register = self.pool.get(
'hr.payroll.register').browse(cr, uid, p_data['register_id'][0],
context=context)
slip_ids = []
for run in register.run_ids:
[slip_ids.append(s.id) for s in run.slip_ids]
return {
'type': 'ir.actions.report.xml',
'report_name': 'payslip',
'datas': {'ids': slip_ids},
}
def print_contribution_registers(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
data = self.pool.get(
'hr.payroll.period').read(
cr, uid, period_id, ['date_start', 'date_end'],
context=context)
register_ids = self.pool.get('hr.contribution.register').search(
cr, uid, [], context=context)
form = {'date_from': data['date_start'],
'date_to': data['date_end'], }
return {
'type': 'ir.actions.report.xml',
'report_name': 'contribution.register.lines',
'datas': {
'ids': register_ids, 'form': form,
'model': 'hr.contribution.register'},
}
def close_pay_period(self, cr, uid, ids, context=None):
if context is None:
context = {}
period_id = context.get('active_id', False)
if not period_id:
return {'type': 'ir.actions.act_window_close'}
p_data = self.pool.get('hr.payroll.period').read(cr, uid, period_id,
['state'],
context=context)
if p_data['state'] != 'payment':
return {'type': 'ir.actions.act_window_close'}
wkf_service = netsvc.LocalService('workflow')
wkf_service.trg_validate(
uid, 'hr.payroll.period', period_id, 'close_period', cr)
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'hr.payroll.period',
'type': 'ir.actions.act_window',
'target': 'current',
'context': context
}
| agpl-3.0 |
c2theg/DDoS_Infomation_Sharing | libraries/suds-jurko-0.6/suds/plugin.py | 8 | 7128 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The plugin module provides classes for implementation
of suds plugins.
"""
from suds import *
from logging import getLogger
log = getLogger(__name__)
class Context(object):
    """Base class for the state objects handed to plugin callbacks."""
class InitContext(Context):
    """Context for the suds client initialization phase.

    @ivar wsdl: The wsdl.
    @type wsdl: L{wsdl.Definitions}
    """
class DocumentContext(Context):
    """Context for loading an XML document.

    @ivar url: The URL.
    @type url: str
    @ivar document: Either the XML text or the B{parsed} document root.
    @type document: (str|L{sax.element.Element})
    """
class MessageContext(Context):
    """Context for sending a SOAP envelope and handling its reply.

    @ivar envelope: The SOAP envelope to be sent.
    @type envelope: (str|L{sax.element.Element})
    @ivar reply: The reply.
    @type reply: (str|L{sax.element.Element}|object)
    """
class Plugin:
    """Base class for all suds plugins."""
class InitPlugin(Plugin):
    """Base class for suds I{init} plugins."""

    def initialized(self, context):
        """Hook invoked after the wsdl has been loaded, giving the plugin
        the opportunity to inspect/modify the WSDL.

        @param context: The init context.
        @type context: L{InitContext}
        """
        pass
class DocumentPlugin(Plugin):
    """Base class for suds I{document} plugins."""

    def loaded(self, context):
        """Hook invoked after each WSDL/XSD document is loaded, giving the
        plugin the opportunity to inspect/modify the unparsed document.

        @param context: The document context.
        @type context: L{DocumentContext}
        """
        pass

    def parsed(self, context):
        """Hook invoked after each WSDL/XSD document is parsed, giving the
        plugin the opportunity to inspect/modify the parsed document.

        @param context: The document context.
        @type context: L{DocumentContext}
        """
        pass
class MessagePlugin(Plugin):
    """Base class for suds I{SOAP message} plugins."""

    def marshalled(self, context):
        """Hook invoked before the soap envelope is sent, giving the plugin
        the opportunity to inspect/modify the envelope Document.

        @param context: The send context.
            The I{envelope} is the envelope document.
        @type context: L{MessageContext}
        """
        pass

    def sending(self, context):
        """Hook invoked before the SOAP envelope is sent, giving the plugin
        the opportunity to inspect/modify the message text.

        @param context: The send context.
            The I{envelope} is the envelope text.
        @type context: L{MessageContext}
        """
        pass

    def received(self, context):
        """Hook invoked when a reply is received, giving the plugin the
        opportunity to inspect/modify the XML text before SAX parsing.

        @param context: The reply context.
            The I{reply} is the raw text.
        @type context: L{MessageContext}
        """
        pass

    def parsed(self, context):
        """Hook invoked after the reply has been sax parsed, giving the
        plugin the opportunity to inspect/modify the DOM tree before it is
        unmarshalled.

        @param context: The reply context.
            The I{reply} is DOM tree.
        @type context: L{MessageContext}
        """
        pass

    def unmarshalled(self, context):
        """Hook invoked after the reply has been unmarshalled, giving the
        plugin the opportunity to inspect/modify the reply object before it
        is returned.

        @param context: The reply context.
            The I{reply} is unmarshalled suds object.
        @type context: L{MessageContext}
        """
        pass
class PluginContainer:
    """
    Plugin container provides easy method invocation.
    @ivar plugins: A list of plugin objects.
    @type plugins: [L{Plugin},]
    @cvar ctxclass: A dict of plugin method / context classes.
    @type ctxclass: dict
    """

    domains = {
        'init': (InitContext, InitPlugin),
        'document': (DocumentContext, DocumentPlugin),
        'message': (MessageContext, MessagePlugin),
    }

    def __init__(self, plugins):
        """
        @param plugins: A list of plugin objects.
        @type plugins: [L{Plugin},]
        """
        self.plugins = plugins

    def __getattr__(self, name):
        # Resolve the attribute to a plugin domain; only plugins of the
        # domain's class participate.
        domain = self.domains.get(name)
        if domain is None:
            # Call-form raise works on both Python 2 and 3
            # (was: ``raise Exception, msg`` -- Python 2 only).
            raise Exception('plugin domain (%s), invalid' % name)
        ctx, pclass = domain
        plugins = [p for p in self.plugins if isinstance(p, pclass)]
        return PluginDomain(ctx, plugins)
class PluginDomain:
    """
    The plugin domain.
    @ivar ctx: A context.
    @type ctx: L{Context}
    @ivar plugins: A list of plugins (targets).
    @type plugins: list
    """

    def __init__(self, ctx, plugins):
        self.ctx = ctx
        self.plugins = plugins

    def __getattr__(self, name):
        # Every (missing) attribute access resolves to a broadcast wrapper
        # that invokes the named hook on each plugin in the domain.
        return Method(name, self)
class Method:
    """
    Plugin method.
    @ivar name: The method name.
    @type name: str
    @ivar domain: The plugin domain.
    @type domain: L{PluginDomain}
    """

    def __init__(self, name, domain):
        """
        @param name: The method name.
        @type name: str
        @param domain: A plugin domain.
        @type domain: L{PluginDomain}
        """
        self.name = name
        self.domain = domain

    def __call__(self, **kwargs):
        # Build a fresh context carrying the keyword arguments, then invoke
        # the named hook on every plugin of the domain.
        ctx = self.domain.ctx()
        ctx.__dict__.update(kwargs)
        for plugin in self.domain.plugins:
            try:
                method = getattr(plugin, self.name, None)
                if method and callable(method):
                    method(ctx)
            # ``except ... as`` is valid on Python 2.6+ and Python 3
            # (was: ``except Exception, pe`` -- Python 2 only).
            except Exception as pe:
                # A misbehaving plugin must not break the others; log and
                # continue with the remaining plugins.
                log.exception(pe)
        return ctx
| mit |
Jgarcia-IAS/localizacion | openerp/osv/orm.py | 126 | 6167 | import simplejson
from lxml import etree
from ..exceptions import except_orm
from ..models import (
MetaModel,
BaseModel,
Model, TransientModel, AbstractModel,
MAGIC_COLUMNS,
LOG_ACCESS_COLUMNS,
)
# extra definitions for backward compatibility
# ``browse_record_list`` used to be the sequence type returned by browse();
# in the new API any BaseModel recordset plays that role.
browse_record_list = BaseModel
# Kept only so legacy ``isinstance(x, browse_record)`` checks keep working.
# The Python 2 ``__metaclass__`` hook overrides isinstance(): any BaseModel
# recordset of length <= 1 passes the check.
class browse_record(object):
    """ Pseudo-class for testing record instances """
    class __metaclass__(type):
        def __instancecheck__(self, inst):
            return isinstance(inst, BaseModel) and len(inst) <= 1
# Same trick as browse_record: ``isinstance(x, browse_null)`` succeeds for
# any *empty* BaseModel recordset.
class browse_null(object):
    """ Pseudo-class for testing null instances """
    class __metaclass__(type):
        def __instancecheck__(self, inst):
            return isinstance(inst, BaseModel) and not inst
def transfer_field_to_modifiers(field, modifiers):
    """Fill *modifiers* with invisible/readonly/required entries derived
    from a fields_get() descriptor.

    Each attribute defaults to the boolean of the field's static value; any
    state whose ``states`` override differs from that default turns the
    entry into a ``('state', 'in'/'not in', [...])`` domain instead.
    """
    defaults = {}
    exceptions = {}
    for attr in ('invisible', 'readonly', 'required'):
        exceptions[attr] = []
        defaults[attr] = bool(field.get(attr))
    # Collect the states that contradict each attribute's default value.
    for state, changes in field.get("states", {}).items():
        for change in changes:
            attr, value = change[0], change[1]
            if defaults[attr] != value:
                exceptions[attr].append(state)
    for attr in ('invisible', 'readonly', 'required'):
        states = exceptions[attr]
        if states:
            operator = "not in" if defaults[attr] else "in"
            modifiers[attr] = [("state", operator, states)]
        else:
            modifiers[attr] = defaults[attr]
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    """Fold a view node's ``attrs``/``states``/``invisible``/``readonly``/
    ``required`` attributes into the *modifiers* dict.

    String attributes are evaluated with ``eval``; the tree-view flavour of
    ``invisible`` is stored under the separate ``tree_invisible`` key.
    """
    attrs = node.get('attrs')
    if attrs:
        modifiers.update(eval(attrs))
    states = node.get('states')
    if states:
        leaf = ('state', 'not in', states.split(','))
        if isinstance(modifiers.get('invisible'), list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(leaf)
        else:
            modifiers['invisible'] = [leaf]
    for attr in ('invisible', 'readonly', 'required'):
        raw = node.get(attr)
        if not raw:
            continue
        value = bool(eval(raw, {'context': context or {}}))
        if in_tree_view and attr == 'invisible':
            # Invisible in a tree view has a specific meaning, make it a
            # new key in the modifiers attribute.
            modifiers['tree_invisible'] = value
        elif value or not isinstance(modifiers.get(attr), list):
            # Don't overwrite a dynamic domain (from attrs or states) with
            # a static False.
            modifiers[attr] = value
def simplify_modifiers(modifiers):
    """Drop invisible/readonly/required entries whose value is falsy, since
    an absent modifier means the same as a false one."""
    for key in ('invisible', 'readonly', 'required'):
        if key in modifiers and not modifiers[key]:
            modifiers.pop(key)
def transfer_modifiers_to_node(modifiers, node):
    """Serialize *modifiers* as JSON into the node's ``modifiers`` attribute
    after stripping falsy entries; an empty dict writes nothing."""
    if not modifiers:
        return
    simplify_modifiers(modifiers)
    node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.
    :returns: nothing
    """
    modifiers = {}
    # Field-level defaults first, so node attributes can override them.
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
    """Assert that *what* (a view-node XML string or a fields_get()
    descriptor dict) produces exactly the *expected* JSON modifiers.

    The simplify/dump/compare tail was duplicated per input type; it is
    now shared.  Inputs of any other type are silently ignored, as before.
    """
    modifiers = {}
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
    else:
        return
    simplify_modifiers(modifiers)
    json = simplejson.dumps(modifiers)
    assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    """Ad-hoc self-tests for the modifiers helpers (run manually, see the
    comment above; not wired into any test runner).

    NOTE(review): the combined invisible+required case compares raw JSON
    strings and therefore depends on dict key ordering, as the inline
    comment warns.
    """
    test_modifiers('<field name="a"/>', '{}')
    test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
    test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
    test_modifiers('<field name="a" required="1"/>', '{"required": true}')
    test_modifiers('<field name="a" invisible="0"/>', '{}')
    test_modifiers('<field name="a" readonly="0"/>', '{}')
    test_modifiers('<field name="a" required="0"/>', '{}')
    test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
    test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
    test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
    test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
    # The dictionary is supposed to be the result of fields_get().
    test_modifiers({}, '{}')
    test_modifiers({"invisible": True}, '{"invisible": true}')
    test_modifiers({"invisible": False}, '{}')
| agpl-3.0 |
klooer/rvi_backend | web/sota/migrations/0009_auto_20141011_0231.py | 8 | 1126 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import sota.models
# Auto-generated Django schema migration (appears produced by an early
# Django 1.7-era ``makemigrations`` under Python 2 -- note the b'...'
# literals).  Migration history must stay immutable; do not hand-edit.
class Migration(migrations.Migration):

    dependencies = [
        ('sota', '0008_auto_20141010_1725'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='update',
            name='upd_current',
        ),
        migrations.AlterField(
            model_name='retry',
            name='ret_deadline',
            field=models.DateField(verbose_name=b'Update Deadline'),
        ),
        migrations.AlterField(
            model_name='update',
            name='upd_retries',
            field=models.IntegerField(default=b'0', verbose_name=b'Maximum Retries', validators=[sota.models.validate_upd_retries]),
        ),
        migrations.AlterField(
            model_name='update',
            name='upd_status',
            field=models.CharField(default=b'PE', max_length=2, verbose_name=b'Update Status', choices=[(b'PE', b'Pending'), (b'ST', b'Started'), (b'RU', b'Running'), (b'AB', b'Aborted'), (b'SU', b'Success'), (b'FA', b'Failed')]),
        ),
    ]
| mpl-2.0 |
rhndg/openedx | common/test/acceptance/tests/studio/test_studio_settings_certificates.py | 20 | 8185 | """
Acceptance tests for Studio's Setting pages
"""
from .base_studio_test import StudioCourseTest
from ...pages.studio.settings_certificates import CertificatesPage
class CertificatesTest(StudioCourseTest):
    """
    Tests for settings/certificates Page.
    """

    def setUp(self, is_staff=False):
        # Course identifiers come from the fixture built by StudioCourseTest.
        super(CertificatesTest, self).setUp(is_staff)
        self.certificates_page = CertificatesPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

    def make_signatory_data(self, prefix='First'):
        """
        Makes signatory dict which can be used in the tests to create certificates
        """
        return {
            'name': '{prefix} Signatory Name'.format(prefix=prefix),
            'title': '{prefix} Signatory Title'.format(prefix=prefix),
            'organization': '{prefix} Signatory Organization'.format(prefix=prefix),
        }

    def create_and_verify_certificate(self, course_title_override, existing_certs, signatories):
        """
        Creates a new certificate and verifies that it was properly created.
        """
        self.assertEqual(existing_certs, len(self.certificates_page.certificates))
        # The page shows a different "add" affordance when no certificate
        # exists yet.
        if existing_certs == 0:
            self.certificates_page.wait_for_first_certificate_button()
            self.certificates_page.click_first_certificate_button()
        else:
            self.certificates_page.wait_for_add_certificate_button()
            self.certificates_page.click_add_certificate_button()
        certificate = self.certificates_page.certificates[existing_certs]
        # Set the certificate properties
        certificate.course_title = course_title_override
        # add signatories
        added_signatories = 0
        for idx, signatory in enumerate(signatories):
            certificate.signatories[idx].name = signatory['name']
            certificate.signatories[idx].title = signatory['title']
            certificate.signatories[idx].organization = signatory['organization']
            certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx))
            added_signatories += 1
            # A fresh signatory form must be opened before filling the next
            # signatory.
            if len(signatories) > added_signatories:
                certificate.click_add_signatory_button()
        # Save the certificate
        self.assertEqual(certificate.get_text('.action-primary'), "Create")
        certificate.click_create_certificate_button()
        self.assertIn(course_title_override, certificate.course_title)
        return certificate

    def test_no_certificates_by_default(self):
        """
        Scenario: Ensure that message telling me to create a new certificate is
            shown when no certificate exist.
        Given I have a course without certificates
        When I go to the Certificates page in Studio
        Then I see "You have not created any certificates yet." message
        """
        self.certificates_page.visit()
        self.assertTrue(self.certificates_page.no_certificates_message_shown)
        self.assertIn(
            "You have not created any certificates yet.",
            self.certificates_page.no_certificates_message_text
        )

    def test_can_create_and_edit_certficate(self):
        """
        Scenario: Ensure that the certificates can be created and edited correctly.
        Given I have a course without certificates
        When I click button 'Add your first Certificate'
        And I set new the course title override and signatory and click the button 'Create'
        Then I see the new certificate is added and has correct data
        When I edit the certificate
        And I change the name and click the button 'Save'
        Then I see the certificate is saved successfully and has the new name
        """
        self.certificates_page.visit()
        self.certificates_page.wait_for_first_certificate_button()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first'), self.make_signatory_data('second')]
        )
        # Edit the certificate
        certificate.click_edit_certificate_button()
        certificate.course_title = "Updated Course Title Override 2"
        self.assertEqual(certificate.get_text('.action-primary'), "Save")
        certificate.click_save_certificate_button()
        self.assertIn("Updated Course Title Override 2", certificate.course_title)

    def test_can_delete_certificate(self):
        """
        Scenario: Ensure that the user can delete certificate.
        Given I have a course with 1 certificate
        And I go to the Certificates page
        When I delete the Certificate with name "New Certificate"
        Then I see that there is no certificate
        When I refresh the page
        Then I see that the certificate has been deleted
        """
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first'), self.make_signatory_data('second')]
        )
        certificate.wait_for_certificate_delete_button()
        self.assertEqual(len(self.certificates_page.certificates), 1)
        # Delete certificate
        certificate.delete_certificate()
        # Re-visit to verify the deletion was persisted server-side.
        self.certificates_page.visit()
        self.assertEqual(len(self.certificates_page.certificates), 0)

    def test_can_create_and_edit_signatories_of_certficate(self):
        """
        Scenario: Ensure that the certificates can be created with signatories and edited correctly.
        Given I have a course without certificates
        When I click button 'Add your first Certificate'
        And I set new the course title override and signatory and click the button 'Create'
        Then I see the new certificate is added and has one signatory inside it
        When I click 'Edit' button of signatory panel
        And I set the name and click the button 'Save' icon
        Then I see the signatory name updated with newly set name
        When I refresh the certificates page
        Then I can see course has one certificate with new signatory name
        When I click 'Edit' button of signatory panel
        And click on 'Close' button
        Then I can see no change in signatory detail
        """
        self.certificates_page.visit()
        certificate = self.create_and_verify_certificate(
            "Course Title Override",
            0,
            [self.make_signatory_data('first')]
        )
        self.assertEqual(len(self.certificates_page.certificates), 1)
        # Edit the signatory in certificate
        signatory = certificate.signatories[0]
        signatory.edit()
        signatory.name = 'Updated signatory name'
        signatory.title = 'Update signatory title'
        signatory.organization = 'Updated signatory organization'
        signatory.save()
        self.assertEqual(len(self.certificates_page.certificates), 1)
        # Re-read the signatory from the page to verify the save round-trip.
        signatory = self.certificates_page.certificates[0].signatories[0]
        self.assertIn("Updated signatory name", signatory.name)
        self.assertIn("Update signatory title", signatory.title)
        self.assertIn("Updated signatory organization", signatory.organization)
        # Closing the edit form without saving must not change anything.
        signatory.edit()
        signatory.close()
        self.assertIn("Updated signatory name", signatory.name)

    def test_can_cancel_creation_of_certificate(self):
        """
        Scenario: Ensure that creation of a certificate can be canceled correctly.
        Given I have a course without certificates
        When I click button 'Add your first Certificate'
        And I set name of certificate and click the button 'Cancel'
        Then I see that there is no certificates in the course
        """
        self.certificates_page.visit()
        self.certificates_page.click_first_certificate_button()
        certificate = self.certificates_page.certificates[0]
        certificate.course_title = "Title Override"
        certificate.click_cancel_edit_certificate()
        self.assertEqual(len(self.certificates_page.certificates), 0)
| agpl-3.0 |
EMATech/python-for-android | src/buildlib/jinja2.egg/jinja2/filters.py | 17 | 29827 | # -*- coding: utf-8 -*-
"""
jinja2.filters
~~~~~~~~~~~~~~
Bundled jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import math
from random import choice
from operator import itemgetter
from itertools import groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
unicode_urlencode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError
from jinja2._compat import next, imap, string_types, text_type, iteritems
_word_re = re.compile(r'\w+(?u)')
def contextfilter(f):
    """Decorator marking a filter as context dependent; the current
    :class:`Context` is passed to the filter as its first argument.
    """
    setattr(f, 'contextfilter', True)
    return f
def evalcontextfilter(f):
    """Decorator marking a filter as eval-context dependent; an eval
    context object is passed as the first argument.  For more information
    about the eval context, see :ref:`eval-context`.

    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfilter', True)
    return f
def environmentfilter(f):
    """Decorator marking a filter as environment dependent; the current
    :class:`Environment` is passed to the filter as its first argument.
    """
    setattr(f, 'environmentfilter', True)
    return f
def make_attrgetter(environment, attribute):
    """Return a callable that fetches *attribute* from a passed object with
    the environment's item-lookup rules.  Dotted paths descend into nested
    attributes and purely numeric path segments are looked up as integers.
    """
    simple = not isinstance(attribute, string_types) \
        or ('.' not in attribute and not attribute.isdigit())
    if simple:
        return lambda x: environment.getitem(x, attribute)
    parts = attribute.split('.')

    def attrgetter(item):
        current = item
        for part in parts:
            key = int(part) if part.isdigit() else part
            current = environment.getitem(current, key)
        return current
    return attrgetter
def do_forceescape(value):
    """Enforce HTML escaping. This will probably double escape variables."""
    html = getattr(value, '__html__', None)
    if html is not None:
        value = html()
    return escape(text_type(value))
def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
    dictionaries and regular strings as well as pairwise iterables.

    .. versionadded:: 2.7
    """
    if isinstance(value, dict):
        pairs = iteritems(value)
    elif isinstance(value, string_types):
        pairs = None
    else:
        try:
            pairs = iter(value)
        except TypeError:
            pairs = None
    if pairs is None:
        # Scalar/string: encode the whole value as one component.
        return unicode_urlencode(value)
    return '&'.join(unicode_urlencode(k) + '=' + unicode_urlencode(v)
                    for k, v in pairs)
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
    """Return a copy of the value with all occurrences of a substring
    replaced with a new one. The first argument is the substring
    that should be replaced, the second is the replacement string.
    If the optional third argument ``count`` is given, only the first
    ``count`` occurrences are replaced:

    .. sourcecode:: jinja

        {{ "Hello World"|replace("Hello", "Goodbye") }}
            -> Goodbye World

        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
            -> d'oh, d'oh, aaargh
    """
    if count is None:
        # str.replace treats a negative count as "replace all".
        count = -1
    if not eval_ctx.autoescape:
        return text_type(s).replace(text_type(old), text_type(new), count)
    # NOTE(review): ``and`` binds tighter than ``or``, so this condition is
    # hasattr(old) or (hasattr(new) and not hasattr(s)) -- possibly not the
    # intended grouping; confirm against upstream before changing.
    if hasattr(old, '__html__') or hasattr(new, '__html__') and \
       not hasattr(s, '__html__'):
        s = escape(s)
    else:
        s = soft_unicode(s)
    return s.replace(soft_unicode(old), soft_unicode(new), count)
def do_upper(s):
    """Uppercase the given value."""
    text = soft_unicode(s)
    return text.upper()
def do_lower(s):
    """Lowercase the given value."""
    text = soft_unicode(s)
    return text.lower()
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
    """Create an SGML/XML attribute string from the items of a dict.
    Every value that is neither `none` nor `undefined` is escaped and
    rendered as ``key="value"``:

    .. sourcecode:: html+jinja

        <ul{{ {'class': 'my_list', 'missing': none,
                'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>

    A space is automatically prepended to a non-empty result unless the
    second parameter is false.
    """
    parts = []
    for key, value in iteritems(d):
        # Skip missing values (None / undefined) entirely.
        if value is None or isinstance(value, Undefined):
            continue
        parts.append('%s="%s"' % (escape(key), escape(value)))
    rv = ' '.join(parts)
    if autospace and rv:
        rv = ' ' + rv
    if _eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
def do_capitalize(s):
    """Capitalize the value: first character uppercase, all others
    lowercase.
    """
    text = soft_unicode(s)
    return text.capitalize()
def do_title(s):
    """Return a titlecased version of the value: words start with an
    uppercase letter, all remaining characters are lowercase.  Words are
    split on whitespace and hyphens, which are preserved in the output.
    """
    rv = []
    # FIX: the inline (?u) flag must appear at the start of the pattern;
    # Python 3.11+ raises an error for global flags placed elsewhere.
    for item in re.compile(r'(?u)([-\s]+)').split(s):
        if not item:
            continue
        rv.append(item[0].upper() + item[1:].lower())
    return ''.join(rv)
def do_dictsort(value, case_sensitive=False, by='key'):
    """Sort a dict and yield (key, value) pairs.  Python dicts are
    unsorted, so use this filter to order them by key or value:

    .. sourcecode:: jinja

        {% for item in mydict|dictsort %}
            sort the dict by key, case insensitive

        {% for item in mydict|dictsort(true) %}
            sort the dict by key, case sensitive

        {% for item in mydict|dictsort(false, 'value') %}
            sort the dict by value, case insensitive
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either '
                                  '"key" or "value"')
    def sort_key(entry):
        field = entry[pos]
        # Compare strings case-insensitively unless asked otherwise.
        if isinstance(field, string_types) and not case_sensitive:
            field = field.lower()
        return field
    return sorted(value.items(), key=sort_key)
@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False,
attribute=None):
"""Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
It is also possible to sort by an attribute (for example to sort
by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
{% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
.. versionchanged:: 2.6
The `attribute` parameter was added.
"""
if not case_sensitive:
def sort_func(item):
if isinstance(item, string_types):
item = item.lower()
return item
else:
sort_func = None
if attribute is not None:
getter = make_attrgetter(environment, attribute)
def sort_func(item, processor=sort_func or (lambda x: x)):
return processor(getter(item))
return sorted(value, key=sort_func, reverse=reverse)
def do_default(value, default_value='', boolean=False):
    """If the value is undefined, return the passed default value,
    otherwise the value itself:

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    To treat every falsy value (empty string, 0, ...) as missing as well,
    set the second parameter to `true`:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}
    """
    missing = isinstance(value, Undefined)
    if missing or (boolean and not value):
        return default_value
    return value
@evalcontextfilter
def do_join(eval_ctx, value, d='', attribute=None):
    """Return a string which is the concatenation of the strings in the
    sequence. The separator between elements is an empty string per
    default, you can define it with the optional parameter:

    .. sourcecode:: jinja

        {{ [1, 2, 3]|join('|') }}
            -> 1|2|3

        {{ [1, 2, 3]|join }}
            -> 123

    It is also possible to join certain attributes of an object:

    .. sourcecode:: jinja

        {{ users|join(', ', attribute='username') }}

    .. versionadded:: 2.6
       The `attribute` parameter was added.
    """
    if attribute is not None:
        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
    # no automatic escaping? joining is a lot easier then
    if not eval_ctx.autoescape:
        return text_type(d).join(imap(text_type, value))
    # if the delimiter doesn't have an html representation we check
    # if any of the items has. If yes we do a coercion to Markup
    if not hasattr(d, '__html__'):
        value = list(value)
        do_escape = False
        for idx, item in enumerate(value):
            if hasattr(item, '__html__'):
                do_escape = True
            else:
                value[idx] = text_type(item)
        # Escape the delimiter only if at least one item is markup, so
        # plain joins stay plain strings.
        if do_escape:
            d = escape(d)
        else:
            d = text_type(d)
        return d.join(value)
    # no html involved, to normal joining
    return soft_unicode(d).join(imap(soft_unicode, value))
def do_center(value, width=80):
    """Center the value in a field of the given width."""
    text = text_type(value)
    return text.center(width)
@environmentfilter
def do_first(environment, seq):
"""Return the first item of a sequence."""
try:
return next(iter(seq))
except StopIteration:
return environment.undefined('No first item, sequence was empty.')
@environmentfilter
def do_last(environment, seq):
"""Return the last item of a sequence."""
try:
return next(iter(reversed(seq)))
except StopIteration:
return environment.undefined('No last item, sequence was empty.')
@environmentfilter
def do_random(environment, seq):
"""Return a random item from the sequence."""
try:
return choice(seq)
except IndexError:
return environment.undefined('No random item, sequence was empty.')
def do_filesizeformat(value, binary=False):
    """Format the value like a 'human-readable' file size (i.e. 13 kB,
    4.1 MB, 102 Bytes, etc).  Decimal prefixes (Mega, Giga, ...) are used
    by default; pass ``binary=True`` for binary prefixes (Mebi, Gibi).
    """
    size = float(value)
    base = 1024 if binary else 1000
    if binary:
        prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    if size == 1:
        return '1 Byte'
    if size < base:
        return '%d Bytes' % size
    for exponent, prefix in enumerate(prefixes):
        # base ** (exponent + 2) is the upper bound of this prefix.
        unit = base ** (exponent + 2)
        if size < unit:
            return '%.1f %s' % (base * size / unit, prefix)
    # Larger than the biggest prefix: report in the last unit anyway.
    return '%.1f %s' % (base * size / unit, prefix)
def do_pprint(value, verbose=False):
    """Pretty print a variable; useful for debugging.  A truthy parameter
    makes the output more verbose (this requires `pretty`).
    """
    return pformat(value, verbose=verbose)
@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False):
    """Convert URLs in plain text into clickable links.

    An optional integer shortens the URLs to that many characters; a third
    argument adds ``rel="nofollow"``:

    .. sourcecode:: jinja

        {{ mytext|urlize(40, true) }}
            links are shortened to 40 chars and defined with rel="nofollow"
    """
    converted = urlize(value, trim_url_limit, nofollow)
    if eval_ctx.autoescape:
        converted = Markup(converted)
    return converted
def do_indent(s, width=4, indentfirst=False):
    """Return a copy of the string with each line indented by ``width``
    spaces (default 4).  The first line is left untouched unless
    ``indentfirst`` is true:

    .. sourcecode:: jinja

        {{ mytext|indent(2, true) }}
            indent by two spaces and indent the first line too.
    """
    prefix = ' ' * width
    rv = ('\n' + prefix).join(s.splitlines())
    if indentfirst:
        rv = prefix + rv
    return rv
def do_truncate(s, length=255, killwords=False, end='...'):
    """Return a truncated copy of the string.  ``length`` (default 255)
    is the cutoff; with ``killwords=true`` the text is cut exactly at
    ``length``, otherwise the last partial word is dropped.  Truncated
    text gets ``end`` (default ``"..."``) appended.

    .. sourcecode:: jinja

        {{ "foo bar"|truncate(5) }}
            -> "foo ..."
        {{ "foo bar"|truncate(5, True) }}
            -> "foo b..."
    """
    if len(s) <= length:
        return s
    if killwords:
        return s[:length] + end
    kept = []
    consumed = 0
    for word in s.split(' '):
        # +1 accounts for the separating space.
        consumed += len(word) + 1
        if consumed > length:
            break
        kept.append(word)
    kept.append(end)
    return ' '.join(kept)
@environmentfilter
def do_wordwrap(environment, s, width=79, break_long_words=True,
wrapstring=None):
"""
Return a copy of the string passed to the filter wrapped after
``79`` characters. You can override this default using the first
parameter. If you set the second parameter to `false` Jinja will not
split words apart if they are longer than `width`. By default, the newlines
will be the default newlines for the environment, but this can be changed
using the wrapstring keyword argument.
.. versionadded:: 2.7
Added support for the `wrapstring` parameter.
"""
if not wrapstring:
wrapstring = environment.newline_sequence
import textwrap
return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
replace_whitespace=False,
break_long_words=break_long_words))
def do_wordcount(s):
    """Count the words in the given string."""
    matches = _word_re.findall(s)
    return len(matches)
def do_int(value, default=0):
    """Convert the value into an integer.  If the conversion fails,
    ``default`` (0 unless overridden) is returned.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        pass
    # Quirk: "42.23"|int should give 42, so retry via float.
    try:
        return int(float(value))
    except (TypeError, ValueError):
        return default
def do_float(value, default=0.0):
    """Convert the value into a floating point number.  If the conversion
    fails, ``default`` (0.0 unless overridden) is returned.
    """
    try:
        result = float(value)
    except (TypeError, ValueError):
        result = default
    return result
def do_format(value, *args, **kwargs):
    """Apply python string formatting on an object:

    .. sourcecode:: jinja

        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    if args and kwargs:
        raise FilterArgumentError('can\'t handle positional and keyword '
                                  'arguments at the same time')
    params = kwargs or args
    return soft_unicode(value) % params
def do_trim(value):
    """Strip leading and trailing whitespace."""
    text = soft_unicode(value)
    return text.strip()
def do_striptags(value):
    """Strip SGML/XML tags and collapse adjacent whitespace into a single
    space.
    """
    if hasattr(value, '__html__'):
        value = value.__html__()
    text = text_type(value)
    return Markup(text).striptags()
def do_slice(value, slices, fill_with=None):
    """Slice an iterator and yield lists containing those items.  Useful
    for laying items out in a fixed number of columns:

    .. sourcecode:: html+jinja

        <div class="columwrapper">
          {%- for column in items|slice(3) %}
            <ul class="column-{{ loop.index }}">
            {%- for item in column %}
              <li>{{ item }}</li>
            {%- endfor %}
            </ul>
          {%- endfor %}
        </div>

    A second argument fills up missing values on the last slices.
    """
    items = list(value)
    per_slice, extra = divmod(len(items), slices)
    offset = 0
    for slice_number in range(slices):
        start = offset + slice_number * per_slice
        # The first `extra` slices carry one additional item each.
        if slice_number < extra:
            offset += 1
        end = offset + (slice_number + 1) * per_slice
        chunk = items[start:end]
        if fill_with is not None and slice_number >= extra:
            chunk.append(fill_with)
        yield chunk
def do_batch(value, linecount, fill_with=None):
    """Batch items: like `slice` the other way round.  Yields lists of
    ``linecount`` items; a second parameter fills up the last batch:

    .. sourcecode:: html+jinja

        <table>
        {%- for row in items|batch(3, '&nbsp;') %}
          <tr>
          {%- for column in row %}
            <td>{{ column }}</td>
          {%- endfor %}
          </tr>
        {%- endfor %}
        </table>
    """
    batch = []
    for item in value:
        if len(batch) == linecount:
            yield batch
            batch = []
        batch.append(item)
    if batch:
        if fill_with is not None and len(batch) < linecount:
            batch.extend([fill_with] * (linecount - len(batch)))
        yield batch
def do_round(value, precision=0, method='common'):
    """Round the number to the given precision (default ``0``) with one
    of three methods:

    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    Even at 0 precision a float is returned; pipe through `int` for a
    real integer: ``{{ 42.55|round|int }}`` -> ``43``.
    """
    if method not in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if method == 'common':
        return round(value, precision)
    func = getattr(math, method)
    scale = 10 ** precision
    return func(value * scale) / scale
@environmentfilter
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.

    For example, group a list of person objects by their ``gender``:

    .. sourcecode:: html+jinja

        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>

    Tuple unpacking also works: ``{% for grouper, list in ... %}``.
    The grouped-by value is available as ``grouper``; ``list`` holds the
    objects sharing it.

    .. versionchanged:: 2.6
       Dotted notation can group by a nested attribute.
    """
    key = make_attrgetter(environment, attribute)
    grouped = groupby(sorted(value, key=key), key)
    return sorted(map(_GroupTuple, grouped))
class _GroupTuple(tuple):
__slots__ = ()
grouper = property(itemgetter(0))
list = property(itemgetter(1))
def __new__(cls, xxx_todo_changeme):
(key, value) = xxx_todo_changeme
return tuple.__new__(cls, (key, list(value)))
@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The `attribute` parameter was added to allow suming up over
attributes. Also the `start` parameter was moved on to the right.
"""
if attribute is not None:
iterable = imap(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start)
def do_list(value):
    """Convert the value into a list; a string becomes a list of its
    characters.
    """
    return list(value)
def do_mark_safe(value):
    """Mark the value as safe: with automatic escaping enabled it will not
    be escaped.
    """
    return Markup(value)
def do_mark_unsafe(value):
    """Mark a value as unsafe; the reverse operation of :func:`safe`."""
    return text_type(value)
def do_reverse(value):
    """Reverse the object, or return an iterator that iterates over it the
    other way round.
    """
    if isinstance(value, string_types):
        return value[::-1]
    try:
        return reversed(value)
    except TypeError:
        # No __reversed__ / sequence protocol: materialize and reverse.
        try:
            rv = list(value)
            rv.reverse()
            return rv
        except TypeError:
            raise FilterArgumentError('argument must be iterable')
@environmentfilter
def do_attr(environment, obj, name):
    """Get an attribute of an object. ``foo|attr("bar")`` works like
    ``foo["bar"]`` just that always an attribute is returned and items are not
    looked up.

    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
    """
    try:
        name = str(name)
    except UnicodeError:
        # Non-ASCII names cannot be attributes on Python 2; fall through
        # to the undefined return below.
        pass
    else:
        try:
            value = getattr(obj, name)
        except AttributeError:
            pass
        else:
            # In sandboxed environments the attribute must pass the safety
            # check, otherwise an "unsafe" undefined is returned instead.
            if environment.sandboxed and not \
               environment.is_safe_attribute(obj, name, value):
                return environment.unsafe_undefined(obj, name)
            return value
    return environment.undefined(obj=obj, name=name)
@contextfilter
def do_map(*args, **kwargs):
    """Apply a filter on a sequence of objects, or look up an attribute.
    Useful when only a certain value of a list of objects is interesting.

    Basic usage is mapping on an attribute:

    .. sourcecode:: jinja

        Users on this page: {{ users|map(attribute='username')|join(', ') }}

    Alternatively a filter can be invoked by passing its name and
    arguments:

    .. sourcecode:: jinja

        Users on this page: {{ titles|map('lower')|join(', ') }}

    .. versionadded:: 2.7
    """
    context, seq = args[0], args[1]
    if len(args) == 2 and 'attribute' in kwargs:
        attribute = kwargs.pop('attribute')
        if kwargs:
            raise FilterArgumentError('Unexpected keyword argument %r' %
                next(iter(kwargs)))
        func = make_attrgetter(context.environment, attribute)
    else:
        try:
            name = args[2]
            args = args[3:]
        except LookupError:
            raise FilterArgumentError('map requires a filter argument')
        func = lambda item: context.environment.call_filter(
            name, item, args, kwargs, context=context)
    if seq:
        for item in seq:
            yield func(item)
@contextfilter
def do_select(*args, **kwargs):
    """Filter a sequence by applying a test to each object, keeping only
    the ones for which the test succeeds:

    .. sourcecode:: jinja

        {{ numbers|select("odd") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: x, False)
@contextfilter
def do_reject(*args, **kwargs):
    """Filter a sequence by applying a test to each object, dropping the
    ones for which the test succeeds:

    .. sourcecode:: jinja

        {{ numbers|reject("odd") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: not x, False)
@contextfilter
def do_selectattr(*args, **kwargs):
    """Filter a sequence by applying a test to an attribute of each
    object, keeping only the ones for which the test succeeds:

    .. sourcecode:: jinja

        {{ users|selectattr("is_active") }}
        {{ users|selectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: x, True)
@contextfilter
def do_rejectattr(*args, **kwargs):
    """Filter a sequence by applying a test to an attribute of each
    object, dropping the ones for which the test succeeds:

    .. sourcecode:: jinja

        {{ users|rejectattr("is_active") }}
        {{ users|rejectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: not x, True)
def _select_or_reject(args, kwargs, modfunc, lookup_attr):
    """Shared implementation of select/reject/selectattr/rejectattr.

    ``args`` is the raw positional tuple ``(context, seq, [attr,] [test,
    test_args...])``; ``modfunc`` inverts the test result for the reject
    variants and ``lookup_attr`` selects the *attr flavors.
    """
    context = args[0]
    seq = args[1]
    if lookup_attr:
        try:
            attr = args[2]
        except LookupError:
            raise FilterArgumentError('Missing parameter for attribute name')
        transfunc = make_attrgetter(context.environment, attr)
        # The attribute name consumed one positional slot, so the test
        # name (if any) moves one position to the right.
        off = 1
    else:
        off = 0
        transfunc = lambda x: x
    try:
        name = args[2 + off]
        args = args[3 + off:]
        func = lambda item: context.environment.call_test(
            name, item, args, kwargs)
    except LookupError:
        # No test given: fall back to plain truthiness.
        func = bool
    if seq:
        for item in seq:
            if modfunc(func(transfunc(item))):
                yield item
#: Mapping of filter names to their implementations, exported to the
#: environment.  The original literal listed 'title' and 'capitalize'
#: twice with identical values; the duplicates were removed (the
#: resulting dict is unchanged).
FILTERS = {
    'attr': do_attr,
    'replace': do_replace,
    'upper': do_upper,
    'lower': do_lower,
    'escape': escape,
    'e': escape,
    'forceescape': do_forceescape,
    'capitalize': do_capitalize,
    'title': do_title,
    'default': do_default,
    'd': do_default,
    'join': do_join,
    'count': len,
    'dictsort': do_dictsort,
    'sort': do_sort,
    'length': len,
    'reverse': do_reverse,
    'center': do_center,
    'indent': do_indent,
    'first': do_first,
    'last': do_last,
    'map': do_map,
    'random': do_random,
    'reject': do_reject,
    'rejectattr': do_rejectattr,
    'filesizeformat': do_filesizeformat,
    'pprint': do_pprint,
    'truncate': do_truncate,
    'wordwrap': do_wordwrap,
    'wordcount': do_wordcount,
    'int': do_int,
    'float': do_float,
    'string': soft_unicode,
    'list': do_list,
    'urlize': do_urlize,
    'format': do_format,
    'trim': do_trim,
    'striptags': do_striptags,
    'select': do_select,
    'selectattr': do_selectattr,
    'slice': do_slice,
    'batch': do_batch,
    'sum': do_sum,
    'abs': abs,
    'round': do_round,
    'groupby': do_groupby,
    'safe': do_mark_safe,
    'xmlattr': do_xmlattr,
    'urlencode': do_urlencode
}
| lgpl-2.1 |
Varabe/Guild-Manager | scripts/make_eweek_post.py | 1 | 5619 | #!/usr/bin/env python3
from lib.posts import getPostTime, getText, post
from lib.commands import vk, api, database
from lib.config import group_id, test_id
from lib.guilds import Eweek, Player
from lib.errors import ErrorManager
from re import search
from logging import getLogger
logger = getLogger("GM.make_eweek_post")
def make():
    """Create and schedule the post with the weekly event results."""
    logger.debug("Getting eweek players...")
    players = getPlayers()
    challenges = getChallenges()
    players = sortPlayers(players, *challenges)
    logger.debug("Creating eweek post...")
    post_text = createPost(players, challenges)
    post_time = getPostTime()
    post(post_text, post_time)
def getPlayers():
    """Collect eweek participants and their results as Player groups."""
    registration_comments = getEweekPostComments()
    participants = getParticipantsFromComments(registration_comments)
    result_comments = getCommentsFromResultTopic()
    results = getResultsFromComments(result_comments)
    return makePlayers(results, participants)
def getEweekPostComments():
    """Return the comments of players who registered for the weekly event.

    Finds the registration post on the group wall by its #aottg83_reg
    hashtag and fetches up to 30 comments from it.
    """
    search_results = vk(api.wall.search, owner_id=-group_id, query="#aottg83_reg", count=1)
    post_id = search_results['items'][0]['id']
    comments = vk(api.wall.getComments, owner_id=-group_id, post_id=post_id, count=30)
    return comments['items']
def getParticipantsFromComments(comments):
    """Return a {nickname: vk_id} mapping of registered players.

    A comment may list the guild on the first line and the nickname on
    the second, so both of the first two lines are recorded as possible
    nicknames for the same author.
    """
    participants = {}
    for comment in comments:
        lines = comment['text'].splitlines()
        author_id = comment['from_id']
        participants[lines[0]] = author_id
        if len(lines) > 1:  # guild written first, nickname second
            participants[lines[1]] = author_id
    return participants
def getCommentsFromResultTopic():
    """Fetch the comments from the board topic where results are posted."""
    topic_id = 35693273  # hard-coded id of the results discussion
    # NOTE(review): this call uses test_id while the wall calls use
    # group_id -- presumably the results topic lives in the test group;
    # worth confirming.
    response = vk(api.board.getComments,
                  topic_id=topic_id,
                  group_id=test_id,
                  count=50)
    return response['items']
def getResultsFromComments(comments):
    """Extract result entries matching "nick [nick2] score" from the
    topic comments and parse each into Player objects.
    """
    pattern = r"\w+( \w+)? \d+"  # nick, optional second nick, score
    results = []
    for text in (c['text'] for c in comments):
        if search(pattern, text):
            results.append(getPlayersFromComment(text))
    return results
def getPlayersFromComment(comment):
    """Parse one "nick [nick2] score" comment into Player objects.

    Returns a (player1, player2) tuple for pair entries or a one-element
    list for solo entries; the score is attached to the first player.
    """
    parts = comment.split(" ")
    player1 = Player(name=parts[0])
    # SECURITY FIX: this used to eval() text taken from a public VK
    # topic, which executes attacker-controlled code.  The score token is
    # matched by \d+ upstream, so int() is sufficient and safe.
    if len(parts) > 2:
        player2 = Player(name=parts[1])
        player1.score = int(parts[2])
        return player1, player2
    else:
        player1.score = int(parts[1])
        return [player1]
def makePlayers(results, participants):
    """Attach VK ids and displayable guild tags to the parsed players.

    Players whose nickname appears among the registered participants are
    re-initialized with their VK id (which presumably makes Player resolve
    guild membership); guild names are then wrapped in brackets for display
    and missing guilds become empty strings.
    """
    for pair in results:
        for player in pair:
            if player.name in participants:
                name = player.name
                id = participants[player.name]
                player.__init__(id=id, name=name)  # HACK: re-run __init__ to reload the player by id
            if player.guild:
                guild = player.guild.name
                player.guild = "[" + guild + "]"
            if not player.guild:
                player.guild = ""
    return results
def getChallenges():
    """Return the three score thresholds of the current weekly event."""
    # FIX CALLS TO XML
    eweek_id = database.find("eweeks").find("this_week").text
    eweek = Eweek(id=eweek_id)
    return eweek.ch1, eweek.ch2, eweek.ch3
def sortPlayers(players, ch1, ch2, ch3):
    """Order the player groups by the first player's score.

    When the challenge thresholds decrease (ch1 > ch2) a higher score is
    better, so the list is sorted descending; otherwise ascending.
    """
    descending = ch1 > ch2
    players.sort(key=lambda pair: pair[0].score, reverse=descending)
    return players
def createPost(players, challenges):
    """Build the full text of the weekly results post."""
    parts = ["#aottg83_results\n",
             getText("results") + "\n\n",
             makeResults(players, challenges),
             "\n\nПравила еженедельника: https://vk.com/page-64867627_47291741?f=Еженедельник"]
    return "".join(parts)
def makeResults(players, challenges):
    """Render the ranked result lines, inserting a blank line whenever the
    challenge tier changes (the top three are always in the first tier).
    """
    compare = compareChallenges(*challenges)
    rendered = ""
    previous_row = "first"
    for place, pair in enumerate(players, start=1):
        score = pair[0].score
        entry = "\n" + makeFancyResult(place, pair, score)
        if place <= 3:
            row = "first"
        else:
            row = getPairRow(compare, score, *challenges)
        if previous_row != row:
            rendered += "\n"
            previous_row = row
        rendered += entry
    return rendered
def compareChallenges(ch1, ch2, ch3):
    """Return the comparison function used to match scores against
    challenge thresholds.

    When the thresholds decrease (ch1 > ch2) a higher score is better, so
    a score passes a challenge when it is >= the threshold; otherwise a
    lower score is better and <= is used.
    """
    if ch1 > ch2:
        return lambda score, challenge: score >= challenge
    return lambda score, challenge: score <= challenge
def makeFancyResult(index, pair, score):
    """Format one result row.

    Layout: "place) player [& player] -- score [guild tags]", e.g.
    ``1) [id47|Varabe] -- 83 [SunseT]``.
    """
    if len(pair) > 1:
        row = "{}) {} & {} -- {}".format(index, pair[0], pair[1], score)
    else:
        row = "{}) {} -- {}".format(index, pair[0], score)
    guild_tags = [" " + player.guild for player in pair if player.guild]
    return row + "".join(guild_tags)
def getPairRow(compare, score, ch1, ch2, ch3):
    """Map a score onto its challenge tier ("first" .. "fourth").

    Tiers separate players by which challenge threshold they reached.
    """
    for row, challenge in (("first", ch1), ("second", ch2), ("third", ch3)):
        if compare(score, challenge):
            return row
    return "fourth"
if __name__ == "__main__":
    # Script entry point; ErrorManager reports failures of the "eweek" job.
    with ErrorManager("eweek"):
        make()
| mit |
fernandog/Sick-Beard | lib/hachoir_core/stream/input.py | 90 | 19777 | from lib.hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
from lib.hachoir_core.error import info
from lib.hachoir_core.log import Logger
from lib.hachoir_core.bits import str2long
from lib.hachoir_core.i18n import getTerminalCharset
from lib.hachoir_core.tools import lowerBound
from lib.hachoir_core.i18n import _
from os import dup, fdopen
from errno import ESPIPE
from weakref import ref as weakref_ref
from lib.hachoir_core.stream import StreamError
class InputStreamError(StreamError):
    """Base class for errors raised while reading from an input stream."""
    pass
class ReadStreamError(InputStreamError):
    """Raised when the requested bits could not be read.

    Stores the requested size and address (both in bits) and, when a
    partial read succeeded, the number of bits actually obtained.
    """
    def __init__(self, size, address, got=None):
        self.size = size
        self.address = address
        self.got = got
        if self.got is not None:
            msg = _("Can't read %u bits at address %u (got %u bits)") % (self.size, self.address, self.got)
        else:
            msg = _("Can't read %u bits at address %u") % (self.size, self.address)
        InputStreamError.__init__(self, msg)
class NullStreamError(InputStreamError):
    """Raised when a stream turns out to have zero size."""
    def __init__(self, source):
        self.source = source
        msg = _("Input size is nul (source='%s')!") % self.source
        InputStreamError.__init__(self, msg)
class FileFromInputStream:
    """Adapter exposing a file-like byte interface (read/seek/tell) on top
    of a bit-oriented InputStream.
    """
    _offset = 0        # current position in bytes (end-relative if _from_end)
    _from_end = False  # True after seek(pos, 2): _offset is relative to EOF
    def __init__(self, stream):
        self.stream = stream
        # Ask the stream for its size; may still be None (unknown yet),
        # in which case we get notified later through _setSize().
        self._setSize(stream.askSize(self))
    def _setSize(self, size):
        # Convert the stream size from bits to bytes; a stream that is not
        # byte-aligned cannot be exposed as a file.
        if size is None:
            self._size = size
        elif size % 8:
            raise InputStreamError("Invalid size")
        else:
            self._size = size // 8
    def tell(self):
        if self._from_end:
            # Force the underlying stream to be read until its size is
            # known, then translate the end-relative offset to absolute.
            while self._size is None:
                self.stream._feed(max(self.stream._current_size << 1, 1 << 16))
            self._from_end = False
            self._offset += self._size
        return self._offset
    def seek(self, pos, whence=0):
        # Mimics file.seek: 0 = absolute, 1 = relative, 2 = from the end.
        if whence == 0:
            self._from_end = False
            self._offset = pos
        elif whence == 1:
            self._offset += pos
        elif whence == 2:
            self._from_end = True
            self._offset = pos
        else:
            raise ValueError("seek() second argument must be 0, 1 or 2")
    def read(self, size=None):
        # Byte-level helper over the bit-level stream read; a non-zero
        # shift would mean the data is not byte-aligned.
        def read(address, size):
            shift, data, missing = self.stream.read(8 * address, 8 * size)
            if shift:
                raise InputStreamError("TODO: handle non-byte-aligned data")
            return data
        if self._size or size is not None and not self._from_end:
            # We don't want self.tell() to read anything
            # and the size must be known if we read until the end.
            pos = self.tell()
            if size is None or None < self._size < pos + size:
                size = self._size - pos
                if size <= 0:
                    return ''
            data = read(pos, size)
            self._offset += len(data)
            return data
        elif self._from_end:
            # TODO: not tested
            max_size = - self._offset
            if size is None or max_size < size:
                size = max_size
            if size <= 0:
                return ''
            # Keep the last two chunks so the requested tail survives even
            # when the final chunk is shorter than max_size.
            data = '', ''
            self._offset = max(0, self.stream._current_size // 8 + self._offset)
            self._from_end = False
            bs = max(max_size, 1 << 16)
            while True:
                d = read(self._offset, bs)
                data = data[1], d
                self._offset += len(d)
                if self._size:
                    bs = self._size - self._offset
                    if not bs:
                        data = data[0] + data[1]
                        d = len(data) - max_size
                        return data[d:d+size]
        else:
            # TODO: not tested
            # Unknown stream size: accumulate chunks until the size
            # becomes known and is fully consumed.
            data = [ ]
            size = 1 << 16
            while True:
                d = read(self._offset, size)
                data.append(d)
                self._offset += len(d)
                if self._size:
                    size = self._size - self._offset
                    if not size:
                        return ''.join(data)
class InputStream(Logger):
    """Abstract base class for bit-addressable input streams.

    All addresses and sizes are expressed in bits.  Subclasses must
    implement :meth:`read`.
    """
    _set_size = None    # clients waiting to be notified of the final size
    _current_size = 0   # number of bits known/read so far
    def __init__(self, source=None, size=None, packets=None, **args):
        self.source = source
        self._size = size # in bits
        if size == 0:
            raise NullStreamError(source)
        self.tags = tuple(args.get("tags", tuple()))
        self.packets = packets
    def askSize(self, client):
        """Return the stream size in bits, or None if not yet known.
        In the latter case the client is registered (weakly) so it gets
        notified through its _setSize() once the size is determined.
        """
        if self._size != self._current_size:
            if self._set_size is None:
                self._set_size = []
            self._set_size.append(weakref_ref(client))
        return self._size
    def _setSize(self, size=None):
        # Finalize the stream size and notify all still-alive clients.
        assert self._size is None or self._current_size <= self._size
        if self._size != self._current_size:
            self._size = self._current_size
            if not self._size:
                raise NullStreamError(self.source)
            if self._set_size:
                for client in self._set_size:
                    client = client()  # dereference the weakref
                    if client:
                        client._setSize(self._size)
            del self._set_size
    size = property(lambda self: self._size, doc="Size of the stream in bits")
    # True once the whole stream has been read (size is final).
    checked = property(lambda self: self._size == self._current_size)
    def sizeGe(self, size, const=False):
        """Check whether the stream contains at least `size` bits, feeding
        the stream if needed (unless `const` is set).
        """
        return self._current_size >= size or \
            not (None < self._size < size or const or self._feed(size))
    def _feed(self, size):
        # Read up to `size` bits just to grow the buffer; returns the
        # "missing" flag of the read.
        return self.read(size-1,1)[2]
    def read(self, address, size):
        """
        Read 'size' bits at position 'address' (in bits)
        from the beginning of the stream.
        """
        raise NotImplementedError
    def readBits(self, address, nbits, endian):
        """Read `nbits` bits at `address` and return them as an unsigned
        integer interpreted with the given endianness.
        """
        assert endian in (BIG_ENDIAN, LITTLE_ENDIAN)
        shift, data, missing = self.read(address, nbits)
        if missing:
            raise ReadStreamError(nbits, address)
        value = str2long(data, endian)
        # Drop the bits surrounding the requested field.
        if endian is BIG_ENDIAN:
            value >>= len(data) * 8 - shift - nbits
        else:
            value >>= shift
        return value & (1 << nbits) - 1
    def readInteger(self, address, signed, nbits, endian):
        """ Read an integer number """
        value = self.readBits(address, nbits, endian)
        # Signed number. Example with nbits=8:
        # if 128 <= value: value -= 256
        if signed and (1 << (nbits-1)) <= value:
            value -= (1 << nbits)
        return value
    def readBytes(self, address, nb_bytes):
        """Read `nb_bytes` bytes at bit `address` (must be byte-aligned)."""
        shift, data, missing = self.read(address, 8 * nb_bytes)
        if shift:
            raise InputStreamError("TODO: handle non-byte-aligned data")
        if missing:
            raise ReadStreamError(8 * nb_bytes, address)
        return data
    def searchBytesLength(self, needle, include_needle,
            start_address=0, end_address=None):
        """
        Search `needle` and return the length in bytes from `start_address`
        to the match.  If include_needle is True, add its length to the
        result.  Returns None if needle can't be found.
        """
        pos = self.searchBytes(needle, start_address, end_address)
        if pos is None:
            return None
        length = (pos - start_address) // 8
        if include_needle:
            length += len(needle)
        return length
    def searchBytes(self, needle, start_address=0, end_address=None):
        """
        Search some bytes in [start_address;end_address[. Addresses must
        be aligned to byte. Returns the address of the bytes if found,
        None else.
        """
        if start_address % 8:
            raise InputStreamError("Unable to search bytes with address with bit granularity")
        length = len(needle)
        size = max(3 * length, 4096)
        buffer = ''
        if self._size and (end_address is None or self._size < end_address):
            end_address = self._size
        while True:
            if end_address is not None:
                todo = (end_address - start_address) >> 3
                if todo < size:
                    if todo <= 0:
                        return None
                    size = todo
            data = self.readBytes(start_address, size)
            if end_address is None and self._size:
                # Size became known during the read: clamp the chunk.
                end_address = self._size
                size = (end_address - start_address) >> 3
                assert size > 0
                data = data[:size]
            start_address += 8 * size
            # Keep length-1 trailing bytes so matches spanning two chunks
            # are not missed.
            buffer = buffer[len(buffer) - length + 1:] + data
            found = buffer.find(needle)
            if found >= 0:
                return start_address + (found - len(buffer)) * 8
    def file(self):
        """Return a file-like (byte oriented) view of this stream."""
        return FileFromInputStream(self)
class InputPipe(object):
    """
    InputPipe makes input streams seekable by caching a certain
    amount of data. The memory usage may be unlimited in worst cases.
    A function (set_size) is called when the size of the stream is known.
    InputPipe sees the input stream as an array of blocks of
    size = (2 ^ self.buffer_size) and self.buffers maps to this array.
    It also maintains a circular ordered list of non-discarded blocks,
    sorted by access time.
    Each element of self.buffers is an array of 3 elements:
        * self.buffers[i][0] is the data.
          len(self.buffers[i][0]) == 1 << self.buffer_size
          (except at the end: the length may be smaller)
        * self.buffers[i][1] is the index of a more recently used block
        * self.buffers[i][2] is the opposite of self.buffers[i][1],
          in order to have a double-linked list.
    For any discarded block, self.buffers[i] = None
    self.last is the index of the most recently accessed block.
    self.first is the first (= smallest index) non-discarded block.
    How InputPipe discards blocks:
        * Just before returning from the read method.
        * Only if there are more than self.buffer_nb_min blocks in memory.
        * While self.buffers[self.first] is the least recently used block.
    Property: There is no hole in self.buffers, except at the beginning.
    """
    # Minimum number of blocks kept in memory before discarding starts.
    buffer_nb_min = 256
    # log2 of the block size in bytes (64 KiB blocks).
    buffer_size = 16
    # Index of the most recently used block (None until first _append).
    last = None
    # Total stream size in bytes, set once the underlying input is exhausted.
    size = None

    def __init__(self, input, set_size=None):
        """
        input    -- object with a read(nbytes) method (non-seekable source)
        set_size -- optional callback invoked with the size once known
        """
        self._input = input
        self.first = self.address = 0
        self.buffers = []
        self.set_size = set_size

    # Number of bytes fetched so far, rounded up to whole blocks.
    current_size = property(lambda self: len(self.buffers) << self.buffer_size)

    def _append(self, data):
        """Append a new block and link it as the most recently used one."""
        if self.last is None:
            # Very first block: it is its own predecessor and successor.
            self.last = next = prev = 0
        else:
            # Insert between the current MRU block and its successor.
            prev = self.last
            last = self.buffers[prev]
            next = last[1]
            self.last = self.buffers[next][2] = last[1] = len(self.buffers)
        self.buffers.append([ data, next, prev ])

    def _get(self, index):
        """
        Return the data of block 'index' and promote it to most recently
        used.  Returns '' past the end; raises if the block was discarded.
        """
        if index >= len(self.buffers):
            return ''
        buf = self.buffers[index]
        if buf is None:
            raise InputStreamError(_("Error: Buffers too small. Can't seek backward."))
        if self.last != index:
            # Unlink 'buf' from its current position...
            next = buf[1]
            prev = buf[2]
            self.buffers[next][2] = prev
            self.buffers[prev][1] = next
            # ...and relink it right after the current MRU block.
            first = self.buffers[self.last][1]
            buf[1] = first
            buf[2] = self.last
            self.buffers[first][2] = index
            self.buffers[self.last][1] = index
            self.last = index
        return buf[0]

    def _flush(self):
        """Discard least-recently-used leading blocks beyond buffer_nb_min."""
        lim = len(self.buffers) - self.buffer_nb_min
        while self.first < lim:
            buf = self.buffers[self.first]
            if buf[2] != self.last:
                # self.first is no longer the LRU block: stop discarding.
                break
            info("Discarding buffer %u." % self.first)
            self.buffers[self.last][1] = buf[1]
            self.buffers[buf[1]][2] = self.last
            self.buffers[self.first] = None
            self.first += 1

    def seek(self, address):
        """Move the read cursor (byte address); actual fetch happens in read()."""
        assert 0 <= address
        self.address = address

    def read(self, size):
        """
        Read up to 'size' bytes at the current address, fetching missing
        blocks from the underlying input on demand.
        """
        end = self.address + size
        # Fetch every block up to the one containing 'end'.
        for i in xrange(len(self.buffers), (end >> self.buffer_size) + 1):
            data = self._input.read(1 << self.buffer_size)
            if len(data) < 1 << self.buffer_size:
                # Short read: the input is exhausted, size is now known.
                self.size = (len(self.buffers) << self.buffer_size) + len(data)
                if self.set_size:
                    self.set_size(self.size)
                if data:
                    self._append(data)
                break
            self._append(data)
        block, offset = divmod(self.address, 1 << self.buffer_size)
        data = ''.join(self._get(index)
            for index in xrange(block, (end - 1 >> self.buffer_size) + 1)
            )[offset:offset+size]
        self._flush()
        self.address += len(data)
        return data
class InputIOStream(InputStream):
    """
    InputStream backed by a file-like object.  Non-seekable inputs (and
    pipes, detected via ESPIPE) are transparently wrapped in an InputPipe
    so random access still works.  (Python 2 code: uses the old
    'except IOError, err' syntax and unicode().)
    """
    def __init__(self, input, size=None, **args):
        """
        input -- file-like object; needs seek()/tell(), otherwise it is
                 wrapped in an InputPipe
        size  -- size in bits, or None to autodetect via seek-to-end
        """
        if not hasattr(input, "seek"):
            if size is None:
                # Unknown size: let the pipe call back _setSize() once
                # the input is exhausted.
                input = InputPipe(input, self._setSize)
            else:
                input = InputPipe(input)
        elif size is None:
            try:
                # Seek to end to measure the size in bytes.
                input.seek(0, 2)
                size = input.tell() * 8
            except IOError, err:
                if err.errno == ESPIPE:
                    # Seekable API but actually a pipe: fall back to caching.
                    input = InputPipe(input, self._setSize)
                else:
                    charset = getTerminalCharset()
                    errmsg = unicode(str(err), charset)
                    source = args.get("source", "<inputio:%r>" % input)
                    raise InputStreamError(_("Unable to get size of %s: %s") % (source, errmsg))
        self._input = input
        InputStream.__init__(self, size=size, **args)

    def __current_size(self):
        # Prefer the frozen size, then the pipe's final size, then however
        # much the pipe has buffered so far.
        if self._size:
            return self._size
        if self._input.size:
            return 8 * self._input.size
        return 8 * self._input.current_size
    _current_size = property(__current_size)

    def read(self, address, size):
        """
        Read 'size' bits at bit position 'address'.
        Returns (shift, data, missing); see InputStream.read().
        """
        assert size > 0
        _size = self._size
        address, shift = divmod(address, 8)
        self._input.seek(address)
        # Round the bit count up to whole bytes (including the leading shift).
        size = (size + shift + 7) >> 3
        data = self._input.read(size)
        got = len(data)
        missing = size != got
        # Only raise if the read did not just reveal the stream's end
        # (i.e. _size was not updated as a side effect of the read).
        if missing and _size == self._size:
            raise ReadStreamError(8 * size, 8 * address, 8 * got)
        return shift, data, missing

    def file(self):
        """Return an independent OS-level file object when possible."""
        if hasattr(self._input, "fileno"):
            new_fd = dup(self._input.fileno())
            new_file = fdopen(new_fd, "r")
            new_file.seek(0)
            return new_file
        return InputStream.file(self)
class StringInputStream(InputStream):
    """
    InputStream over an in-memory byte string: the whole content is
    available immediately, so the size is final from construction.
    """
    def __init__(self, data, source="<string>", **args):
        self.data = data
        InputStream.__init__(self, source=source, size=8 * len(data), **args)
        # Everything is already in memory: the stream is fully "checked".
        self._current_size = self._size

    def read(self, address, size):
        """Return (shift, bytes, False) covering 'size' bits at bit 'address'."""
        byte_addr, shift = divmod(address, 8)
        nbytes = (size + shift + 7) >> 3
        chunk = self.data[byte_addr:byte_addr + nbytes]
        got = len(chunk)
        if got != nbytes:
            raise ReadStreamError(8 * nbytes, 8 * byte_addr, 8 * got)
        return shift, chunk, False
class InputSubStream(InputStream):
    """
    A window onto another stream: bits [offset, offset+size[ of the
    parent are exposed as bits [0, size[ of this stream.
    """
    def __init__(self, stream, offset, size=None, source=None, **args):
        if offset is None:
            offset = 0
        if size is None and stream.size is not None:
            size = stream.size - offset
        if size is not None and size <= 0:
            raise ValueError("InputSubStream: offset is outside input stream")
        self.stream = stream
        self._offset = offset
        if source is None:
            source = "<substream input=%s offset=%s size=%s>" % (stream.source, offset, size)
        InputStream.__init__(self, source=source, size=size, **args)
        # Be notified when the parent learns its final size.
        self.stream.askSize(self)

    def __get_current_size(self):
        # Bits of the parent already available past our window start,
        # clamped to the window size.
        available = self.stream._current_size - self._offset
        return min(self._size, max(0, available))
    _current_size = property(__get_current_size)

    def read(self, address, size):
        """Delegate to the parent stream, shifted by the window offset."""
        return self.stream.read(address + self._offset, size)
def InputFieldStream(field, **args):
    """
    Build a stream exposing only the bits covered by 'field'.
    A field without a parent simply reuses its own stream.
    """
    parent = field.parent
    if not parent:
        return field.stream
    parent_stream = parent.stream
    args["size"] = field.size
    args.setdefault("source", parent_stream.source + field.path)
    return InputSubStream(parent_stream, field.absolute_address, **args)
class FragmentedStream(InputStream):
    """
    Presents a chain of non-contiguous fragments of a parent stream as
    one contiguous stream.  Fragments are discovered lazily by following
    each field's 'next' link; self.fragments holds tuples of
    (logical start address, parent address, fragment size), all in bits.
    """
    def __init__(self, field, **args):
        """
        field -- first fragment field; its parent's stream is the backing
                 store and field.next (if any) chains further fragments
        """
        self.stream = field.parent.stream
        data = field.getData()
        # (logical address, address in parent stream, size) of fragment 0.
        self.fragments = [ (0, data.absolute_address, data.size) ]
        self.next = field.next
        args.setdefault("source", "%s%s" % (self.stream.source, field.path))
        InputStream.__init__(self, **args)
        if not self.next:
            # Single fragment: the total size is already final.
            self._current_size = data.size
            self._setSize()

    def _feed(self, end):
        """
        Discover fragments until at least 'end' bits are mapped.
        Returns True if the stream ended before 'end' (data missing).
        """
        if self._current_size < end:
            if self.checked:
                # Size is final and smaller than requested.
                raise ReadStreamError(end - self._size, self._size)
            # Start from the last known fragment.
            a, fa, fs = self.fragments[-1]
            # Loop while the parent stream can supply the current fragment.
            while self.stream.sizeGe(fa + min(fs, end - a)):
                a += fs
                f = self.next
                if a >= end:
                    self._current_size = end
                    if a == end and not f:
                        # Exactly consumed the last fragment: size is final.
                        self._setSize()
                    return False
                if f:
                    # Advance the chain to the next fragment field.
                    self.next = f.next
                    f = f.getData()
                if not f:
                    # Chain exhausted: freeze the size at 'a'.
                    self._current_size = a
                    self._setSize()
                    return True
                fa = f.absolute_address
                fs = f.size
                self.fragments += [ (a, fa, fs) ]
            # Parent stream ran out inside the current fragment.
            self._current_size = a + max(0, self.stream.size - fa)
            self._setSize()
            return True
        return False

    def read(self, address, size):
        """
        Read 'size' bits at logical bit 'address', stitching the result
        together from the underlying fragments.
        Returns (shift, data, missing); see InputStream.read().
        """
        assert size > 0
        missing = self._feed(address + size)
        if missing:
            # Clamp the request to what actually exists.
            size = self._size - address
            if size <= 0:
                return 0, '', True
        d = []
        # Find the fragment containing 'address'.
        i = lowerBound(self.fragments, lambda x: x[0] <= address)
        a, fa, fs = self.fragments[i-1]
        # Rebase the fragment so it starts at 'address'.
        a -= address
        fa -= a
        fs += a
        s = None
        while True:
            n = min(fs, size)
            u, v, w = self.stream.read(fa, n)
            assert not w
            if s is None:
                # Bit shift comes from the first chunk only; subsequent
                # fragment reads must be byte-aligned.
                s = u
            else:
                assert not u
            d += [ v ]
            size -= n
            if not size:
                return s, ''.join(d), missing
            a, fa, fs = self.fragments[i]
            i += 1
class ConcatStream(InputStream):
    # TODO: concatenate any number of any type of stream
    """
    Concatenation of exactly two streams; the first must have a final
    ('checked') size so addresses in the second can be computed.
    Private attributes use name mangling (__size0, __streams).
    """
    def __init__(self, streams, **args):
        """
        streams -- sequence of exactly two InputStreams; streams[0] must
                   be fully checked
        """
        if len(streams) > 2 or not streams[0].checked:
            raise NotImplementedError
        self.__size0 = streams[0].size
        # Register for notification if the second stream's size is unknown.
        size1 = streams[1].askSize(self)
        if size1 is not None:
            args["size"] = self.__size0 + size1
        self.__streams = streams
        InputStream.__init__(self, **args)

    # First stream is fully available, so only the second one grows.
    _current_size = property(lambda self: self.__size0 + self.__streams[1]._current_size)

    def read(self, address, size):
        """
        Read 'size' bits at bit 'address', splitting the request across
        the two underlying streams when it straddles the boundary.
        Returns (shift, data, missing); see InputStream.read().
        """
        _size = self._size
        # Bits of the request that fall inside the first stream.
        s = self.__size0 - address
        shift, data, missing = None, '', False
        if s > 0:
            s = min(size, s)
            shift, data, w = self.__streams[0].read(address, s)
            assert not w
            # Remainder continues at the start of the second stream.
            a, s = 0, size - s
        else:
            # Entirely inside the second stream.
            a, s = -s, size
        if s:
            u, v, missing = self.__streams[1].read(a, s)
            # Only raise if the read did not just reveal the final size.
            if missing and _size == self._size:
                raise ReadStreamError(s, a)
            if shift is None:
                shift = u
            else:
                assert not u
            data += v
        return shift, data, missing
| gpl-3.0 |
cselis86/edx-platform | common/djangoapps/student/roles.py | 30 | 11451 | """
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
from abc import ABCMeta, abstractmethod
from django.contrib.auth.models import User
import logging
from student.models import CourseAccessRole
from xmodule_django.models import CourseKeyField
log = logging.getLogger(__name__)
# A list of registered access roles.
REGISTERED_ACCESS_ROLES = {}


def register_access_role(cls):
    """
    Class decorator registering an access-role class under its string
    ROLE value so roles can later be looked up by name.

    Assumes that the decorated class has a "ROLE" attribute, defining its type.
    The class is returned unchanged either way.
    """
    try:
        REGISTERED_ACCESS_ROLES[cls.ROLE] = cls
    except AttributeError:
        log.exception(u"Unable to register Access Role with attribute 'ROLE'.")
    return cls
class RoleCache(object):
    """
    Snapshot of the CourseAccessRole rows held by a single user,
    allowing cheap repeated membership checks without extra queries.
    """
    def __init__(self, user):
        # One DB query; everything afterwards is in-memory.
        self._roles = set(
            CourseAccessRole.objects.filter(user=user).all()
        )

    def has_role(self, role, course_id, org):
        """
        Return whether this RoleCache contains a role matching the
        given role name, course_id and org.
        """
        for access_role in self._roles:
            matches = (
                access_role.role == role and
                access_role.course_id == course_id and
                access_role.org == org
            )
            if matches:
                return True
        return False
class AccessRole(object):
    """
    Object representing a role with particular access to a resource.
    Abstract interface: concrete roles implement membership checks and
    user management.  (Python 2 style metaclass declaration.)
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def has_user(self, user):  # pylint: disable=unused-argument
        """
        Return whether the supplied django user has access to this role.
        Default: no access.
        """
        return False

    @abstractmethod
    def add_users(self, *users):
        """
        Add the role to the supplied django users.
        """
        pass

    @abstractmethod
    def remove_users(self, *users):
        """
        Remove the role from the supplied django users.
        """
        pass

    @abstractmethod
    def users_with_role(self):
        """
        Return a django QuerySet for all of the users with this role.
        Default: empty queryset.
        """
        return User.objects.none()
class GlobalStaff(AccessRole):
    """
    Site-wide staff role, backed directly by the django User.is_staff flag
    rather than a CourseAccessRole row.
    """
    def has_user(self, user):
        return user.is_staff

    def add_users(self, *users):
        # Only authenticated, active accounts may be promoted.
        for user in users:
            if user.is_authenticated() and user.is_active:
                user.is_staff = True
                user.save()

    def remove_users(self, *users):
        # don't check is_authenticated nor is_active on purpose:
        # demotion must always succeed.
        for user in users:
            user.is_staff = False
            user.save()

    def users_with_role(self):
        raise Exception("This operation is un-indexed, and shouldn't be used")
class RoleBase(AccessRole):
    """
    Roles by type (e.g., instructor, beta_user) and optionally org, course_key
    """
    def __init__(self, role_name, org='', course_key=None):
        """
        Create role from required role_name w/ optional org and course_key. You may just provide a role
        name if it's a global role (not constrained to an org or course). Provide org if constrained to
        an org. Provide org and course if constrained to a course. Although, you should use the subclasses
        for all of these.
        """
        super(RoleBase, self).__init__()
        self.org = org
        self.course_key = course_key
        self._role_name = role_name

    def has_user(self, user):
        """
        Return whether the supplied django user has access to this role.
        """
        if not (user.is_authenticated() and user.is_active):
            return False
        # pylint: disable=protected-access
        if not hasattr(user, '_roles'):
            # Cache a list of tuples identifying the particular roles that a user has
            # Stored as tuples, rather than django models, to make it cheaper to construct objects for comparison
            user._roles = RoleCache(user)
        return user._roles.has_role(self._role_name, self.course_key, self.org)

    def add_users(self, *users):
        """
        Add the supplied django users to this role.
        """
        # silently ignores anonymous and inactive users so that any that are
        # legit get updated.
        from student.models import CourseAccessRole
        for user in users:
            # Bug fix: is_authenticated is a method in this module (it is
            # called with parens in has_user above and elsewhere); the
            # original referenced the bound method without calling it,
            # which is always truthy and let anonymous users be granted
            # roles.
            if user.is_authenticated() and user.is_active and not self.has_user(user):
                entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org)
                entry.save()
                # Invalidate the per-user role cache so the new role is seen.
                if hasattr(user, '_roles'):
                    del user._roles

    def remove_users(self, *users):
        """
        Remove the supplied django users from this role.
        """
        entries = CourseAccessRole.objects.filter(
            user__in=users, role=self._role_name, org=self.org, course_id=self.course_key
        )
        entries.delete()
        # Invalidate each affected user's cached roles.
        for user in users:
            if hasattr(user, '_roles'):
                del user._roles

    def users_with_role(self):
        """
        Return a django QuerySet for all of the users with this role
        """
        # Org roles don't query by CourseKey, so use CourseKeyField.Empty for that query
        if self.course_key is None:
            self.course_key = CourseKeyField.Empty
        entries = User.objects.filter(
            courseaccessrole__role=self._role_name,
            courseaccessrole__org=self.org,
            courseaccessrole__course_id=self.course_key
        )
        return entries
class CourseRole(RoleBase):
    """
    A named role in a particular course
    """
    def __init__(self, role, course_key):
        """
        Args:
            course_key (CourseKey)
        """
        # A course role is implicitly scoped to the course's org as well.
        super(CourseRole, self).__init__(role, course_key.org, course_key)

    @classmethod
    def course_group_already_exists(cls, course_key):
        """
        Return whether any access-role rows already exist for this course.
        (Fix: first parameter of a classmethod is conventionally 'cls',
        not 'self'.)
        """
        return CourseAccessRole.objects.filter(org=course_key.org, course_id=course_key).exists()
class OrgRole(RoleBase):
    """
    A named role in a particular org independent of course
    """
    def __init__(self, role, org):
        # No course_key: the role applies across the whole organization.
        super(OrgRole, self).__init__(role, org)
# The classes below are thin CourseRole specializations: each pins a ROLE
# string and delegates everything else.  Decorated ones are looked up by
# that string via REGISTERED_ACCESS_ROLES.

@register_access_role
class CourseStaffRole(CourseRole):
    """A Staff member of a course"""
    ROLE = 'staff'

    def __init__(self, *args, **kwargs):
        super(CourseStaffRole, self).__init__(self.ROLE, *args, **kwargs)


@register_access_role
class CourseInstructorRole(CourseRole):
    """A course Instructor"""
    ROLE = 'instructor'

    def __init__(self, *args, **kwargs):
        super(CourseInstructorRole, self).__init__(self.ROLE, *args, **kwargs)


@register_access_role
class CourseFinanceAdminRole(CourseRole):
    """A course staff member with privileges to review financial data."""
    ROLE = 'finance_admin'

    def __init__(self, *args, **kwargs):
        super(CourseFinanceAdminRole, self).__init__(self.ROLE, *args, **kwargs)


@register_access_role
class CourseSalesAdminRole(CourseRole):
    """A course staff member with privileges to perform sales operations. """
    ROLE = 'sales_admin'

    def __init__(self, *args, **kwargs):
        super(CourseSalesAdminRole, self).__init__(self.ROLE, *args, **kwargs)


@register_access_role
class CourseBetaTesterRole(CourseRole):
    """A course Beta Tester"""
    ROLE = 'beta_testers'

    def __init__(self, *args, **kwargs):
        super(CourseBetaTesterRole, self).__init__(self.ROLE, *args, **kwargs)


@register_access_role
class LibraryUserRole(CourseRole):
    """
    A user who can view a library and import content from it, but not edit it.
    Used in Studio only.
    """
    ROLE = 'library_user'

    def __init__(self, *args, **kwargs):
        super(LibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)


# NOTE: not decorated with @register_access_role, so this role is not
# reachable through REGISTERED_ACCESS_ROLES.
class CourseCcxCoachRole(CourseRole):
    """A CCX Coach"""
    ROLE = 'ccx_coach'

    def __init__(self, *args, **kwargs):
        super(CourseCcxCoachRole, self).__init__(self.ROLE, *args, **kwargs)
# Org-wide counterparts of the course roles above, plus the global
# course-creator role.

class OrgStaffRole(OrgRole):
    """An organization staff member"""
    def __init__(self, *args, **kwargs):
        super(OrgStaffRole, self).__init__('staff', *args, **kwargs)


class OrgInstructorRole(OrgRole):
    """An organization instructor"""
    def __init__(self, *args, **kwargs):
        super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)


class OrgLibraryUserRole(OrgRole):
    """
    A user who can view any libraries in an org and import content from them, but not edit them.
    Used in Studio only.
    """
    # Deliberately shares the role string with the course-level LibraryUserRole.
    ROLE = LibraryUserRole.ROLE

    def __init__(self, *args, **kwargs):
        super(OrgLibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)


@register_access_role
class CourseCreatorRole(RoleBase):
    """
    This is the group of people who have permission to create new courses (we may want to eventually
    make this an org based role).
    """
    ROLE = "course_creator_group"

    def __init__(self, *args, **kwargs):
        super(CourseCreatorRole, self).__init__(self.ROLE, *args, **kwargs)
class UserBasedRole(object):
    """
    Backward mapping: given a user, manipulate the courses and roles
    """
    def __init__(self, user, role):
        """
        Create a UserBasedRole accessor: for a given user and role (e.g., "instructor")
        """
        self.user = user
        self.role = role

    def has_course(self, course_key):
        """
        Return whether the role's user has the configured role access to the passed course
        """
        if not (self.user.is_authenticated() and self.user.is_active):
            return False
        # pylint: disable=protected-access
        if not hasattr(self.user, '_roles'):
            self.user._roles = RoleCache(self.user)
        return self.user._roles.has_role(self.role, course_key, course_key.org)

    def add_course(self, *course_keys):
        """
        Grant this object's user the object's role for the supplied courses
        """
        # Bug fix: is_authenticated is a method in this module (called with
        # parens in has_course above); the original referenced the bound
        # method without calling it, which is always truthy, so the
        # anonymous-user guard never fired.
        if self.user.is_authenticated() and self.user.is_active:
            for course_key in course_keys:
                entry = CourseAccessRole(user=self.user, role=self.role, course_id=course_key, org=course_key.org)
                entry.save()
            # Invalidate the cached roles so subsequent checks see the grant.
            if hasattr(self.user, '_roles'):
                del self.user._roles
        else:
            raise ValueError("user is not active. Cannot grant access to courses")

    def remove_courses(self, *course_keys):
        """
        Remove the supplied courses from this user's configured role.
        """
        entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)
        entries.delete()
        if hasattr(self.user, '_roles'):
            del self.user._roles

    def courses_with_role(self):
        """
        Return a django QuerySet for all of the courses with this user x role. You can access
        any of these properties on each result record:
        * user (will be self.user--thus uninteresting)
        * org
        * course_id
        * role (will be self.role--thus uninteresting)
        """
        return CourseAccessRole.objects.filter(role=self.role, user=self.user)
| agpl-3.0 |
jborean93/ansible | test/units/playbook/test_play_context.py | 11 | 3029 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.playbook.play import Play
from ansible.plugins.loader import become_loader
from ansible.utils import context_objects as co
@pytest.fixture
def parser():
    """
    Fixture building an argument parser carrying the full set of ansible
    CLI option groups, so test_play_context can parse realistic argv.
    """
    parser = opt_help.create_base_parser('testparser')
    # Register the option groups; kept in this order since argparse
    # registration order affects conflict detection and help output.
    opt_help.add_runas_options(parser)
    opt_help.add_meta_options(parser)
    opt_help.add_runtask_options(parser)
    opt_help.add_vault_options(parser)
    opt_help.add_async_options(parser)
    opt_help.add_connect_options(parser)
    opt_help.add_subset_options(parser)
    opt_help.add_check_options(parser)
    opt_help.add_inventory_options(parser)
    return parser
@pytest.fixture
def reset_cli_args():
    """
    Clear the GlobalCLIArgs singleton before and after each test so
    CLI-argument state cannot leak between tests.
    """
    co.GlobalCLIArgs._Singleton__instance = None
    yield
    co.GlobalCLIArgs._Singleton__instance = None
def test_play_context(mocker, parser, reset_cli_args):
    """
    Exercise PlayContext construction and its task/variable override
    behaviour: defaults from CLI options, play-level attributes, and
    task/inventory-variable precedence.
    """
    # Parse CLI options and install them as the global context.
    options = parser.parse_args(['-vv', '--check'])
    context._init_global_context(options)
    play = Play.load({})
    play_context = PlayContext(play=play)

    # Defaults: nothing set on the play, so values come from config/CLI.
    assert play_context.remote_addr is None
    assert play_context.remote_user is None
    assert play_context.password == ''
    assert play_context.private_key_file == C.DEFAULT_PRIVATE_KEY_FILE
    assert play_context.timeout == C.DEFAULT_TIMEOUT
    assert play_context.verbosity == 2          # from '-vv'
    assert play_context.check_mode is True      # from '--check'

    # Play-level attribute is picked up by the context.
    mock_play = mocker.MagicMock()
    mock_play.force_handlers = True
    play_context = PlayContext(play=mock_play)
    assert play_context.force_handlers is True

    # Build a mock task with explicit connection/become settings.
    mock_task = mocker.MagicMock()
    mock_task.connection = 'mocktask'
    mock_task.remote_user = 'mocktask'
    mock_task.port = 1234
    mock_task.no_log = True
    mock_task.become = True
    mock_task.become_method = 'mocktask'
    mock_task.become_user = 'mocktaskroot'
    mock_task.become_pass = 'mocktaskpass'
    mock_task._local_action = False
    mock_task.delegate_to = None

    # Inventory variables that compete with the task settings.
    all_vars = dict(
        ansible_connection='mock_inventory',
        ansible_ssh_port=4321,
    )
    mock_templar = mocker.MagicMock()
    play_context = PlayContext()
    play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
    # Inventory var wins for connection; task wins for remote_user/no_log.
    assert play_context.connection == 'mock_inventory'
    assert play_context.remote_user == 'mocktask'
    assert play_context.no_log is True

    # Flipping the task's no_log is reflected after a new override pass.
    mock_task.no_log = False
    play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
    assert play_context.no_log is False
| gpl-3.0 |
stratton-oakcoin/oakcoin | test/functional/bip68-112-113-p2p.py | 1 | 27086 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test activation of the first version bits soft fork.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed block chain with the 82 inputs will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
base_relative_locktime = 10
seq_disable_flag = 1 << 31
seq_random_high_bit = 1 << 25
seq_type_flag = 1 << 22
seq_random_low_bit = 1 << 18

# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = [
    [
        [
            [
                base_relative_locktime
                | (seq_disable_flag if b31 else 0)
                | (seq_random_high_bit if b25 else 0)
                | (seq_type_flag if b22 else 0)
                | (seq_random_low_bit if b18 else 0)
                for b18 in range(2)
            ]
            for b22 in range(2)
        ]
        for b25 in range(2)
    ]
    for b31 in range(2)
]
def all_rlt_txs(txarray):
    """
    Flatten a 2x2x2x2 nested tx array (indexed [b31][b25][b22][b18])
    into a flat 16-element list, b31-major order.
    """
    flat = []
    for b25_level in txarray:
        for b22_level in b25_level:
            for b18_level in b22_level:
                flat.extend(b18_level)
    return flat
class BIP68_112_113Test(ComparisonTestFramework):
    def __init__(self):
        """Configure a single whitelisted node advertising block version 4."""
        super().__init__()
        self.num_nodes = 1
        self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4']]
    def run_test(self):
        """Wire up the comparison TestManager and run the block scenarios from get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
        test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("49.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
    def create_test_block(self, txs, version = 536870912):
        """
        Build and solve a block on top of self.tip containing 'txs',
        timestamped 10 minutes after the previous block.
        Default version 536870912 == 0x20000000 (versionbits base).
        """
        block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
        block.nVersion = version
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block
    def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
        """
        Build the 2x2x2x2 nested array of signed BIP68 txs: one tx per
        relative_locktimes[b31][b25][b22][b18] value (plus
        locktime_delta), each spending the corresponding bip68 input.
        """
        txs = []
        assert(len(bip68inputs) >= 16)
        i = 0
        for b31 in range(2):
            b25txs = []
            for b25 in range(2):
                b22txs = []
                for b22 in range(2):
                    b18txs = []
                    for b18 in range(2):
                        tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        tx.nVersion = txversion
                        # BIP68 semantics only apply via the nSequence field.
                        tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        b18txs.append(self.sign_transaction(self.nodes[0], tx))
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
    def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
        """
        Build the 2x2x2x2 nested array of signed BIP112 (OP_CSV) txs.
        If varyOP_CSV is True: nSequence is fixed at
        base_relative_locktime (+ delta) and the OP_CSV operand varies
        over relative_locktimes; otherwise the roles are swapped.
        """
        txs = []
        assert(len(bip112inputs) >= 16)
        i = 0
        for b31 in range(2):
            b25txs = []
            for b25 in range(2):
                b22txs = []
                for b22 in range(2):
                    b18txs = []
                    for b18 in range(2):
                        tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
                            tx.vin[0].nSequence = base_relative_locktime + locktime_delta
                        else: # vary nSequence instead, OP_CSV is fixed
                            tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        tx.nVersion = txversion
                        signtx = self.sign_transaction(self.nodes[0], tx)
                        # Prepend <locktime> OP_CSV OP_DROP to the signed scriptSig.
                        if (varyOP_CSV):
                            signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        else:
                            signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        b18txs.append(signtx)
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
def get_tests(self):
    """Generate the full BIP68/BIP112/BIP113 test sequence.

    Drives the 'csv' deployment through the BIP9 states (defined ->
    started -> locked_in -> active) and, at each stage, yields
    TestInstances of blocks/transactions whose acceptance is asserted.
    Each yield is numbered in a trailing comment; later steps depend on
    the chain state built by earlier ones, so order matters.
    """
    long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
    self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
    self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
    self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
    self.tipheight = 82 # height of the next block to build
    self.last_block_time = long_past_time
    self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
    self.nodeaddress = self.nodes[0].getnewaddress()

    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
    test_blocks = self.generate_blocks(61, 4)
    yield TestInstance(test_blocks, sync_every_block=False) # 1
    # Advanced from DEFINED to STARTED, height = 143
    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')

    # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
    # using a variety of bits to simulate multiple parallel softforks
    test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
    test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
    test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
    test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
    yield TestInstance(test_blocks, sync_every_block=False) # 2
    # Failed to advance past STARTED, height = 287
    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')

    # 108 out of 144 signal bit 0 to achieve lock-in
    # using a variety of bits to simulate multiple parallel softforks
    test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
    test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
    test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
    test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
    yield TestInstance(test_blocks, sync_every_block=False) # 3
    # Advanced from STARTED to LOCKED_IN, height = 431
    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')

    # 140 more version 4 blocks
    test_blocks = self.generate_blocks(140, 4)
    yield TestInstance(test_blocks, sync_every_block=False) # 4

    ### Inputs at height = 572
    # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
    # Note we reuse inputs for v1 and v2 txs so must test these separately
    # 16 normal inputs
    bip68inputs = []
    for i in range(16):
        bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
    # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112basicinputs = []
    for j in range(2):
        inputs = []
        for i in range(16):
            inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
        bip112basicinputs.append(inputs)
    # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112diverseinputs = []
    for j in range(2):
        inputs = []
        for i in range(16):
            inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
        bip112diverseinputs.append(inputs)
    # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
    # 1 normal input
    bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)

    self.nodes[0].setmocktime(self.last_block_time + 600)
    inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
    self.nodes[0].setmocktime(0)
    self.tip = int("0x" + inputblockhash, 0)
    self.tipheight += 1
    self.last_block_time += 600
    assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)

    # 2 more version 4 blocks
    test_blocks = self.generate_blocks(2, 4)
    yield TestInstance(test_blocks, sync_every_block=False) # 5
    # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')

    # Test both version 1 and version 2 transactions for all tests
    # BIP113 test transaction will be modified before each use to put in appropriate block time
    bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
    bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
    bip113tx_v1.nVersion = 1
    bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
    bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
    bip113tx_v2.nVersion = 2

    # For BIP68 test all 16 relative sequence locktimes
    bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
    bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)

    # For BIP112 test:
    # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
    bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
    bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
    # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
    bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
    bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
    # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
    bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
    bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
    # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
    bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
    bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
    # -1 OP_CSV OP_DROP input
    bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
    bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)

    ### TESTING ###
    ##################################
    ### Before Soft Forks Activate ###
    ##################################
    # All txs should pass
    ### Version 1 txs ###
    success_txs = []
    # add BIP113 tx and -1 CSV tx
    bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
    bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
    success_txs.append(bip113signed1)
    success_txs.append(bip112tx_special_v1)
    # add BIP 68 txs
    success_txs.extend(all_rlt_txs(bip68txs_v1))
    # add BIP 112 with seq=10 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
    # try BIP 112 with seq=9 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
    yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    ### Version 2 txs ###
    success_txs = []
    # add BIP113 tx and -1 CSV tx
    bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
    bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
    success_txs.append(bip113signed2)
    success_txs.append(bip112tx_special_v2)
    # add BIP 68 txs
    success_txs.extend(all_rlt_txs(bip68txs_v2))
    # add BIP 112 with seq=10 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
    # try BIP 112 with seq=9 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
    yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
    test_blocks = self.generate_blocks(1, 4)
    yield TestInstance(test_blocks, sync_every_block=False) # 8
    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')

    #################################
    ### After Soft Forks Activate ###
    #################################
    ### BIP 113 ###
    # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
    bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
    bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
    bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
    bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
    for bip113tx in [bip113signed1, bip113signed2]:
        yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
    # BIP 113 tests should now pass if the locktime is < MTP
    bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
    bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
    bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
    bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
    for bip113tx in [bip113signed1, bip113signed2]:
        yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # Next block height = 580 after 4 blocks of random version
    test_blocks = self.generate_blocks(4, 1234)
    yield TestInstance(test_blocks, sync_every_block=False) # 13

    ### BIP 68 ###
    ### Version 1 txs ###
    # All still pass
    success_txs = []
    success_txs.extend(all_rlt_txs(bip68txs_v1))
    yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    ### Version 2 txs ###
    bip68success_txs = []
    # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
    for b25 in range(2):
        for b22 in range(2):
            for b18 in range(2):
                bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
    yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
    bip68timetxs = []
    for b25 in range(2):
        for b18 in range(2):
            bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
    for tx in bip68timetxs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
    bip68heighttxs = []
    for b25 in range(2):
        for b18 in range(2):
            bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
    for tx in bip68heighttxs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23

    # Advance one block to 581
    test_blocks = self.generate_blocks(1, 1234)
    yield TestInstance(test_blocks, sync_every_block=False) # 24

    # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
    bip68success_txs.extend(bip68timetxs)
    yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    for tx in bip68heighttxs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29

    # Advance one block to 582
    test_blocks = self.generate_blocks(1, 1234)
    yield TestInstance(test_blocks, sync_every_block=False) # 30

    # All BIP 68 txs should pass
    bip68success_txs.extend(bip68heighttxs)
    yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    ### BIP 112 ###
    ### Version 1 txs ###
    # -1 OP_CSV tx should fail
    yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
    success_txs = []
    for b25 in range(2):
        for b22 in range(2):
            for b18 in range(2):
                success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
                success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
    yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
    fail_txs = []
    fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
    fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
    for b25 in range(2):
        for b22 in range(2):
            for b18 in range(2):
                fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
                fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
    for tx in fail_txs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81

    ### Version 2 txs ###
    # -1 OP_CSV tx should fail
    yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
    success_txs = []
    for b25 in range(2):
        for b22 in range(2):
            for b18 in range(2):
                success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
                success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
    yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
    # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
    fail_txs = []
    fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
    for b25 in range(2):
        for b22 in range(2):
            for b18 in range(2):
                fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
    for tx in fail_txs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
    fail_txs = []
    for b25 in range(2):
        for b22 in range(2):
            for b18 in range(2):
                fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
    for tx in fail_txs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115

    # If sequencelock types mismatch, tx should fail
    fail_txs = []
    for b25 in range(2):
        for b18 in range(2):
            fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
            fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
    for tx in fail_txs:
        yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123

    # Remaining txs should pass, just test masking works properly
    success_txs = []
    for b25 in range(2):
        for b18 in range(2):
            success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
            success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
    yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # Additional test, of checking that comparison of two time types works properly
    time_txs = []
    for b25 in range(2):
        for b18 in range(2):
            tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
            tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
            signtx = self.sign_transaction(self.nodes[0], tx)
            time_txs.append(signtx)
    yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    ### Missing aspects of test
    ## Testing empty stack fails
# Run the BIP68/112/113 comparison-tool test directly.
if __name__ == '__main__':
    BIP68_112_113Test().main()
| mit |
hirofumi0810/tensorflow_end2end_speech_recognition | examples/svc/training/train_ctc.py | 1 | 15187 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Train the CTC model (SVC corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile, abspath
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
sys.path.append(abspath('../../../'))
from experiments.svc.data.load_dataset_ctc import Dataset
from experiments.svc.metrics.ctc import do_eval_fmeasure, do_eval_fmeasure_time
from utils.io.labels.sparsetensor import list2sparsetensor
from utils.training.learning_rate_controller import Controller
from utils.training.plot import plot_loss, plot_ler
from utils.directory import mkdir_join, mkdir
from utils.parameter import count_total_parameters
from models.ctc.ctc import CTC
def do_train(model, params):
    """Run training.

    Builds the CTC graph, runs the train loop, periodically logs
    loss/LER, and per epoch evaluates F-measure on the dev set,
    checkpointing only when the dev score improves.

    Args:
        model: the model to train
        params (dict): A dictionary of parameters
    """
    # Load dataset
    train_data = Dataset(
        data_type='train', label_type=params['label_type'],
        batch_size=params['batch_size'], max_epoch=params['num_epoch'],
        splice=params['splice'],
        num_stack=params['num_stack'], num_skip=params['num_skip'],
        shuffle=True)
    dev_data = Dataset(
        data_type='dev', label_type=params['label_type'],
        batch_size=params['batch_size'], splice=params['splice'],
        num_stack=params['num_stack'], num_skip=params['num_skip'],
        shuffle=False)
    # NOTE(review): test_data loads the 'dev' split as well — looks
    # deliberate here, but confirm a real 'test' split was not intended.
    test_data = Dataset(
        data_type='dev', label_type=params['label_type'],
        batch_size=params['batch_size'], splice=params['splice'],
        num_stack=params['num_stack'], num_skip=params['num_skip'],
        shuffle=False)

    # Tell TensorFlow that the model will be built into the default graph
    with tf.Graph().as_default():

        # Define placeholders
        model.create_placeholders()
        learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')

        # Add to the graph each operation (including model definition)
        loss_op, logits = model.compute_loss(
            model.inputs_pl_list[0],
            model.labels_pl_list[0],
            model.inputs_seq_len_pl_list[0],
            model.keep_prob_pl_list[0])
        train_op = model.train(
            loss_op,
            optimizer=params['optimizer'],
            learning_rate=learning_rate_pl)
        decode_op = model.decoder(logits,
                                  model.inputs_seq_len_pl_list[0],
                                  beam_width=params['beam_width'])
        ler_op = model.compute_ler(decode_op, model.labels_pl_list[0])
        posteriors_op = model.posteriors(logits, blank_prior=1)

        # Define learning rate controller
        lr_controller = Controller(
            learning_rate_init=params['learning_rate'],
            decay_start_epoch=params['decay_start_epoch'],
            decay_rate=params['decay_rate'],
            decay_patient_epoch=params['decay_patient_epoch'],
            lower_better=False)

        # Build the summary tensor based on the TensorFlow collection of
        # summaries
        summary_train = tf.summary.merge(model.summaries_train)
        summary_dev = tf.summary.merge(model.summaries_dev)

        # Add the variable initializer operation
        init_op = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints
        saver = tf.train.Saver(max_to_keep=None)

        # Count total parameters
        parameters_dict, total_parameters = count_total_parameters(
            tf.trainable_variables())
        for parameter_name in sorted(parameters_dict.keys()):
            print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
        print("Total %d variables, %s M parameters" %
              (len(parameters_dict.keys()),
               "{:,}".format(total_parameters / 1000000)))

        csv_steps, csv_loss_train, csv_loss_dev = [], [], []
        csv_ler_train, csv_ler_dev = [], []
        # Create a session for running operation on the graph
        with tf.Session() as sess:

            # Instantiate a SummaryWriter to output summaries and the graph
            summary_writer = tf.summary.FileWriter(
                model.save_path, sess.graph)

            # Initialize parameters
            sess.run(init_op)

            # Train model
            start_time_train = time.time()
            start_time_epoch = time.time()
            start_time_step = time.time()
            fmean_dev_best = 0
            fmean_time_dev_best = 0
            learning_rate = float(params['learning_rate'])
            for step, (data, is_new_epoch) in enumerate(train_data):

                # Create feed dictionary for next mini batch (train)
                inputs, labels, inputs_seq_len, _ = data
                feed_dict_train = {
                    model.inputs_pl_list[0]: inputs[0],
                    model.labels_pl_list[0]: list2sparsetensor(
                        labels[0], padded_value=train_data.padded_value),
                    model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
                    model.keep_prob_pl_list[0]: 1 - float(params['dropout']),
                    learning_rate_pl: learning_rate
                }

                # Update parameters
                sess.run(train_op, feed_dict=feed_dict_train)

                if (step + 1) % params['print_step'] == 0:

                    # Create feed dictionary for next mini batch (dev)
                    (inputs, labels, inputs_seq_len, _), _ = dev_data.next()
                    feed_dict_dev = {
                        model.inputs_pl_list[0]: inputs[0],
                        model.labels_pl_list[0]: list2sparsetensor(
                            labels[0], padded_value=dev_data.padded_value),
                        model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
                        model.keep_prob_pl_list[0]: 1.0
                    }

                    # Compute loss
                    loss_train = sess.run(loss_op, feed_dict=feed_dict_train)
                    loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
                    csv_steps.append(step)
                    csv_loss_train.append(loss_train)
                    csv_loss_dev.append(loss_dev)

                    # Change to evaluation mode (dropout off)
                    feed_dict_train[model.keep_prob_pl_list[0]] = 1.0

                    # Compute accuracy & update event files
                    ler_train, summary_str_train = sess.run(
                        [ler_op, summary_train], feed_dict=feed_dict_train)
                    ler_dev, summary_str_dev = sess.run(
                        [ler_op, summary_dev], feed_dict=feed_dict_dev)
                    csv_ler_train.append(ler_train)
                    csv_ler_dev.append(ler_dev)
                    summary_writer.add_summary(summary_str_train, step + 1)
                    summary_writer.add_summary(summary_str_dev, step + 1)
                    summary_writer.flush()

                    duration_step = time.time() - start_time_step
                    print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / ler = %.3f (%.3f) / lr = %.5f (%.3f min)" %
                          (step + 1, train_data.epoch_detail, loss_train, loss_dev, ler_train, ler_dev,
                           learning_rate, duration_step / 60))
                    sys.stdout.flush()
                    start_time_step = time.time()

                # Save checkpoint and evaluate model per epoch
                if is_new_epoch:
                    duration_epoch = time.time() - start_time_epoch
                    print('-----EPOCH:%d (%.3f min)-----' %
                          (train_data.epoch, duration_epoch / 60))

                    # Save fugure of loss & ler
                    plot_loss(csv_loss_train, csv_loss_dev, csv_steps,
                              save_path=model.save_path)
                    plot_ler(csv_ler_train, csv_ler_dev, csv_steps,
                             label_type=params['label_type'],
                             save_path=model.save_path)

                    if train_data.epoch >= params['eval_start_epoch']:
                        start_time_eval = time.time()
                        print('=== Dev Data Evaluation ===')
                        fmean_dev_epoch, df_acc = do_eval_fmeasure(
                            session=sess,
                            decode_op=decode_op,
                            model=model,
                            dataset=dev_data,
                            eval_batch_size=params['batch_size'])
                        print(df_acc)
                        print(' F-measure: %f %%' % (fmean_dev_epoch))

                        if fmean_dev_epoch > fmean_dev_best:
                            fmean_dev_best = fmean_dev_epoch
                            print('■■■ ↑Best Score (F-measure)↑ ■■■')

                            # Save model only when best accuracy is
                            # obtained (check point)
                            checkpoint_file = join(
                                model.save_path, 'model.ckpt')
                            save_path = saver.save(
                                sess, checkpoint_file, global_step=train_data.epoch)
                            print("Model saved in file: %s" % save_path)

                            print('=== Test Data Evaluation ===')
                            fmean_test_epoch, df_acc = do_eval_fmeasure(
                                session=sess,
                                decode_op=decode_op,
                                model=model,
                                dataset=test_data,
                                eval_batch_size=params['batch_size'])
                            print(df_acc)
                            print(' F-measure: %f %%' % (fmean_test_epoch))

                        # Time-resolved F-measure evaluation kept for
                        # reference but disabled.
                        # fmean_time_dev_epoch, df_acc = do_eval_fmeasure_time(
                        #     session=sess,
                        #     decode_op=decode_op,
                        #     posteriors_op=posteriors_op,
                        #     model=model,
                        #     dataset=dev_data,
                        #     eval_batch_size=params['batch_size'])
                        # print(df_acc)
                        # print(' Time F-measure: %f %%' %
                        #       (fmean_time_dev_epoch))
                        # if fmean_time_dev_best < fmean_time_dev_epoch:
                        #     fmean_time_dev_best = fmean_time_dev_epoch
                        #     print('■■■ ↑Best Score (Time F-measure)↑ ■■■')
                        #     fmean_time_test_epoch, df_acc = do_eval_fmeasure_time(
                        #         session=sess,
                        #         decode_op=decode_op,
                        #         posteriors_op=posteriors_op,
                        #         model=model,
                        #         dataset=test_data,
                        #         eval_batch_size=params['batch_size'])
                        #     print(df_acc)
                        #     print(' Time F-measure: %f %%' %
                        #           (fmean_time_test_epoch))

                        duration_eval = time.time() - start_time_eval
                        print('Evaluation time: %.3f min' %
                              (duration_eval / 60))

                        # Update learning rate (decays on dev F-measure)
                        learning_rate = lr_controller.decay_lr(
                            learning_rate=learning_rate,
                            epoch=train_data.epoch,
                            value=fmean_dev_epoch)

                    start_time_epoch = time.time()

            duration_train = time.time() - start_time_train
            print('Total time: %.3f hour' % (duration_train / 3600))

            # Training was finished correctly
            with open(join(model.save_path, 'complete.txt'), 'w') as f:
                f.write('')
def main(config_path, model_save_path):
    """Load a YAML config, build the CTC model, and launch training.

    Args:
        config_path (str): path to the experiment config file (.yml)
        model_save_path (str): root directory under which checkpoints,
            the copied config and the train log are written

    Raises:
        ValueError: if the config names an unknown feature or label type.
    """
    # Load a config file (.yml).  safe_load avoids arbitrary Python object
    # construction and keeps working with PyYAML >= 6, where plain
    # yaml.load() requires an explicit Loader argument.
    with open(config_path, "r") as f:
        config = yaml.safe_load(f)
    params = config['param']

    # Input feature dimensionality.  Fail fast on an unknown feature type
    # instead of hitting an UnboundLocalError on input_size below.
    if params['feature'] == 'fbank':
        input_size = 123
    elif params['feature'] == 'is13':
        input_size = 141
    else:
        raise ValueError("Unknown feature type: %s" % params['feature'])

    # Number of output classes, except for a blank class
    if params['label_type'] in ['original', 'phone3']:
        params['num_classes'] = 3
    elif params['label_type'] == 'phone4':
        params['num_classes'] = 4
    elif params['label_type'] == 'phone43':
        params['num_classes'] = 43
    else:
        raise ValueError("Unknown label type: %s" % params['label_type'])

    # Model setting
    model = CTC(encoder_type=params['encoder_type'],
                input_size=input_size * params['num_stack'],
                splice=params['splice'],
                num_units=params['num_units'],
                num_layers=params['num_layers'],
                num_classes=params['num_classes'],
                lstm_impl=params['lstm_impl'],
                use_peephole=params['use_peephole'],
                parameter_init=params['weight_init'],
                clip_grad_norm=params['clip_grad_norm'],
                clip_activation=params['clip_activation'],
                num_proj=params['num_proj'],
                weight_decay=params['weight_decay'])

    # Set process name (before the hyper-parameter suffixes are appended)
    setproctitle('tf_svc_' + model.name + '_' + params['label_type'])

    # Encode the main hyper-parameters into the model name
    model.name += '_' + str(params['num_units'])
    model.name += '_' + str(params['num_layers'])
    model.name += '_' + params['optimizer']
    model.name += '_lr' + str(params['learning_rate'])
    if params['num_proj'] != 0:
        model.name += '_proj' + str(params['num_proj'])
    if params['dropout'] != 0:
        model.name += '_drop' + str(params['dropout'])
    if params['num_stack'] != 1:
        model.name += '_stack' + str(params['num_stack'])
    if params['weight_decay'] != 0:
        model.name += '_wd' + str(params['weight_decay'])

    # Set save path
    model.save_path = mkdir_join(
        model_save_path, 'ctc', params['label_type'], model.name)

    # Reset model directory: find the first unused numbered directory so
    # finished or in-progress runs are never overwritten.
    model_index = 0
    new_model_path = model.save_path
    while True:
        if isfile(join(new_model_path, 'complete.txt')):
            # Training of the first model have been finished
            model_index += 1
            new_model_path = model.save_path + '_' + str(model_index)
        elif isfile(join(new_model_path, 'config.yml')):
            # Training of the first model have not been finished yet
            model_index += 1
            new_model_path = model.save_path + '_' + str(model_index)
        else:
            break
    model.save_path = mkdir(new_model_path)

    # Save config file
    shutil.copyfile(config_path, join(model.save_path, 'config.yml'))

    # Redirect all prints to the per-run log file
    sys.stdout = open(join(model.save_path, 'train.log'), 'w')
    # TODO(hirofumi): change to logger
    do_train(model=model, params=params)
if __name__ == '__main__':
    # Expect exactly: script name, config path, model save directory.
    if len(sys.argv) != 3:
        raise ValueError('Length of args should be 3.')
    _, config_file, save_dir = sys.argv
    main(config_path=config_file, model_save_path=save_dir)
| mit |
ndingwall/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 6 | 9508 | """Testing for Spectral Biclustering methods"""
import numpy as np
import pytest
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster import SpectralCoclustering
from sklearn.cluster import SpectralBiclustering
from sklearn.cluster._bicluster import _scale_normalize
from sklearn.cluster._bicluster import _bistochastic_normalize
from sklearn.cluster._bicluster import _log_normalize
from sklearn.metrics import (consensus_score, v_measure_score)
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BiclusterMixin, BaseEstimator):
    """Minimal bicluster estimator used only to exercise ``get_submatrix``."""

    def __init__(self):
        pass

    def get_indices(self, i):
        # Fixed row/column selections, reproducing the old get_submatrix test.
        row_mask = [True, True, False, False, True]
        col_mask = [False, False, True, True]
        return np.where(row_mask)[0], np.where(col_mask)[0]
def test_get_submatrix():
    """get_submatrix returns the right values and a copy, for dense,
    sparse, and list inputs."""
    data = np.arange(20).reshape(5, 4)
    model = MockBiclustering()
    expected = [[2, 3],
                [6, 7],
                [18, 19]]

    for X in (data, csr_matrix(data), data.tolist()):
        sub = model.get_submatrix(0, X)
        if issparse(sub):
            sub = sub.toarray()
        assert_array_equal(sub, expected)
        # Writing into the submatrix must not modify the original matrix.
        sub[:] = -1
        if issparse(X):
            X = X.toarray()
        assert np.all(X != -1)
def _test_shape_indices(model):
    """Check get_shape and get_indices agree on a fitted model."""
    for idx in range(model.n_clusters):
        n_rows, n_cols = model.get_shape(idx)
        row_idx, col_idx = model.get_indices(idx)
        assert len(row_idx) == n_rows
        assert len(col_idx) == n_cols
def test_spectral_coclustering():
    """Dhillon's Spectral CoClustering recovers three planted biclusters
    exactly, for dense and sparse input and across solver settings."""
    grid = ParameterGrid({'svd_method': ['randomized', 'arpack'],
                          'n_svd_vecs': [None, 20],
                          'mini_batch': [False, True],
                          'init': ['k-means++'],
                          'n_init': [10]})
    seed = 0
    S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
                                    random_state=seed)
    S -= S.min()               # needs to be nonnegative before making it sparse
    S = np.where(S < 1, 0, S)  # threshold some values
    for mat in (S, csr_matrix(S)):
        for kwargs in grid:
            model = SpectralCoclustering(n_clusters=3,
                                         random_state=seed,
                                         **kwargs)
            model.fit(mat)

            assert model.rows_.shape == (3, 30)
            # Every row and column belongs to exactly one bicluster.
            assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
            assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
            # Perfect recovery of the planted structure.
            assert consensus_score(model.biclusters_,
                                   (rows, cols)) == 1
            _test_shape_indices(model)
def test_spectral_biclustering():
    # Test Kluger methods on a checkerboard dataset.
    S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
                                      random_state=0)
    # Each non-default setting is tried one at a time on top of the
    # default estimator configuration.
    non_default_params = {'method': ['scale', 'log'],
                          'svd_method': ['arpack'],
                          'n_svd_vecs': [20],
                          'mini_batch': [True]}

    for mat in (S, csr_matrix(S)):
        for param_name, param_values in non_default_params.items():
            for param_value in param_values:

                model = SpectralBiclustering(
                    n_clusters=3,
                    n_init=3,
                    init='k-means++',
                    random_state=0,
                )
                model.set_params(**dict([(param_name, param_value)]))

                if issparse(mat) and model.get_params().get('method') == 'log':
                    # cannot take log of sparse matrix
                    with pytest.raises(ValueError):
                        model.fit(mat)
                    continue
                else:
                    model.fit(mat)

                # 3x3 checkerboard -> 9 biclusters; each row/column is in
                # exactly 3 of them (one per row-cluster x col-cluster band).
                assert model.rows_.shape == (9, 30)
                assert model.columns_.shape == (9, 30)
                assert_array_equal(model.rows_.sum(axis=0),
                                   np.repeat(3, 30))
                assert_array_equal(model.columns_.sum(axis=0),
                                   np.repeat(3, 30))
                assert consensus_score(model.biclusters_,
                                       (rows, cols)) == 1

                _test_shape_indices(model)
def _do_scale_test(scaled):
    """Check that rows sum to one constant, and columns to another."""
    rows = scaled.sum(axis=1)
    cols = scaled.sum(axis=0)
    if issparse(scaled):
        # Sparse sums come back as np.matrix; flatten to 1-D arrays.
        rows = np.asarray(rows).squeeze()
        cols = np.asarray(cols).squeeze()
    assert_array_almost_equal(rows, np.tile(rows.mean(), 100),
                              decimal=1)
    assert_array_almost_equal(cols, np.tile(cols.mean(), 100),
                              decimal=1)
def _do_bistochastic_test(scaled):
    """Check that rows and columns sum to the same constant."""
    _do_scale_test(scaled)
    col_mean = scaled.sum(axis=0).mean()
    row_mean = scaled.sum(axis=1).mean()
    assert_almost_equal(col_mean, row_mean, decimal=1)
def test_scale_normalize():
    """_scale_normalize yields constant row/column sums and keeps sparsity."""
    rng = np.random.RandomState(0)
    X = rng.rand(100, 100)
    for mat in (X, csr_matrix(X)):
        scaled, _, _ = _scale_normalize(mat)
        _do_scale_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_bistochastic_normalize():
    """_bistochastic_normalize yields equal row/column sums, keeps sparsity."""
    rng = np.random.RandomState(0)
    X = rng.rand(100, 100)
    for mat in (X, csr_matrix(X)):
        scaled = _bistochastic_normalize(mat)
        _do_bistochastic_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_log_normalize():
    """Adding any constant to a log-scaled matrix should make it bistochastic."""
    rng = np.random.RandomState(0)
    mat = rng.rand(100, 100)
    shifted = _log_normalize(mat) + 1
    _do_bistochastic_test(shifted)
def test_fit_best_piecewise():
    """_fit_best_piecewise selects the most piecewise-constant rows."""
    model = SpectralBiclustering(random_state=0)
    vectors = np.array([[0, 0, 0, 1, 1, 1],
                        [2, 2, 2, 3, 3, 3],
                        [0, 1, 2, 3, 4, 5]])
    # The first two rows are step functions; the last is a ramp.
    best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
    assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
    """_project_and_cluster recovers the planted two-group row partition."""
    model = SpectralBiclustering(random_state=0)
    data = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [3, 6, 3],
                     [3, 6, 3]])
    vectors = np.array([[1, 0],
                        [0, 1],
                        [0, 0]])
    expected = [0, 0, 1, 1]
    for mat in (data, csr_matrix(data)):
        labels = model._project_and_cluster(mat, vectors,
                                            n_clusters=2)
        # v-measure of 1.0 means a perfect (label-permutation-invariant)
        # match with the expected partition.
        assert_almost_equal(v_measure_score(labels, expected), 1.0)
def test_perfect_checkerboard():
    """Fitting noise-free checkerboard data must recover the biclusters
    with a perfect consensus score, for square and both rectangular
    orientations."""
    # XXX Previously failed on build bot (not reproducible)
    model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
    # The three shapes previously appeared as three copy-pasted
    # fit/assert stanzas; loop over them instead.
    for shape in ((30, 30), (40, 30), (30, 40)):
        S, rows, cols = make_checkerboard(shape, 3, noise=0,
                                          random_state=0)
        model.fit(S)
        assert consensus_score(model.biclusters_,
                               (rows, cols)) == 1
@pytest.mark.parametrize(
    "args",
    [{'n_clusters': (3, 3, 3)},
     {'n_clusters': 'abc'},
     {'n_clusters': (3, 'abc')},
     {'method': 'unknown'},
     {'n_components': 0},
     {'n_best': 0},
     {'svd_method': 'unknown'},
     {'n_components': 3, 'n_best': 4}]
)
def test_errors(args):
    """Each invalid constructor argument combination must raise ValueError
    at fit time (validation happens in fit, not in __init__)."""
    data = np.arange(25).reshape((5, 5))
    model = SpectralBiclustering(**args)
    with pytest.raises(ValueError):
        model.fit(data)
def test_wrong_shape():
    """A non-2D input array must make fit() raise ValueError."""
    cube = np.arange(27).reshape((3, 3, 3))
    estimator = SpectralBiclustering()
    with pytest.raises(ValueError):
        estimator.fit(cube)
@pytest.mark.parametrize('est',
                         (SpectralBiclustering(), SpectralCoclustering()))
def test_n_features_in_(est):
    """n_features_in_ must only exist after fit and equal the data width."""
    X, _, _ = make_biclusters((3, 3), 3, random_state=0)
    # Unfitted estimators must not carry the attribute at all.
    assert not hasattr(est, 'n_features_in_')
    est.fit(X)
    assert est.n_features_in_ == 3
@pytest.mark.parametrize("klass", [SpectralBiclustering, SpectralCoclustering])
@pytest.mark.parametrize("n_jobs", [None, 1])
def test_n_jobs_deprecated(klass, n_jobs):
    """Passing any explicit n_jobs must emit the deprecation FutureWarning."""
    # FIXME: remove in 0.25
    depr_msg = ("'n_jobs' was deprecated in version 0.23 and will be removed "
                "in 0.25.")
    S, _, _ = make_biclusters((30, 30), 3, noise=0.5, random_state=0)
    est = klass(random_state=0, n_jobs=n_jobs)
    with pytest.warns(FutureWarning, match=depr_msg):
        est.fit(S)
| bsd-3-clause |
TieWei/nova | nova/db/sqlalchemy/types.py | 14 | 2197 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Custom SQLAlchemy types."""
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from nova import utils
class IPAddress(types.TypeDecorator):
    """An SQLAlchemy type representing an IP-address."""
    impl = types.String

    def load_dialect_impl(self, dialect):
        # PostgreSQL has a native INET type; every other database gets a
        # VARCHAR wide enough for a full IPv6 address (39 characters).
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        return dialect.type_descriptor(types.String(39))

    def process_bind_param(self, value, dialect):
        """Process/Formats the value before insert it into the db."""
        if dialect.name == 'postgresql':
            # Native INET column: let PostgreSQL store the value as-is.
            return value
        # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
        # form, not validate it.
        if utils.is_valid_ipv6(value):
            return utils.get_shortened_ipv6(value)
        return value
class CIDR(types.TypeDecorator):
    """An SQLAlchemy type representing a CIDR definition."""
    impl = types.String

    def load_dialect_impl(self, dialect):
        # PostgreSQL stores CIDRs natively via INET; elsewhere fall back to
        # a VARCHAR wide enough for "<full ipv6>/<prefix>" (43 characters).
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        return dialect.type_descriptor(types.String(43))

    def process_bind_param(self, value, dialect):
        """Process/Formats the value before insert it into the db."""
        # NOTE(sdague): normalize all the inserts
        if utils.is_valid_ipv6_cidr(value):
            return utils.get_shortened_ipv6_cidr(value)
        return value
| apache-2.0 |
Synforge/err | errbot/backends/test.py | 1 | 18173 | import importlib
import logging
import sys
import unittest
from os.path import sep, abspath
from queue import Queue
from tempfile import mkdtemp
from threading import Thread
import pytest
from errbot.rendering import text
from errbot.backends.base import Message, Room, Person, RoomOccupant, ONLINE
from errbot.core_plugins.wsview import reset_app
from errbot.errBot import ErrBot
from errbot.main import setup_bot
# Can't use __name__ because of Yapsy
log = logging.getLogger('errbot.backends.test')
# Sentinel message: pushing this makes TestBackend.serve_forever() return.
QUIT_MESSAGE = '$STOP$'
# Stanza type tags used on the back-end's incoming queue.
STZ_MSG = 1  # a chat message
STZ_PRE = 2  # a presence update
STZ_IQ = 3  # an IQ (info/query) stanza
class TestPerson(Person):
    """
    This is an identifier just represented as a string.
    DO NOT USE THIS DIRECTLY AS IT IS NOT COMPATIBLE WITH MOST BACKENDS,
    use self.build_identifier(identifier_as_string) instead.
    Note to back-end implementors: You should provide a custom
    <yourbackend>Identifier object that adheres to this interface.
    You should not directly inherit from SimpleIdentifier, inherit
    from object instead and make sure it includes all properties and
    methods exposed by this class.
    """
    def __init__(self, person, client=None, nick=None, fullname=None):
        # NOTE: the underscored attributes are read directly by the
        # TestOccupant subclass below; renaming them would break it.
        self._person = person
        self._client = client
        self._nick = nick
        self._fullname = fullname
    @property
    def person(self):
        """This needs to return the part of the identifier pointing to a person."""
        return self._person
    @property
    def client(self):
        """This needs to return the part of the identifier pointing to a client from which a person is sending a message from.
        Returns None is unspecified"""
        return self._client
    @property
    def nick(self):
        """This needs to return a short display name for this identifier e.g. gbin.
        Returns None is unspecified"""
        return self._nick
    @property
    def fullname(self):
        """This needs to return a long display name for this identifier e.g. Guillaume Binet.
        Returns None is unspecified"""
        return self._fullname
    # ACL checks key on the person part of the identifier.
    aclattr = person
    def __unicode__(self):
        # 'person/client' when a client part is present, bare 'person' otherwise.
        if self.client:
            return self._person + "/" + self._client
        return self._person
    __str__ = __unicode__
    def __eq__(self, other):
        # Equality is by person only; assumes `other` exposes .person —
        # TODO(review): confirm callers never compare against plain strings.
        return self.person == other.person
# noinspection PyAbstractClass
class TestOccupant(TestPerson, RoomOccupant):
    """ This is a MUC occupant represented as a string.
    DO NOT USE THIS DIRECTLY AS IT IS NOT COMPATIBLE WITH MOST BACKENDS,
    """
    def __init__(self, person, room):
        super().__init__(person)
        self._room = room
    @property
    def room(self):
        """The room this occupant belongs to."""
        return self._room
    def __unicode__(self):
        # Rendered as 'person@room', e.g. 'gbin@testroom'.
        return self._person + '@' + str(self._room)
    __str__ = __unicode__
    def __eq__(self, other):
        # Occupants are equal only when both the person and the room match.
        return self.person == other.person and self.room == other.room
class TestRoom(Room):
    """In-memory MUC room used by the test back-end.

    The canonical list of rooms lives on the bot (``bot._rooms``); a
    TestRoom instance is only a handle that compares equal by name, so
    state-changing methods first look up the canonical room via
    :meth:`find_croom`.
    """

    def invite(self, *args):
        # Invitations are meaningless for the in-memory back-end.
        pass

    def __init__(self, name, occupants=None, topic=None, bot=None):
        """
        :param name: Name of the room
        :param occupants: Occupants of the room
        :param topic: The MUC's topic
        """
        if occupants is None:
            occupants = []
        self._occupants = occupants
        self._topic = topic
        self._bot = bot
        self._name = name
        # Identifier the bot itself uses when it joins this room.
        self._bot_mucid = TestOccupant(self._bot.bot_config.BOT_IDENTITY['username'], self._name)

    @property
    def occupants(self):
        return self._occupants

    def find_croom(self):
        """Find back the canonical room for this room handle (or None)."""
        for croom in self._bot._rooms:
            if croom == self:
                return croom
        return None

    @property
    def joined(self):
        room = self.find_croom()
        if room:
            return self._bot_mucid in room.occupants
        return False

    def join(self, username=None, password=None):
        # NOTE: warnings below use the module-level `log` logger for
        # consistency with the rest of this file (previously these went to
        # the root logger via logging.warning).
        if self.joined:
            log.warning("Attempted to join room '{!s}', but already in this room".format(self))
            return
        if not self.exists:
            log.debug("Room {!s} doesn't exist yet, creating it".format(self))
            self.create()
        room = self.find_croom()
        room._occupants.append(self._bot_mucid)
        log.info("Joined room {!s}".format(self))
        self._bot.callback_room_joined(room)

    def leave(self, reason=None):
        if not self.joined:
            log.warning("Attempted to leave room '{!s}', but not in this room".format(self))
            return
        room = self.find_croom()
        room._occupants.remove(self._bot_mucid)
        log.info("Left room {!s}".format(self))
        self._bot.callback_room_left(room)

    @property
    def exists(self):
        return self.find_croom() is not None

    def create(self):
        if self.exists:
            log.warning("Room {!s} already created".format(self))
            return
        self._bot._rooms.append(self)
        log.info("Created room {!s}".format(self))

    def destroy(self):
        if not self.exists:
            log.warning("Cannot destroy room {!s}, it doesn't exist".format(self))
            return
        self._bot._rooms.remove(self)
        log.info("Destroyed room {!s}".format(self))

    @property
    def topic(self):
        return self._topic

    @topic.setter
    def topic(self, topic):
        self._topic = topic
        # Propagate the new topic to the canonical room and notify the bot.
        room = self.find_croom()
        room._topic = self._topic
        log.info("Topic for room {!s} set to '{}'".format(self, topic))
        self._bot.callback_room_topic(self)

    def __unicode__(self):
        return self._name

    def __str__(self):
        return self._name

    def __eq__(self, other):
        return self._name == other._name
class TestBackend(ErrBot):
    """Errbot back-end implementation backed by in-memory queues.

    Tests push incoming stanzas onto ``incoming_stanza_queue`` and read
    the bot's rendered replies from ``outgoing_message_queue``; no
    network is involved.
    """
    def change_presence(self, status: str = ONLINE, message: str = '') -> None:
        # Presence is meaningless for the queue-based test back-end.
        pass
    def __init__(self, config):
        config.BOT_LOG_LEVEL = logging.DEBUG
        config.CHATROOM_PRESENCE = ('testroom',) # we are testing with simple identifiers
        config.BOT_IDENTITY = {'username': 'err'} # we are testing with simple identifiers
        self.bot_identifier = self.build_identifier('Err') # whatever
        super().__init__(config)
        self.incoming_stanza_queue = Queue()
        self.outgoing_message_queue = Queue()
        self.sender = self.build_identifier(config.BOT_ADMINS[0]) # By default, assume this is the admin talking
        self.reset_rooms()
        self.md = text()  # markdown-to-plain-text renderer for outgoing bodies
    def send_message(self, mess):
        log.info("\n\n\nMESSAGE:\n%s\n\n\n", mess.body)
        super().send_message(mess)
        # Store the rendered (plain text) body so tests can assert on it.
        self.outgoing_message_queue.put(self.md.convert(mess.body))
    def serve_forever(self):
        """Main loop: consume stanzas from the incoming queue until the
        QUIT_MESSAGE sentinel arrives, dispatching by stanza type."""
        self.connect_callback() # notify that the connection occurred
        try:
            while True:
                print('waiting on queue')
                stanza_type, entry = self.incoming_stanza_queue.get()
                print('message received')
                if entry == QUIT_MESSAGE:
                    log.info("Stop magic message received, quitting...")
                    break
                if stanza_type is STZ_MSG:
                    msg = Message(entry)
                    msg.frm = self.sender
                    msg.to = self.bot_identifier # To me only
                    self.callback_message(msg)
                    # implements the mentions: every '@word' token in the text
                    # is treated as a mention of the identifier 'word'.
                    mentioned = [self.build_identifier(word[1:]) for word in entry.split() if word.startswith('@')]
                    if mentioned:
                        self.callback_mention(msg, mentioned)
                elif stanza_type is STZ_PRE:
                    log.info("Presence stanza received.")
                    self.callback_presence(entry)
                elif stanza_type is STZ_IQ:
                    # IQ stanzas are acknowledged but not dispatched anywhere.
                    log.info("IQ stanza received.")
                else:
                    log.error("Unknown stanza type.")
        except EOFError:
            pass
        except KeyboardInterrupt:
            pass
        finally:
            log.debug("Trigger disconnect callback")
            self.disconnect_callback()
            log.debug("Trigger shutdown")
            self.shutdown()
    def connect(self):
        # Nothing to connect to.
        return
    def build_identifier(self, text_representation):
        return TestPerson(text_representation)
    def build_reply(self, mess, text=None, private=False):
        """Build a reply message addressed back to the sender of `mess`."""
        msg = self.build_message(text)
        msg.frm = self.bot_identifier
        msg.to = mess.frm
        return msg
    @property
    def mode(self):
        return 'test'
    def rooms(self):
        """Return only the rooms the bot has actually joined."""
        return [r for r in self._rooms if r.joined]
    def query_room(self, room):
        """Return the canonical room of that name, or a fresh unsaved handle."""
        try:
            return [r for r in self._rooms if str(r) == str(room)][0]
        except IndexError:
            r = TestRoom(room, bot=self)
            return r
    def prefix_groupchat_reply(self, message, identifier):
        super().prefix_groupchat_reply(message, identifier)
        message.body = '@{0} {1}'.format(identifier.nick, message.body)
    def pop_message(self, timeout=5, block=True):
        """Pop the next rendered outgoing message (test-facing API)."""
        return self.outgoing_message_queue.get(timeout=timeout, block=block)
    def push_message(self, msg):
        """Inject an incoming chat message (test-facing API)."""
        self.incoming_stanza_queue.put((STZ_MSG, msg), timeout=5)
    def push_presence(self, presence):
        """ presence must at least duck type base.Presence
        """
        self.incoming_stanza_queue.put((STZ_PRE, presence), timeout=5)
    def zap_queues(self):
        """Drain both queues, logging anything a test left behind."""
        while not self.incoming_stanza_queue.empty():
            msg = self.incoming_stanza_queue.get(block=False)
            log.error('Message left in the incoming queue during a test : %s' % msg)
        while not self.outgoing_message_queue.empty():
            msg = self.outgoing_message_queue.get(block=False)
            log.error('Message left in the outgoing queue during a test : %s' % msg)
    def reset_rooms(self):
        """Reset/clear all rooms"""
        self._rooms = []
class ShallowConfig(object):
    """Empty attribute bag used as a stand-in for errbot's config module."""
    pass
class TestBot(object):
    """
    A minimal bot utilizing the TestBackend, for use with unit testing.

    Only one instance of this class should globally be active at any one
    time.

    End-users should not use this class directly. Use
    :func:`~errbot.backends.test.testbot` or
    :class:`~errbot.backends.test.FullStackTest` instead, which use this
    class under the hood.
    """
    # None while stopped; holds the backend thread while running.
    bot_thread = None

    def __init__(self, extra_plugin_dir=None, loglevel=logging.DEBUG, extra_config=None):
        self.setup(extra_plugin_dir=extra_plugin_dir, loglevel=loglevel, extra_config=extra_config)

    def setup(self, extra_plugin_dir=None, loglevel=logging.DEBUG, extra_config=None):
        """
        :param extra_config: Piece of extra configuration you want to inject to the config.
        :param extra_plugin_dir: Path to a directory from which additional
               plugins should be loaded.
        :param loglevel: Logging verbosity. Expects one of the constants
            defined by the logging module.
        """
        tempdir = mkdtemp()
        # This is for test isolation: every bot gets a fresh data dir.
        config = ShallowConfig()
        config.__dict__.update(importlib.import_module('errbot.config-template').__dict__)
        config.BOT_DATA_DIR = tempdir
        config.BOT_LOG_FILE = tempdir + sep + 'log.txt'
        config.STORAGE = 'Memory'
        if extra_config is not None:
            log.debug('Merging %s to the bot config.' % repr(extra_config))
            for k, v in extra_config.items():
                setattr(config, k, v)
        # reset logging to console
        logging.basicConfig(format='%(levelname)s:%(message)s')
        file = logging.FileHandler(config.BOT_LOG_FILE, encoding='utf-8')
        self.logger = logging.getLogger('')
        self.logger.setLevel(loglevel)
        self.logger.addHandler(file)
        config.BOT_EXTRA_PLUGIN_DIR = extra_plugin_dir
        config.BOT_LOG_LEVEL = loglevel
        self.bot_config = config

    def start(self):
        """
        Start the bot

        Calling this method when the bot has already started will result
        in an Exception being raised.
        """
        if self.bot_thread is not None:
            raise Exception("Bot has already been started")
        self.bot = setup_bot('Test', self.logger, self.bot_config)
        self.bot_thread = Thread(target=self.bot.serve_forever, name='TestBot main thread')
        # Daemonize so a crashed test run cannot hang interpreter shutdown.
        # (Attribute form: Thread.setDaemon() is deprecated.)
        self.bot_thread.daemon = True
        self.bot_thread.start()
        self.bot.push_message("!echo ready")
        # Ensure bot is fully started and plugins are loaded before returning
        assert self.bot.pop_message(timeout=60) == "ready"

    def stop(self):
        """
        Stop the bot

        Calling this method before the bot has started will result in an
        Exception being raised.
        """
        if self.bot_thread is None:
            raise Exception("Bot has not yet been started")
        self.bot.push_message(QUIT_MESSAGE)
        self.bot_thread.join()
        reset_app()  # empty the bottle ... hips!
        log.info("Main bot thread quits")
        self.bot.zap_queues()
        self.bot.reset_rooms()
        self.bot_thread = None

    def pop_message(self, timeout=5, block=True):
        """Pop the next rendered reply from the bot."""
        return self.bot.pop_message(timeout, block)

    def push_message(self, msg):
        """Inject an incoming chat message into the bot."""
        return self.bot.push_message(msg)

    def push_presence(self, presence):
        """ presence must at least duck type base.Presence
        """
        return self.bot.push_presence(presence)

    def zap_queues(self):
        return self.bot.zap_queues()

    def assertCommand(self, command, response, timeout=5):
        """Assert the given command returns the given response"""
        self.bot.push_message(command)
        assert response in self.bot.pop_message(timeout)

    def assertCommandFound(self, command, timeout=5):
        """Assert the given command exists (its reply is not 'not found')."""
        self.bot.push_message(command)
        assert 'not found' not in self.bot.pop_message(timeout)
class FullStackTest(unittest.TestCase, TestBot):
    """
    Test class for use with Python's unittest module to write tests
    against a fully functioning bot.
    For example, if you wanted to test the builtin `!about` command,
    you could write a test file with the following::
        from errbot.backends.test import FullStackTest
        class TestCommands(FullStackTest):
            def test_about(self):
                self.push_message('!about')
                self.assertIn('Err version', self.pop_message())
    """
    def setUp(self, extra_plugin_dir=None, extra_test_file=None, loglevel=logging.DEBUG, extra_config=None):
        """
        :param extra_plugin_dir: Path to a directory from which additional
               plugins should be loaded.
        :param extra_test_file: [Deprecated but kept for backward-compatibility,
               use extra_plugin_dir instead]
               Path to an additional plugin which should be loaded.
        :param loglevel: Logging verbosity. Expects one of the constants
            defined by the logging module.
        :param extra_config: Piece of extra bot config in a dict.
        """
        # Legacy path: derive the plugin directory from the test file's
        # location (two levels up from the file itself).
        if extra_plugin_dir is None and extra_test_file is not None:
            extra_plugin_dir = sep.join(abspath(extra_test_file).split(sep)[:-2])
        self.setup(extra_plugin_dir=extra_plugin_dir, loglevel=loglevel, extra_config=extra_config)
        self.start()
    def tearDown(self):
        self.stop()
@pytest.fixture
def testbot(request):
    """
    Pytest fixture to write tests against a fully functioning bot.
    For example, if you wanted to test the builtin `!about` command,
    you could write a test file with the following::
        from errbot.backends.test import testbot
        def test_about(testbot):
            testbot.push_message('!about')
            assert "Err version" in testbot.pop_message()
    It's possible to provide additional configuration to this fixture,
    by setting variables at module level or as class attributes (the
    latter taking precedence over the former). For example::
        from errbot.backends.test import testbot
        extra_plugin_dir = '/foo/bar'
        def test_about(testbot):
            testbot.pushMessage('!about')
            assert "Err version" in testbot.pop_message()
    ..or::
        from errbot.backends.test import testbot
        extra_plugin_dir = '/foo/bar'
        class Tests(object):
            # Wins over `extra_plugin_dir = '/foo/bar'` above
            extra_plugin_dir = '/foo/baz'
            def test_about(self, testbot):
                testbot.push_message('!about')
                assert "Err version" in testbot.pop_message()
    ..to load additional plugins from the directory `/foo/bar` or
    `/foo/baz` respectively. This works for the following items, which are
    passed to the constructor of :class:`~errbot.backends.test.TestBot`:
    * `extra_plugin_dir`
    * `loglevel`
    """
    def on_finish():
        bot.stop()
    # setup the logging to something digestable.
    logger = logging.getLogger('')
    logging.getLogger('yapsy').setLevel(logging.ERROR)  # this one is way too verbose in debug
    logging.getLogger('MARKDOWN').setLevel(logging.ERROR)  # this one is way too verbose in debug
    logging.getLogger('Rocket.Errors').setLevel(logging.ERROR)  # this one is way too verbose in debug
    logger.setLevel(logging.DEBUG)
    console_hdlr = logging.StreamHandler(sys.stdout)
    console_hdlr.setFormatter(logging.Formatter("%(levelname)-8s %(name)-25s %(message)s"))
    logger.handlers = []
    logger.addHandler(console_hdlr)
    # Collect per-test configuration: class attributes win over module-level
    # variables, which win over the defaults.
    kwargs = {}
    for attr, default in (('extra_plugin_dir', None), ('loglevel', logging.DEBUG),):
        if hasattr(request, 'instance'):
            kwargs[attr] = getattr(request.instance, attr, None)
        if kwargs[attr] is None:
            kwargs[attr] = getattr(request.module, attr, default)
    bot = TestBot(**kwargs)
    bot.start()
    request.addfinalizer(on_finish)
    return bot
| gpl-3.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.1.54/external/webkit/Source/WebKit/chromium/scripts/concatenate_js_files.py | 15 | 4355 | #!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script concatenates in place JS files in the order specified
# using <script> tags in a given 'order.html' file.
from HTMLParser import HTMLParser
from cStringIO import StringIO
import jsmin
import os.path
import sys
class OrderedJSFilesExtractor(HTMLParser):
    """Parses order.html and collects, in document order, the src of every
    <script type="text/javascript" src="..."> tag into ordered_js_files."""
    def __init__(self, order_html_name):
        HTMLParser.__init__(self)
        self.ordered_js_files = []
        # Use a context manager so the file handle is closed promptly
        # (it was previously leaked); parsing happens during feed().
        with open(order_html_name, 'r') as order_html:
            self.feed(order_html.read())
    def handle_starttag(self, tag, attrs):
        if tag == 'script':
            attrs_dict = dict(attrs)
            # Only scripts that are explicitly JavaScript and external.
            if ('type' in attrs_dict and attrs_dict['type'] == 'text/javascript' and 'src' in attrs_dict):
                self.ordered_js_files.append(attrs_dict['src'])
class PathExpander:
    """Resolves a bare file name against an ordered list of source paths.

    expand() returns the single matching "<path>/<filename>", or None when
    the file exists under none of the paths. Finding the same name under
    two different source paths raises, since the concatenation source
    would be ambiguous.
    """
    def __init__(self, paths):
        self.paths = paths
    def expand(self, filename):
        last_path = None
        expanded_name = None
        for path in self.paths:
            fname = "%s/%s" % (path, filename)
            if os.access(fname, os.F_OK):
                # `is not None` instead of `!= None` (identity check for None)
                if last_path is not None:
                    raise Exception('Ambiguous file %s: found in %s and %s' %
                                    (filename, last_path, path))
                expanded_name = fname
                last_path = path
        return expanded_name
def main(argv):
    """Concatenate and minify the ordered JS files into the output file.

    argv layout: [prog, order.html, src_dir_1, ..., src_dir_N, output_file]
    Returns 1 on usage error; raises if a referenced file cannot be found.
    """
    if len(argv) < 3:
        print('usage: %s order.html input_source_dir_1 input_source_dir_2 ... '
              'output_file' % argv[0])
        return 1
    # The output file is the last argument; pop it so argv[2:] is only dirs.
    output_file_name = argv.pop()
    input_order_file_name = argv[1]
    extractor = OrderedJSFilesExtractor(input_order_file_name)
    # These two are not referenced from order.html but must be appended last.
    extractor.ordered_js_files.append('DevTools.js')
    extractor.ordered_js_files.append('Tests.js')
    expander = PathExpander(argv[2:])
    output = StringIO()
    for input_file_name in extractor.ordered_js_files:
        full_path = expander.expand(input_file_name)
        if (full_path is None):
            raise Exception('File %s referenced in %s not found on any source paths, '
                            'check source tree for consistency' %
                            (input_file_name, input_order_file_name))
        # Separate each source with a banner comment naming its origin.
        output.write('/* %s */\n\n' % input_file_name)
        input_file = open(full_path, 'r')
        output.write(input_file.read())
        output.write('\n')
        input_file.close()
    output_file = open(output_file_name, 'w')
    # Minify the whole concatenation in one pass before writing it out.
    output_file.write(jsmin.jsmin(output.getvalue()))
    output_file.close()
    output.close()
    # Touch output file directory to make sure that Xcode will copy
    # modified resource files.
    if sys.platform == 'darwin':
        output_dir_name = os.path.dirname(output_file_name)
        os.utime(output_dir_name, None)
| gpl-2.0 |
digitalghost/pycv-gameRobot | testtouch.py | 1 | 1881 | import subprocess
import time
# Imports the monkeyrunner modules used by this program
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
# Connects to the current device, returning a MonkeyDevice object
# Main script (Jython 2 / monkeyrunner): poll the device screen in a loop,
# template-match ./tpl.png against each snapshot via an external OpenCV
# helper, and tap the centre of any match.
print "Start Connction to Phone"
device = MonkeyRunner.waitForConnection()
print "Connected to Phone"
device.wake()
pWidth = device.getProperty("display.width")
pHeight = device.getProperty("display.height")
print "device width:" + str(pWidth) + ", Height:" + str(pHeight) + ", Density:" + str(device.getProperty("display.density"))
cnt = 0
fn = "./"
while 1:
    startTick = time.time()
    print "Start take snapshot for count:" + str(cnt)
    screenShot = device.takeSnapshot()
    endTick = time.time()
    print "End take snapshot for count:" + str(cnt) + ", Elapse secs:" + str(endTick-startTick)
    # Writes the screenshot to a file
    fn = "./snapshots/s" + str(cnt) + ".png"
    startTick = time.time()
    print "Start write to file :" + fn
    screenShot.writeToFile(fn,'png')
    endTick = time.time()
    print "Ended write to file, Elapse secs:" + str(endTick-startTick)
    # external template Matching: cvTplMatch.py prints either "NULL" or
    # "x1,y1,x2,y2" (the match rectangle corners) on stdout.
    process = subprocess.Popen(['python','./cvTplMatch.py','./tpl.png',fn],stdout=subprocess.PIPE)
    cmdData = process.communicate()[0]
    cmdStr = str(cmdData)[:-1]  # strip the trailing newline
    cmdRC = process.returncode
    if str(cmdStr) == "NULL":
        print "No matched template image,touch nothing, wait for next check..."
    else:
        arr = cmdStr.split(",")
        # Centre of the matched rectangle.
        centerX = int(int(arr[0]) + (int(arr[2]) - int(arr[0]))/2)
        centerY = int(int(arr[1]) + (int(arr[3]) - int(arr[1]))/2)
        device.touch(centerX,centerY,MonkeyDevice.DOWN_AND_UP)
        print "Touch the screen point at: " + cmdStr
    cnt += 1
    print "Start to Sleep 5 secs"
    MonkeyRunner.sleep(5)
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/ssl_servers.py | 84 | 7044 | import os
import sys
import ssl
import pprint
import socket
import urllib.parse
# Rename HTTPServer to _HTTPServer so as to avoid confusion with HTTPSServer.
from http.server import (HTTPServer as _HTTPServer,
SimpleHTTPRequestHandler, BaseHTTPRequestHandler)
from test import support
threading = support.import_module("threading")  # skips dependent tests when threading is unavailable
here = os.path.dirname(__file__)
HOST = support.HOST
CERTFILE = os.path.join(here, 'keycert.pem')  # combined private key + certificate
# This one's based on HTTPServer, which is based on SocketServer
class HTTPSServer(_HTTPServer):
    """HTTPServer variant that wraps every accepted connection in TLS
    using the given ssl.SSLContext."""
    def __init__(self, server_address, handler_class, context):
        _HTTPServer.__init__(self, server_address, handler_class)
        self.context = context
    def __str__(self):
        return ('<%s %s:%s>' %
                (self.__class__.__name__,
                 self.server_name,
                 self.server_port))
    def get_request(self):
        # override this to wrap socket with SSL
        try:
            sock, addr = self.socket.accept()
            sslconn = self.context.wrap_socket(sock, server_side=True)
        except OSError as e:
            # socket errors are silenced by the caller, print them here
            if support.verbose:
                sys.stderr.write("Got an error:\n%s\n" % e)
            raise
        return sslconn, addr
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
    # need to override translate_path to get a known root,
    # instead of using os.curdir, since the test could be
    # run from anywhere
    server_version = "TestHTTPS/1.0"
    root = here  # serve files relative to this module's directory
    # Avoid hanging when a request gets interrupted by the client
    timeout = 5
    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.
        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored. (XXX They should
        probably be diagnosed.)
        """
        # abandon query parameters
        path = urllib.parse.urlparse(path)[2]
        path = os.path.normpath(urllib.parse.unquote(path))
        words = path.split('/')
        # drop empty components (from leading or doubled slashes)
        words = filter(None, words)
        path = self.root
        for word in words:
            # strip any drive and directory part from each component so the
            # resulting path cannot escape self.root
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            path = os.path.join(path, word)
        return path
    def log_message(self, format, *args):
        # we override this to suppress logging unless "verbose"
        if support.verbose:
            sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" %
                             (self.server.server_address,
                              self.server.server_port,
                              self.request.cipher(),
                              self.log_date_time_string(),
                              format%args))
class StatsRequestHandler(BaseHTTPRequestHandler):
    """Example HTTP request handler which returns SSL statistics on GET
    requests.
    """
    server_version = "StatsHTTPS/1.0"
    def do_GET(self, send_body=True):
        """Serve a GET request."""
        # NOTE(review): reaches through the buffered reader to the raw SSL
        # socket via a private attribute — CPython-internal, may change.
        sock = self.rfile.raw._sock
        context = sock.context
        stats = {
            'session_cache': context.session_stats(),
            'cipher': sock.cipher(),
            'compression': sock.compression(),
            }
        body = pprint.pformat(stats)
        body = body.encode('utf-8')
        self.send_response(200)
        self.send_header("Content-type", "text/plain; charset=utf-8")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        if send_body:
            self.wfile.write(body)
    def do_HEAD(self):
        """Serve a HEAD request."""
        # Same headers as GET, but without the body.
        self.do_GET(send_body=False)
    def log_request(self, format, *args):
        # only log when the test support machinery is in verbose mode
        if support.verbose:
            BaseHTTPRequestHandler.log_request(self, format, *args)
class HTTPSServerThread(threading.Thread):
    """Runs an HTTPSServer in a daemon thread; bind happens in __init__
    so self.port is known before start()."""
    def __init__(self, context, host=HOST, handler_class=None):
        self.flag = None  # optional threading.Event set once the loop runs
        self.server = HTTPSServer((host, 0),
                                  handler_class or RootedHTTPRequestHandler,
                                  context)
        self.port = self.server.server_port
        threading.Thread.__init__(self)
        self.daemon = True
    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)
    def start(self, flag=None):
        """Start the thread; `flag` is set as soon as the server loop runs."""
        self.flag = flag
        threading.Thread.start(self)
    def run(self):
        if self.flag:
            self.flag.set()
        try:
            self.server.serve_forever(0.05)
        finally:
            self.server.server_close()
    def stop(self):
        self.server.shutdown()
def make_https_server(case, *, context=None, certfile=CERTFILE,
                      host=HOST, handler_class=None):
    """Start an HTTPS server thread for a test case and register cleanup.

    Returns the running HTTPSServerThread; `case` is a unittest.TestCase
    whose addCleanup() will stop and join the server after the test.
    """
    if context is None:
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    # We assume the certfile contains both private key and certificate
    context.load_cert_chain(certfile)
    server = HTTPSServerThread(context, host, handler_class)
    flag = threading.Event()
    server.start(flag)
    # Block until the server loop is actually running before returning.
    flag.wait()
    def cleanup():
        if support.verbose:
            sys.stdout.write('stopping HTTPS server\n')
        server.stop()
        if support.verbose:
            sys.stdout.write('joining HTTPS thread\n')
        server.join()
    case.addCleanup(cleanup)
    return server
if __name__ == "__main__":
    # Stand-alone mode: run a test HTTPS server from the command line.
    import argparse
    parser = argparse.ArgumentParser(
        description='Run a test HTTPS server. '
                    'By default, the current directory is served.')
    parser.add_argument('-p', '--port', type=int, default=4433,
                        help='port to listen on (default: %(default)s)')
    parser.add_argument('-q', '--quiet', dest='verbose', default=True,
                        action='store_false', help='be less verbose')
    parser.add_argument('-s', '--stats', dest='use_stats_handler', default=False,
                        action='store_true', help='always return stats page')
    parser.add_argument('--curve-name', dest='curve_name', type=str,
                        action='store',
                        help='curve name for EC-based Diffie-Hellman')
    parser.add_argument('--dh', dest='dh_file', type=str, action='store',
                        help='PEM file containing DH parameters')
    args = parser.parse_args()
    support.verbose = args.verbose
    if args.use_stats_handler:
        handler_class = StatsRequestHandler
    else:
        # Serve files from the current working directory (not the module dir).
        handler_class = RootedHTTPRequestHandler
        handler_class.root = os.getcwd()
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_cert_chain(CERTFILE)
    if args.curve_name:
        context.set_ecdh_curve(args.curve_name)
    if args.dh_file:
        context.load_dh_params(args.dh_file)
    server = HTTPSServer(("", args.port), handler_class, context)
    if args.verbose:
        print("Listening on https://localhost:{0.port}".format(args))
    server.serve_forever(0.1)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.