repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
TNosredna/CouchPotatoServer | libs/pyutil/test/out_of_shape/test_strutil.py | 106 | 1713 | #!/usr/bin/env python
# Copyright (c) 2004-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import unittest
from pyutil.assertutil import _assert
from pyutil import strutil
class Teststrutil(unittest.TestCase):
    """Unit tests for the pyutil.strutil helper functions.

    Uses ``assertEqual`` instead of the deprecated ``failUnless`` / bare
    ``_assert`` so failures report both the expected and the actual value.
    """

    def test_short_input(self):
        # pop_trailing_newlines() strips a trailing CR, LF or CRLF.
        self.assertEqual(strutil.pop_trailing_newlines("\r\n"), "")
        self.assertEqual(strutil.pop_trailing_newlines("\r"), "")
        self.assertEqual(strutil.pop_trailing_newlines("x\r\n"), "x")
        self.assertEqual(strutil.pop_trailing_newlines("x\r"), "x")

    def test_split(self):
        # split_on_newlines() accepts both "\r\n" and "\n" separators and,
        # like str.split(sep), keeps empty trailing fields.
        self.assertEqual(strutil.split_on_newlines("x\r\ny"), ["x", "y"])
        self.assertEqual(strutil.split_on_newlines("x\r\ny\r\n"), ["x", "y", ""])
        self.assertEqual(strutil.split_on_newlines("x\n\ny\n\n"),
                         ["x", "", "y", "", ""])

    def test_commonprefix(self):
        self.assertEqual(strutil.commonprefix(["foo", "foobarooo", "foosplat"]),
                         "foo")
        self.assertEqual(strutil.commonprefix(["foo", "afoobarooo", "foosplat"]),
                         "")

    def test_commonsuffix(self):
        self.assertEqual(strutil.commonsuffix(["foo", "foobarooo", "foosplat"]),
                         "")
        self.assertEqual(strutil.commonsuffix(["foo", "foobarooo", "foosplato"]),
                         "o")
        self.assertEqual(
            strutil.commonsuffix(["foo", "foobarooofoo", "foosplatofoo"]),
            "foo")
| gpl-3.0 |
lgierth/cjdns | node_build/dependencies/libuv/build/gyp/test/subdirectory/gyptest-top-default.py | 261 | 1363 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementation of Make) put it
in a build directory relative to the "solution"--that is, the entry-point
from which you built the entire tree.
"""
# TestGyp is the generator-independent harness shared by all gyp
# regression tests.
import TestGyp

test = TestGyp.TestGyp()

# Generate build files from the .gyp living in the 'src' subdirectory,
# deliberately without specifying an explicit output/build directory.
test.run_gyp('prog1.gyp', chdir='src')

# Move the tree to verify the generated files do not depend on the
# original absolute location.
test.relocate('src', 'relocate/src')

# Build everything from the top-of-tree entry point.
test.build('prog1.gyp', chdir='relocate/src')

test.run_built_executable('prog1',
                          stdout="Hello from prog1.c\n",
                          chdir='relocate/src')

# Where prog2 lands differs by generator: Xcode writes each project's
# output next to its .xcodeproj, while Make/MSVS build relative to the
# top-level "solution" (see the module docstring above).
if test.format == 'xcode':
    chdir = 'relocate/src/subdir'
else:
    chdir = 'relocate/src'
test.run_built_executable('prog2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")

test.pass_test()
| gpl-3.0 |
# iconmix/skins-addons | script.iconmixtools/resources/lib/unidecode/xd7.py | 252 | 4559
# Unidecode transliteration table for the 0xD7xx code-point page.
# Index = low byte of the code point.  Positions 0x00-0xa3 carry
# romanizations for the tail of the Hangul syllable block; every position
# from 0xa4 onward is unmapped and yields the '[?]' placeholder.
data = (
    'hwen', 'hwenj', 'hwenh', 'hwed', 'hwel', 'hwelg', 'hwelm', 'hwelb',    # 0x00-0x07
    'hwels', 'hwelt', 'hwelp', 'hwelh', 'hwem', 'hweb', 'hwebs', 'hwes',    # 0x08-0x0f
    'hwess', 'hweng', 'hwej', 'hwec', 'hwek', 'hwet', 'hwep', 'hweh',       # 0x10-0x17
    'hwi', 'hwig', 'hwigg', 'hwigs', 'hwin', 'hwinj', 'hwinh', 'hwid',      # 0x18-0x1f
    'hwil', 'hwilg', 'hwilm', 'hwilb', 'hwils', 'hwilt', 'hwilp', 'hwilh',  # 0x20-0x27
    'hwim', 'hwib', 'hwibs', 'hwis', 'hwiss', 'hwing', 'hwij', 'hwic',      # 0x28-0x2f
    'hwik', 'hwit', 'hwip', 'hwih', 'hyu', 'hyug', 'hyugg', 'hyugs',        # 0x30-0x37
    'hyun', 'hyunj', 'hyunh', 'hyud', 'hyul', 'hyulg', 'hyulm', 'hyulb',    # 0x38-0x3f
    'hyuls', 'hyult', 'hyulp', 'hyulh', 'hyum', 'hyub', 'hyubs', 'hyus',    # 0x40-0x47
    'hyuss', 'hyung', 'hyuj', 'hyuc', 'hyuk', 'hyut', 'hyup', 'hyuh',       # 0x48-0x4f
    'heu', 'heug', 'heugg', 'heugs', 'heun', 'heunj', 'heunh', 'heud',      # 0x50-0x57
    'heul', 'heulg', 'heulm', 'heulb', 'heuls', 'heult', 'heulp', 'heulh',  # 0x58-0x5f
    'heum', 'heub', 'heubs', 'heus', 'heuss', 'heung', 'heuj', 'heuc',      # 0x60-0x67
    'heuk', 'heut', 'heup', 'heuh', 'hyi', 'hyig', 'hyigg', 'hyigs',        # 0x68-0x6f
    'hyin', 'hyinj', 'hyinh', 'hyid', 'hyil', 'hyilg', 'hyilm', 'hyilb',    # 0x70-0x77
    'hyils', 'hyilt', 'hyilp', 'hyilh', 'hyim', 'hyib', 'hyibs', 'hyis',    # 0x78-0x7f
    'hyiss', 'hying', 'hyij', 'hyic', 'hyik', 'hyit', 'hyip', 'hyih',       # 0x80-0x87
    'hi', 'hig', 'higg', 'higs', 'hin', 'hinj', 'hinh', 'hid',              # 0x88-0x8f
    'hil', 'hilg', 'hilm', 'hilb', 'hils', 'hilt', 'hilp', 'hilh',          # 0x90-0x97
    'him', 'hib', 'hibs', 'his', 'hiss', 'hing', 'hij', 'hic',              # 0x98-0x9f
    'hik', 'hit', 'hip', 'hih',                                             # 0xa0-0xa3
) + ('[?]',) * 91  # 0xa4-0xfe: unmapped
| gpl-3.0 |
joachimneu/pelican-plugins | liquid_tags/b64img.py | 312 | 3085 | """
Image Tag
---------
This implements a Liquid-style image tag for Pelican,
based on the liquid img tag which is based on the octopress image tag [1]_
Syntax
------
{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}
Examples
--------
{% b64img /images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png 150 150 "Ninja Attack!" "Ninja in attack posture" %}
Output
------
<img src="data:;base64,....">
<img class="left half" src="data:;base64,..." title="Ninja Attack!" alt="Ninja Attack!">
<img class="left half" src="data:;base64,..." width="150" height="150" title="Ninja Attack!" alt="Ninja in attack posture">
[1] https://github.com/imathis/octopress/blob/master/plugins/image_tag.rb
"""
import re
import base64
import urllib2
from .mdx_liquid_tags import LiquidTags
import six
# Usage string echoed back to the user when the tag fails to parse.
SYNTAX = '{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}'

# Regular expression to match the entire tag syntax.  Raw strings keep
# the regex escapes (\S, \d, \/) out of Python's string-escape
# processing — unknown escapes like "\S" in non-raw strings raise a
# SyntaxWarning (and eventually an error) in modern Python.
ReImg = re.compile(r"""(?P<class>\S.*\s+)?(?P<src>(?:https?:\/\/|\/|\S+\/)\S+)(?:\s+(?P<width>\d+))?(?:\s+(?P<height>\d+))?(?P<title>\s+.+)?""")

# Regular expression to split an optional quoted alt text off the title.
ReTitleAlt = re.compile(r"""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
def _get_file(src):
""" Return content from local or remote file. """
try:
if '://' in src or src[0:2] == '//': # Most likely this is remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e))
def base64image(src):
    """ Generate base64 encoded image from srouce file. """
    # Fetch the raw bytes (local path or URL) and encode them in one step.
    raw = _get_file(src)
    return base64.b64encode(raw)
@LiquidTags.register('b64img')
def b64img(preprocessor, tag, markup):
    """Render a {% b64img ... %} liquid tag as an <img> whose src is an
    inline base64 data URI (see the module docstring for the syntax)."""
    # Parse the markup into the named groups declared by ReImg; keep only
    # the groups that actually matched, with surrounding whitespace removed.
    parsed = ReImg.search(markup)
    if not parsed:
        raise ValueError('Error processing input. '
                         'Expected syntax: {0}'.format(SYNTAX))
    attrs = {key: val.strip()
             for (key, val) in six.iteritems(parsed.groupdict())
             if val}

    # A quoted title may carry a second quoted alt text; split them apart
    # and fall back to the title when no alt was supplied.
    if 'title' in attrs:
        title_alt = ReTitleAlt.search(attrs['title'])
        if title_alt:
            attrs.update(title_alt.groupdict())
        if not attrs.get('alt'):
            attrs['alt'] = attrs['title']

    # Replace the file/URL source with the inline base64 payload.
    attrs['src'] = 'data:;base64,{}'.format(base64image(attrs['src']))

    # Serialize the attribute dict back into an <img> element.
    rendered = ' '.join('{0}="{1}"'.format(key, val)
                        for (key, val) in six.iteritems(attrs))
    return "<img {0}>".format(rendered)
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 |
mhugent/Quantum-GIS | python/plugins/processing/algs/qgis/ftools/PointDistance.py | 2 | 8193 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointDistance.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import math
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterTableField import ParameterTableField
from processing.outputs.OutputTable import OutputTable
from processing.tools import dataobjects, vector
class PointDistance(GeoAlgorithm):
    """Distance-matrix algorithm between two point layers.

    For every feature of the input layer, distances to the (up to k)
    nearest features of the target layer are measured with
    QgsDistanceArea and written to an output table in one of three
    shapes: a linear row-per-pair list, a regular input-by-target
    matrix, or per-input summary statistics.
    """

    # Parameter / output identifiers — the algorithm's public interface.
    INPUT_LAYER = 'INPUT_LAYER'
    INPUT_FIELD = 'INPUT_FIELD'
    TARGET_LAYER = 'TARGET_LAYER'
    TARGET_FIELD = 'TARGET_FIELD'
    MATRIX_TYPE = 'MATRIX_TYPE'
    NEAREST_POINTS = 'NEAREST_POINTS'
    DISTANCE_MATRIX = 'DISTANCE_MATRIX'

    # Labels for the MATRIX_TYPE selection parameter, in index order.
    MAT_TYPES = ['Linear (N*k x 3) distance matrix',
                 'Standard (N x T) distance matrix',
                 'Summary distance matrix (mean, std. dev., min, max)']

    def defineCharacteristics(self):
        """Declare name, group, parameters and the output table."""
        self.name = 'Distance matrix'
        self.group = 'Vector analysis tools'
        self.addParameter(ParameterVector(self.INPUT_LAYER,
                                          'Input point layer',
                                          [ParameterVector.VECTOR_TYPE_POINT]))
        self.addParameter(ParameterTableField(self.INPUT_FIELD,
                                              'Input unique ID field',
                                              self.INPUT_LAYER,
                                              ParameterTableField.DATA_TYPE_ANY))
        # BUGFIX: the shape-type argument is wrapped in a list to match the
        # INPUT_LAYER declaration above (it was previously passed bare).
        self.addParameter(ParameterVector(self.TARGET_LAYER,
                                          'Target point layer',
                                          [ParameterVector.VECTOR_TYPE_POINT]))
        self.addParameter(ParameterTableField(self.TARGET_FIELD,
                                              'Target unique ID field',
                                              self.TARGET_LAYER,
                                              ParameterTableField.DATA_TYPE_ANY))
        self.addParameter(ParameterSelection(self.MATRIX_TYPE,
                                             'Output matrix type',
                                             self.MAT_TYPES, 0))
        self.addParameter(ParameterNumber(self.NEAREST_POINTS,
                                          'Use only the nearest (k) target points',
                                          0, 9999, 0))
        self.addOutput(OutputTable(self.DISTANCE_MATRIX, 'Distance matrix'))

    def processAlgorithm(self, progress):
        """Resolve parameter values and dispatch to the requested writer."""
        inLayer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT_LAYER))
        inField = self.getParameterValue(self.INPUT_FIELD)
        targetLayer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.TARGET_LAYER))
        targetField = self.getParameterValue(self.TARGET_FIELD)
        matType = self.getParameterValue(self.MATRIX_TYPE)
        nPoints = self.getParameterValue(self.NEAREST_POINTS)
        outputFile = self.getOutputFromName(self.DISTANCE_MATRIX)

        # k == 0 means "no limit": use every feature of the target layer.
        if nPoints < 1:
            nPoints = len(vector.features(targetLayer))

        self.writer = outputFile.getTableWriter([])

        if matType == 0:
            # Linear (row-per-pair) distance matrix.
            self.linearMatrix(inLayer, inField, targetLayer, targetField,
                              matType, nPoints, progress)
        elif matType == 1:
            # Standard N x T distance matrix.
            self.regularMatrix(inLayer, inField, targetLayer, targetField,
                               nPoints, progress)
        elif matType == 2:
            # Summary statistics (mean/stddev/min/max) per input feature.
            self.linearMatrix(inLayer, inField, targetLayer, targetField,
                              matType, nPoints, progress)

    def linearMatrix(self, inLayer, inField, targetLayer, targetField,
                     matType, nPoints, progress):
        """Write the linear pair list (matType == 0) or the per-input
        summary statistics table (matType == 2)."""
        if matType == 0:
            self.writer.addRecord(['InputID', 'TargetID', 'Distance'])
        else:
            self.writer.addRecord(['InputID', 'MEAN', 'STDDEV', 'MIN', 'MAX'])

        index = vector.spatialindex(targetLayer)
        inIdx = inLayer.fieldNameIndex(inField)
        # BUGFIX: the target layer's unique-ID column must be looked up
        # with the *target* field (it was previously queried with inField).
        outIdx = targetLayer.fieldNameIndex(targetField)
        distArea = QgsDistanceArea()

        features = vector.features(inLayer)
        current = 0
        total = 100.0 / float(len(features))
        for inFeat in features:
            inGeom = inFeat.geometry()
            inID = inFeat.attributes()[inIdx]
            featList = index.nearestNeighbor(inGeom.asPoint(), nPoints)
            distList = []
            vari = 0.0
            for fid in featList:
                request = QgsFeatureRequest().setFilterFid(fid)
                outFeat = targetLayer.getFeatures(request).next()
                outID = outFeat.attributes()[outIdx]
                outGeom = outFeat.geometry()
                dist = distArea.measureLine(inGeom.asPoint(),
                                            outGeom.asPoint())
                if matType == 0:
                    self.writer.addRecord([unicode(inID), unicode(outID),
                                           unicode(dist)])
                else:
                    distList.append(float(dist))

            if matType == 2:
                # Population standard deviation around the mean distance.
                mean = sum(distList) / len(distList)
                for d in distList:
                    vari += (d - mean) * (d - mean)
                vari = math.sqrt(vari / len(distList))
                self.writer.addRecord([unicode(inID), unicode(mean),
                                       unicode(vari), unicode(min(distList)),
                                       unicode(max(distList))])

            current += 1
            progress.setPercentage(int(current * total))

    def regularMatrix(self, inLayer, inField, targetLayer, targetField,
                      nPoints, progress):
        """Write the standard N x T matrix (one column per target point).

        The neighbour list of the first input feature fixes the column
        order for the whole table.
        """
        index = vector.spatialindex(targetLayer)
        inIdx = inLayer.fieldNameIndex(inField)
        # BUGFIX: use targetField (not inField) for the target layer.
        outIdx = targetLayer.fieldNameIndex(targetField)
        distArea = QgsDistanceArea()

        first = True
        current = 0
        features = vector.features(inLayer)
        total = 100.0 / float(len(features))
        for inFeat in features:
            inGeom = inFeat.geometry()
            inID = inFeat.attributes()[inIdx]
            if first:
                featList = index.nearestNeighbor(inGeom.asPoint(), nPoints)
                first = False
                data = ['ID']
                for fid in featList:
                    request = QgsFeatureRequest().setFilterFid(fid)
                    outFeat = targetLayer.getFeatures(request).next()
                    # BUGFIX: attributes is a method — it was previously
                    # indexed without being called.
                    data.append(unicode(outFeat.attributes()[outIdx]))
                self.writer.addRecord(data)

            data = [unicode(inID)]
            for fid in featList:
                request = QgsFeatureRequest().setFilterFid(fid)
                outFeat = targetLayer.getFeatures(request).next()
                outGeom = outFeat.geometry()
                dist = distArea.measureLine(inGeom.asPoint(),
                                            outGeom.asPoint())
                data.append(unicode(float(dist)))
            self.writer.addRecord(data)
            current += 1
            progress.setPercentage(int(current * total))
| gpl-2.0 |
IT-Department-Projects/OOAD-Project | Flask_App/oakcrest/lib/python2.7/site-packages/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
# Maps each windows-1255 byte value (the tuple index, 0-255) to its
# frequency-order rank; the special ranks 252-255 are documented in the
# comment block above (controls, CR/LF, punctuation, digits).
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85,  # 40
 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253,  # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49,  # 60
 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253,  # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
  9,  8, 20, 16,  3,  2, 24, 14, 22,  1, 25, 15,  4, 11,  6, 23,
 12, 19, 13, 26, 18, 27, 21, 17,  7, 10,  5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
# Bigram precedence matrix for the windows-1255 Hebrew model: a flat
# 64x64 table indexed by pairs of frequency-order ranks (from
# win1255_CharToOrderMap); each cell is a small likelihood category for
# that character sequence (coverage figures are in the comments above).
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Bundle tying the byte-to-frequency-order map and the bigram precedence
# matrix above together with the model's tuning constants.
Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,
    'precedenceMatrix': HebrewLangModel,
    # Fraction of observed sequences covered by the most likely category
    # (matches the "first 512 sequences" figure in the comments above).
    'mTypicalPositiveRatio': 0.984004,
    # Whether ASCII letters should be kept when sampling input (False here).
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}
# flake8: noqa
| mit |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Mac/Demo/resources/listres.py | 39 | 1598 | # List all resources
from Carbon import Res
from Carbon.Resources import *
def list1resources():
    """Print every resource of the current (local) resource file only.

    Uses the Count1/Get1Ind Carbon calls, which do not search the whole
    resource-file chain.
    """
    ntypes = Res.Count1Types()
    # Carbon resource indices are 1-based.
    for itype in range(1, 1+ntypes):
        type = Res.Get1IndType(itype)
        print "Type:", repr(type)
        nresources = Res.Count1Resources(type)
        for i in range(1, 1 + nresources):
            # Temporarily disable auto-loading so only the resource handle
            # is fetched, not its data, while we inspect it.
            Res.SetResLoad(0)
            res = Res.Get1IndResource(type, i)
            Res.SetResLoad(1)
            info(res)
def listresources():
    """Print every resource visible in the whole resource-file chain.

    Same as list1resources() but with the chain-searching Count/GetInd
    Carbon calls.
    """
    ntypes = Res.CountTypes()
    # Carbon resource indices are 1-based.
    for itype in range(1, 1+ntypes):
        type = Res.GetIndType(itype)
        print "Type:", repr(type)
        nresources = Res.CountResources(type)
        for i in range(1, 1 + nresources):
            # Fetch only the handle, not the resource data.
            Res.SetResLoad(0)
            res = Res.GetIndResource(type, i)
            Res.SetResLoad(1)
            info(res)
def info(res):
    """Print one resource's info tuple, its data size and decoded attributes."""
    print res.GetResInfo(), res.SizeResource(), decodeattrs(res.GetResAttrs())
# Mapping from Resource Manager attribute bit masks (constants imported
# from Carbon.Resources) to human-readable names.
attrnames = {
    resChanged: 'Changed',
    resPreload: 'Preload',
    resProtected: 'Protected',
    resLocked: 'Locked',
    resPurgeable: 'Purgeable',
    resSysHeap: 'SysHeap',
}
def decodeattrs(attrs):
    """Return a list of readable names for the bits set in *attrs*.

    Known resource-attribute bits are translated via ``attrnames``; any
    unknown set bit is reported as its hex mask instead.
    """
    names = []
    for bit in range(16):
        mask = 1 << bit
        if attrs & mask:
            # dict.has_key() is deprecated (and removed in Python 3);
            # dict.get() with a fallback covers both branches at once.
            names.append(attrnames.get(mask, hex(mask)))
    return names
def test():
    """Dump the local resource map, then every resource in the chain."""
    print "=== Local resourcess ==="
    list1resources()
    print "=== All resources ==="
    listresources()

if __name__ == '__main__':
    test()
| apache-2.0 |
jawrainey/sris | settings.py | 1 | 1602 | import os
class Config(object):
    """
    The shared configuration settings for the flask app.
    """
    # Service settings
    # Absolute path of the directory containing this settings module.
    PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__)))
    SERVICE_ONTOLOGY = PROJECT_ROOT + '/sris/config/ontology.json'
    # Database settings: bind keys for the two SQLAlchemy databases
    # (see SQLALCHEMY_BINDS in the subclasses below).
    CLIENT_NAME = 'client'
    SERVICE_NAME = 'service'
    # These need to be set by you!  Messaging-API credentials and the
    # sending phone number, read from the environment (None when unset).
    ACCOUNT_SID = os.environ.get('ACCOUNT_SID', None)
    AUTH_TOKEN = os.environ.get('AUTH_TOKEN', None)
    NUM = os.environ.get('NUM', None)
class ProdConfig(Config):
    """
    Setup the production configuration for the flask app.
    Args:
        Config (object): Inherit the default shared configuration settings.
    """
    DEBUG = False
    # These are set server-side for ease-of-use when using PaaS.
    # Database URLs come from the environment (None when unset).
    SQLALCHEMY_BINDS = {
        Config.CLIENT_NAME: os.environ.get('CLIENT_DATABASE_URL', None),
        Config.SERVICE_NAME: os.environ.get('SERVICE_DATABASE_URL', None)
    }
class DevConfig(Config):
    """
    Setup the development configuration for the flask app.
    Args:
        Config (object): Inherit the default shared configuration settings.
    """
    DEBUG = True
    # Store these in the root directly.
    CLIENT_DB = os.path.join(Config.PROJECT_ROOT, Config.CLIENT_NAME + '.db')
    SERVICE_DB = os.path.join(Config.PROJECT_ROOT, Config.SERVICE_NAME + '.db')
    # Support for multiple databases (client & service) via on-disk
    # SQLite files for local development.
    SQLALCHEMY_BINDS = {
        Config.CLIENT_NAME: 'sqlite:///{0}'.format(CLIENT_DB),
        Config.SERVICE_NAME: 'sqlite:///{0}'.format(SERVICE_DB)
    }
| mit |
gaddman/ansible | test/units/module_utils/network/slxos/test_slxos.py | 45 | 4718 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path
import json
from mock import MagicMock, patch, call
from units.compat import unittest
from ansible.module_utils.network.slxos import slxos
class TestPluginCLIConfSLXOS(unittest.TestCase):
    """ Test class for SLX-OS CLI Conf Methods

    Exercises the module-level helpers in
    ansible.module_utils.network.slxos.slxos using MagicMock modules and a
    patched persistent Connection.
    """
    def test_get_connection_established(self):
        """ Test get_connection with established connection
        """
        module = MagicMock()
        connection = slxos.get_connection(module)
        # An existing connection cached on the module object is returned
        # as-is, without constructing a new Connection.
        self.assertEqual(connection, module.slxos_connection)
    @patch('ansible.module_utils.network.slxos.slxos.Connection')
    def test_get_connection_new(self, connection):
        """ Test get_connection with new connection
        """
        socket_path = "little red riding hood"
        # spec=['fail_json'] ensures the mock has no cached
        # slxos_connection attribute, forcing the "new connection" path.
        module = MagicMock(spec=[
            'fail_json',
        ])
        module._socket_path = socket_path
        connection().get_capabilities.return_value = '{"network_api": "cliconf"}'
        returned_connection = slxos.get_connection(module)
        # The Connection must be built from the module's socket path.
        connection.assert_called_with(socket_path)
        self.assertEqual(returned_connection, module.slxos_connection)
    @patch('ansible.module_utils.network.slxos.slxos.Connection')
    def test_get_connection_incorrect_network_api(self, connection):
        """ Test get_connection with incorrect network_api response
        """
        socket_path = "little red riding hood"
        module = MagicMock(spec=[
            'fail_json',
        ])
        module._socket_path = socket_path
        # Making fail_json raise lets us assert the failure path was taken.
        module.fail_json.side_effect = TypeError
        connection().get_capabilities.return_value = '{"network_api": "nope"}'
        with self.assertRaises(TypeError):
            slxos.get_connection(module)
    @patch('ansible.module_utils.network.slxos.slxos.Connection')
    def test_get_capabilities(self, connection):
        """ Test get_capabilities
        """
        socket_path = "little red riding hood"
        module = MagicMock(spec=[
            'fail_json',
        ])
        module._socket_path = socket_path
        module.fail_json.side_effect = TypeError
        capabilities = {'network_api': 'cliconf'}
        connection().get_capabilities.return_value = json.dumps(capabilities)
        capabilities_returned = slxos.get_capabilities(module)
        # The JSON payload from the connection is decoded back to a dict.
        self.assertEqual(capabilities, capabilities_returned)
    @patch('ansible.module_utils.network.slxos.slxos.Connection')
    def test_run_commands(self, connection):
        """ Test run_commands
        """
        module = MagicMock()
        commands = [
            'hello',
            'dolly',
            'well hello',
            'dolly',
            'its so nice to have you back',
            'where you belong',
        ]
        responses = [
            'Dolly, never go away again1',
            'Dolly, never go away again2',
            'Dolly, never go away again3',
            'Dolly, never go away again4',
            'Dolly, never go away again5',
            'Dolly, never go away again6',
        ]
        # Each get() call pops the next canned response, in order.
        module.slxos_connection.get.side_effect = responses
        run_command_responses = slxos.run_commands(module, commands)
        calls = []
        for command in commands:
            calls.append(call(
                command,
                None,
                None
            ))
        # Every command must be sent exactly once, with no prompt/answer.
        module.slxos_connection.get.assert_has_calls(calls)
        self.assertEqual(responses, run_command_responses)
    @patch('ansible.module_utils.network.slxos.slxos.Connection')
    def test_load_config(self, connection):
        """ Test load_config
        """
        module = MagicMock()
        commands = [
            'what does it take',
            'to be',
            'number one?',
            'two is not a winner',
            'and three nobody remember',
        ]
        slxos.load_config(module, commands)
        # load_config must push the command list verbatim via edit_config.
        module.slxos_connection.edit_config.assert_called_once_with(commands)
| gpl-3.0 |
eg-zhang/h2o-2 | py/testdir_ec2_only/test_GBM_manyfiles_s3n.py | 9 | 7490 | import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o_browse as h2b, h2o_gbm
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs as h2j
# Global switch: when True, also run Predict + confusion-matrix scoring on
# the held-out test set after each GBM training run (and plot the results).
doPredict = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GBM_manyfiles_train_test(self):
bucket = 'home-0xdiag-datasets'
modelKey = 'GBMModelKey'
if h2o.localhost:
files = [
# None forces numCols to be used. assumes you set it from Inspect
('manyfiles-nflx-gz', 'file_1[0-9][0-9].dat.gz', 'file_100.hex', 1800, None, 'file_1.dat.gz', 'file_1_test.hex')
]
else:
files = [
# None forces numCols to be used. assumes you set it from Inspect
('manyfiles-nflx-gz', 'file_[0-9].dat.gz', 'file_10.hex', 1800, None, 'file_1[0-9].dat.gz', 'file_10_test.hex')
]
# if I got to hdfs, it's here
# hdfs://172.16.2.176/datasets/manyfiles-nflx-gz/file_99.dat.gz
# h2b.browseTheCloud()
for (importFolderPath, trainFilename, trainKey, timeoutSecs, response, testFilename, testKey) in files:
# PARSE train****************************************
start = time.time()
xList = []
eList = []
fList = []
# Parse (train)****************************************
csvPathname = importFolderPath + "/" + trainFilename
parseTrainResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='s3n',
hex_key=trainKey, timeoutSecs=timeoutSecs, doSummary=False)
elapsed = time.time() - start
print "train parse end on ", trainFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "train parse result:", parseTrainResult['destination_key']
### h2o_cmd.runSummary(key=parsTraineResult['destination_key'])
inspect = h2o_cmd.runInspect(key=parseTrainResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
numRows = inspect['numRows']
numCols = inspect['numCols']
# Make col 378 it something we can do binomial regression on!
execExpr = '%s[,378] = %s[,378]>15 ? 1 : 0' % (trainKey, trainKey)
resultExec = h2o_cmd.runExec(str=execExpr, timeoutSecs=500)
# Parse (test)****************************************
parseTestResult = h2i.import_parse(bucket=bucket, path=importFolderPath + "/" + testFilename, schema='s3n',
hex_key=testKey, timeoutSecs=timeoutSecs, doSummary=False)
elapsed = time.time() - start
print "test parse end on ", testFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "test parse result:", parseTestResult['destination_key']
# Make col 378 it something we can do binomial regression on!
print "Slow! exec is converting all imported keys?, not just what was parsed"
execExpr = '%s[,378] = %s[,378]>15 ? 1 : 0' % (testKey, testKey, testKey)
resultExec = h2o_cmd.runExec(str=execExpr, timeoutSecs=300)
# Note ..no inspect of test data here..so translate happens later?
# GBM (train iterate)****************************************
# if not response:
# response = numCols - 1
response = 378
print "Using the same response %s for train and test (which should have a output value too)" % response
ntrees = 10
for max_depth in [5,10,20,40]:
params = {
'learn_rate': .2,
'nbins': 1024,
'ntrees': ntrees,
'max_depth': max_depth,
'min_rows': 10,
'response': response,
# 'ignored_cols':
}
print "Using these parameters for GBM: ", params
kwargs = params.copy()
# GBM train****************************************
trainStart = time.time()
gbmTrainResult = h2o_cmd.runGBM(parseResult=parseTrainResult,
timeoutSecs=timeoutSecs, destination_key=modelKey, **kwargs)
trainElapsed = time.time() - trainStart
print "GBM training completed in", trainElapsed, "seconds. On dataset: ", trainFilename
gbmTrainView = h2o_cmd.runGBMView(model_key=modelKey)
# errrs from end of list? is that the last tree?
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "GBM 'errsLast'", errsLast
cm = gbmTrainView['gbm_model']['cm']
pctWrongTrain = h2o_gbm.pp_cm_summary(cm);
print "Last line of this cm might be NAs, not CM"
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
# GBM test****************************************
if doPredict:
predictKey = 'Predict.hex'
### h2o_cmd.runInspect(key=parseTestResult['destination_key'])
start = time.time()
gbmTestResult = h2o_cmd.runPredict(
data_key=parseTestResult['destination_key'],
model_key=modelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "GBM predict completed in", elapsed, "seconds. On dataset: ", testFilename
print "This is crazy!"
gbmPredictCMResult =h2o.nodes[0].predict_confusion_matrix(
actual=parseTestResult['destination_key'],
vactual=response,
predict=predictKey,
vpredict='predict', # choices are 0 and 'predict'
)
# errrs from end of list? is that the last tree?
# all we get is cm
cm = gbmPredictCMResult['cm']
# These will move into the h2o_gbm.py
pctWrong = h2o_gbm.pp_cm_summary(cm);
print "Last line of this cm is really NAs, not CM"
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
# xList.append(ntrees)
xList.append(max_depth)
eList.append(pctWrong)
fList.append(trainElapsed)
if doPredict:
xLabel = 'max_depth'
eLabel = 'pctWrong'
fLabel = 'trainElapsed'
eListTitle = ""
fListTitle = ""
h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel)
if __name__ == '__main__':
    # Hand control to the h2o test harness (wraps unittest.main).
    h2o.unit_main()
| apache-2.0 |
kostya0shift/SyncToGit | synctogit/Config.py | 1 | 1728 | from __future__ import absolute_import
# Use the Python 3 configparser when available, falling back to the
# Python 2 ConfigParser module under the same name.
try:
    import configparser
except ImportError:  # FIX: was a bare except, which hid unrelated errors
    import ConfigParser as configparser
class _NotSet(object):
pass
class ConfigException(Exception):
    """Raised when a required section or key is missing from the config."""
class Config:
    """INI-file backed configuration with typed getters.

    Every mutation (:meth:`set`, :meth:`unset`) is written back to the
    backing file immediately.
    """

    def __init__(self, conffile):
        """Load the configuration from ``conffile`` (must exist and parse)."""
        self.conffile = conffile
        self.conf = configparser.ConfigParser()
        with open(self.conffile, 'r') as f:
            # FIX: readfp() was deprecated in Python 3.2 and removed in
            # 3.12. Prefer read_file() when present; keep readfp() so the
            # Python 2 ConfigParser fallback still works.
            read_file = getattr(self.conf, 'read_file', None) or self.conf.readfp
            read_file(f)

    def _get(self, section, key, getter, default=_NotSet()):
        """Fetch ``section``/``key`` via ``getter``.

        Returns ``default`` when the section or key is missing; raises
        ConfigException instead when no default was supplied.
        """
        if not self.conf.has_section(section):
            if isinstance(default, _NotSet):
                raise ConfigException('Section %s is missing' % section)
            else:
                return default

        if not self.conf.has_option(section, key):
            if isinstance(default, _NotSet):
                raise ConfigException('Key %s from section %s is missing' % (key, section))
            else:
                v = default
        else:
            v = getter(section, key)

        return v

    def get_int(self, section, key, default=_NotSet()):
        """Return the value as an int, or ``default`` if missing."""
        v = self._get(section, key, self.conf.getint, default)
        return int(v)

    def get_string(self, section, key, default=_NotSet()):
        """Return the value as a string, or ``default`` if missing."""
        v = self._get(section, key, self.conf.get, default)
        return "" + v

    def get_boolean(self, section, key, default=_NotSet()):
        """Return the value as a bool, or ``default`` if missing."""
        v = self._get(section, key, self.conf.getboolean, default)
        return bool(v)

    def _write(self):
        # Persist the in-memory state back to the backing file.
        with open(self.conffile, 'w') as f:
            self.conf.write(f)

    def set(self, section, key, value):
        """Set ``section``/``key`` to ``value`` and persist immediately."""
        self.conf.set(section, key, value)
        self._write()

    def unset(self, section, key):
        """Remove ``section``/``key`` and persist immediately."""
        self.conf.remove_option(section, key)
        self._write()
| mit |
openthread/openthread | tools/harness-automation/cases_R140/ed_6_5_2.py | 18 | 2051 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class ED_6_5_2(HarnessCase):
    """Thread certification case 6.5.2 with the DUT in the End Device role."""
    role = HarnessCase.ROLE_ED
    case = '6 5 2'
    golden_devices_required = 2

    def on_dialog(self, dialog, title):
        # Map dialog-title prefixes to the DUT action they require;
        # returning False tells the harness the dialog was handled.
        handlers = (
            ('Reset DUT', self.dut.stop),
            ('Rejoin Now', self.dut.start),
        )
        for prefix, action in handlers:
            if title.startswith(prefix):
                action()
                return False
if __name__ == '__main__':
    # Run this harness case directly via the standard unittest runner.
    unittest.main()
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
# Public API of this module.
__all__ = ['SpectralCoclustering',
           'SpectralBiclustering']
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix and the row and column scaling
    factors.
    """
    X = make_nonnegative(X)

    def inv_sqrt_sums(axis):
        # 1/sqrt of the sums along ``axis``; zero sums produce NaN via
        # division, which we map back to 0.
        d = np.asarray(1.0 / np.sqrt(X.sum(axis=axis))).squeeze()
        return np.where(np.isnan(d), 0, d)

    row_diag = inv_sqrt_sums(1)
    col_diag = inv_sqrt_sums(0)

    if issparse(X):
        n_rows, n_cols = X.shape
        row_scaler = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        col_scaler = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        normalized = row_scaler * X * col_scaler
    else:
        normalized = row_diag[:, np.newaxis] * X * col_diag

    return normalized, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    Parameters
    ----------
    X : array-like or sparse matrix
        Matrix to normalize; made nonnegative first.
    max_iter : int
        Maximum number of alternating-scaling iterations.
    tol : float
        Convergence threshold on the change between successive iterates.
    """
    # According to paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            # FIX: compare successive iterates (X_scaled vs X_new), as the
            # dense branch does; the original compared against the input
            # matrix X, so the sparse convergence test was wrong.
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist < tol:
            break
    return X_scaled
def _log_normalize(X):
    """Normalize ``X`` according to Kluger's log-interactions scheme."""
    X = make_nonnegative(X, min_value=1)
    if issparse(X):
        raise ValueError("Cannot compute log of a sparse matrix,"
                         " because log(x) diverges to -infinity as x"
                         " goes to 0.")
    log_X = np.log(X)
    # Double-center the log matrix: subtract row and column means, then
    # add back the grand mean.
    return (log_X - log_X.mean(axis=1)[:, np.newaxis]
            - log_X.mean(axis=0) + log_X.mean())
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
                                      BiclusterMixin)):
    """Base class for spectral biclustering.

    Holds the hyper-parameters shared by :class:`SpectralCoclustering`
    and :class:`SpectralBiclustering`, plus the SVD and k-means
    machinery both subclasses rely on.
    """

    @abstractmethod
    def __init__(self, n_clusters=3, svd_method="randomized",
                 n_svd_vecs=None, mini_batch=False, init="k-means++",
                 n_init=10, n_jobs=1, random_state=None):
        self.n_clusters = n_clusters
        self.svd_method = svd_method
        self.n_svd_vecs = n_svd_vecs
        self.mini_batch = mini_batch
        self.init = init
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.random_state = random_state

    def _check_parameters(self):
        """Raise ValueError if ``svd_method`` is not a supported method."""
        legal_svd_methods = ('randomized', 'arpack')
        if self.svd_method not in legal_svd_methods:
            raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
                             " one of {1}.".format(self.svd_method,
                                                   legal_svd_methods))

    def fit(self, X):
        """Creates a biclustering for X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        self
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        self._check_parameters()
        self._fit(X)
        return self

    def _svd(self, array, n_components, n_discard):
        """Returns first `n_components` left and right singular
        vectors u and v, discarding the first `n_discard`.
        """
        if self.svd_method == 'randomized':
            kwargs = {}
            if self.n_svd_vecs is not None:
                kwargs['n_oversamples'] = self.n_svd_vecs
            u, _, vt = randomized_svd(array, n_components,
                                      random_state=self.random_state,
                                      **kwargs)
        elif self.svd_method == 'arpack':
            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
            if np.any(np.isnan(vt)):
                # some eigenvalues of A * A.T are negative, causing
                # sqrt() to be np.nan. This causes some vectors in vt
                # to be np.nan.
                # Recompute the right singular vectors as eigenvectors
                # of A.T * A instead.
                A = safe_sparse_dot(array.T, array)
                random_state = check_random_state(self.random_state)
                # initialize with [-1,1] as in ARPACK
                v0 = random_state.uniform(-1, 1, A.shape[0])
                _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
                vt = v.T
            if np.any(np.isnan(u)):
                # Same workaround for the left singular vectors, via the
                # eigenvectors of A * A.T.
                A = safe_sparse_dot(array, array.T)
                random_state = check_random_state(self.random_state)
                # initialize with [-1,1] as in ARPACK
                v0 = random_state.uniform(-1, 1, A.shape[0])
                _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)

        # NaNs here would silently corrupt the downstream k-means step.
        assert_all_finite(u)
        assert_all_finite(vt)
        u = u[:, n_discard:]
        vt = vt[n_discard:]
        return u, vt.T

    def _k_means(self, data, n_clusters):
        """Cluster ``data`` with (mini-batch) k-means; return (centroids, labels)."""
        if self.mini_batch:
            model = MiniBatchKMeans(n_clusters,
                                    init=self.init,
                                    n_init=self.n_init,
                                    random_state=self.random_state)
        else:
            model = KMeans(n_clusters, init=self.init,
                           n_init=self.n_init, n_jobs=self.n_jobs,
                           random_state=self.random_state)
        model.fit(data)
        centroid = model.cluster_centers_
        labels = model.labels_
        return centroid, labels
class SpectralCoclustering(BaseSpectral):
    """Spectral Co-Clustering algorithm (Dhillon, 2001).

    Clusters rows and columns of an array `X` to solve the relaxed
    normalized cut of the bipartite graph created from `X` as follows:
    the edge between row vertex `i` and column vertex `j` has weight
    `X[i, j]`.

    The resulting bicluster structure is block-diagonal, since each
    row and each column belongs to exactly one bicluster.

    Supports sparse matrices, as long as they are nonnegative.

    Read more in the :ref:`User Guide <spectral_coclustering>`.

    Parameters
    ----------
    n_clusters : integer, optional, default: 3
        The number of biclusters to find.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', use
        :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', use
        :func:`sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        The bicluster label of each row.

    column_labels_ : array-like, shape (n_cols,)
        The bicluster label of each column.

    References
    ----------

    * Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
      bipartite spectral graph partitioning
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.

    """
    def __init__(self, n_clusters=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralCoclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)

    def _fit(self, X):
        # Scale rows/columns, then embed rows and columns jointly with the
        # singular vectors of the normalized matrix (Dhillon's relaxation).
        normalized_data, row_diag, col_diag = _scale_normalize(X)
        n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
        u, v = self._svd(normalized_data, n_sv, n_discard=1)
        z = np.vstack((row_diag[:, np.newaxis] * u,
                       col_diag[:, np.newaxis] * v))

        _, labels = self._k_means(z, self.n_clusters)

        n_rows = X.shape[0]
        self.row_labels_ = labels[:n_rows]
        self.column_labels_ = labels[n_rows:]

        # FIX: build the indicator matrices from lists, not generators --
        # np.vstack requires a sequence of arrays and rejects generators
        # in modern NumPy.
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
    """Spectral biclustering (Kluger, 2003).

    Partitions rows and columns under the assumption that the data has
    an underlying checkerboard structure. For instance, if there are
    two row partitions and three column partitions, each row will
    belong to three biclusters, and each column will belong to two
    biclusters. The outer product of the corresponding row and column
    label vectors gives this checkerboard structure.

    Read more in the :ref:`User Guide <spectral_biclustering>`.

    Parameters
    ----------
    n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
        The number of row and column clusters in the checkerboard
        structure.

    method : string, optional, default: 'bistochastic'
        Method of normalizing and converting singular vectors into
        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
        The authors recommend using 'log'. If the data is sparse,
        however, log normalization will not work, which is why the
        default is 'bistochastic'. CAUTION: if `method='log'`, the
        data must not be sparse.

    n_components : integer, optional, default: 6
        Number of singular vectors to check.

    n_best : integer, optional, default: 3
        Number of best singular vectors to which to project the data
        for clustering.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', uses
        `sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', uses
        `sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        Row partition labels.

    column_labels_ : array-like, shape (n_cols,)
        Column partition labels.

    References
    ----------

    * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
      data: coclustering genes and conditions
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.

    """
    def __init__(self, n_clusters=3, method='bistochastic',
                 n_components=6, n_best=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralBiclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)
        self.method = method
        self.n_components = n_components
        self.n_best = n_best

    def _check_parameters(self):
        super(SpectralBiclustering, self)._check_parameters()
        legal_methods = ('bistochastic', 'scale', 'log')
        if self.method not in legal_methods:
            raise ValueError("Unknown method: '{0}'. method must be"
                             " one of {1}.".format(self.method, legal_methods))
        try:
            int(self.n_clusters)
        except TypeError:
            try:
                r, c = self.n_clusters
                int(r)
                int(c)
            except (ValueError, TypeError):
                # FIX: the message previously contained an unfilled '{}'
                # placeholder (no .format call); format in the bad value.
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
        if self.n_components < 1:
            raise ValueError("Parameter n_components must be greater than 0,"
                             " but its value is {}".format(self.n_components))
        if self.n_best < 1:
            raise ValueError("Parameter n_best must be greater than 0,"
                             " but its value is {}".format(self.n_best))
        if self.n_best > self.n_components:
            raise ValueError("n_best cannot be larger than"
                             " n_components, but {} > {}"
                             "".format(self.n_best, self.n_components))

    def _fit(self, X):
        n_sv = self.n_components
        if self.method == 'bistochastic':
            normalized_data = _bistochastic_normalize(X)
            n_sv += 1
        elif self.method == 'scale':
            normalized_data, _, _ = _scale_normalize(X)
            n_sv += 1
        elif self.method == 'log':
            normalized_data = _log_normalize(X)
        # The log method keeps all singular vectors; the others discard the
        # first (trivial) one.
        n_discard = 0 if self.method == 'log' else 1
        u, v = self._svd(normalized_data, n_sv, n_discard)
        ut = u.T
        vt = v.T

        try:
            n_row_clusters, n_col_clusters = self.n_clusters
        except TypeError:
            n_row_clusters = n_col_clusters = self.n_clusters

        best_ut = self._fit_best_piecewise(ut, self.n_best,
                                           n_row_clusters)

        best_vt = self._fit_best_piecewise(vt, self.n_best,
                                           n_col_clusters)

        self.row_labels_ = self._project_and_cluster(X, best_vt.T,
                                                     n_row_clusters)

        self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
                                                        n_col_clusters)

        # FIX: build the indicator matrices from lists, not generators --
        # np.vstack requires a sequence of arrays and rejects generators
        # in modern NumPy.
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])

    def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.

        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.

        """
        def make_piecewise(v):
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise,
                                                axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1,
                                    arr=(vectors - piecewise_vectors))
        result = vectors[np.argsort(dists)[:n_best]]
        return result

    def _project_and_cluster(self, data, vectors, n_clusters):
        """Project ``data`` to ``vectors`` and cluster the result."""
        projected = safe_sparse_dot(data, vectors)
        _, labels = self._k_means(projected, n_clusters)
        return labels
| bsd-3-clause |
EliasTouil/simpleBlog | simpleBlog/Lib/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 515 | 5599 | from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
    "Base exception used by this module."
    pass


class HTTPWarning(Warning):
    "Base warning used by this module."
    pass


class PoolError(HTTPError):
    "Base exception for errors caused within a pool."
    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes.
        # The pool itself is not picklable, so it is dropped on pickling.
        return self.__class__, (None, None)


class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."
    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes.
        # Keep the URL but drop the (unpicklable) pool, as in PoolError.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    pass


class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass


class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass


class ProtocolError(HTTPError):
    "Raised when something unexpected happens mid-request/response."
    pass


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error

    """

    def __init__(self, pool, url, reason=None):
        # ``reason`` preserves the underlying error for callers to inspect.
        self.reason = reason

        message = "Max retries exceeded with url: %s (Caused by %r)" % (
            url, reason)

        RequestError.__init__(self, pool, url, message)


class HostChangedError(RequestError):
    "Raised when an existing pool gets a request for a foreign host."
    def __init__(self, pool, url, retries=3):
        message = "Tried to open a foreign host with url: %s" % url
        RequestError.__init__(self, pool, url, message)
        # Remaining retries, carried so the caller can resubmit elsewhere.
        self.retries = retries


class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass


class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass
class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    pass


# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    "Raised when a socket timeout occurs while connecting to a server"
    pass


class NewConnectionError(ConnectTimeoutError, PoolError):
    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
    pass


class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass


class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass


class LocationValueError(ValueError, HTTPError):
    "Raised when there is something wrong with a given URL input."
    pass


class LocationParseError(LocationValueError):
    "Raised when get_host or similar fails to parse the URL input."
    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)

        # Keep the offending input for callers to report.
        self.location = location
class ResponseError(HTTPError):
    "Used as a container for an error reason supplied in a MaxRetryError."
    # Message templates used when retries are exhausted by error responses.
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'


class SecurityWarning(HTTPWarning):
    "Warned when performing security reducing actions"
    pass


class SubjectAltNameWarning(SecurityWarning):
    "Warned when connecting to a host with a certificate missing a SAN."
    pass


class InsecureRequestWarning(SecurityWarning):
    "Warned when making an unverified HTTPS request."
    pass


class SystemTimeWarning(SecurityWarning):
    "Warned when system time is suspected to be wrong"
    pass


class InsecurePlatformWarning(SecurityWarning):
    "Warned when certain SSL configuration is not available on a platform."
    pass


class SNIMissingWarning(HTTPWarning):
    "Warned when making a HTTPS request without SNI available."
    pass


class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
    pass


class ResponseNotChunked(ProtocolError, ValueError):
    "Response needs to be chunked in order to read it as chunks."
    pass


class ProxySchemeUnknown(AssertionError, ValueError):
    "ProxyManager does not support the supplied scheme"
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme):
        message = "Not supported proxy scheme %s" % scheme
        super(ProxySchemeUnknown, self).__init__(message)


class HeaderParsingError(HTTPError):
    "Raised by assert_header_parsing, but we convert it to a log.warning statement."
    def __init__(self, defects, unparsed_data):
        message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
        super(HeaderParsingError, self).__init__(message)
| gpl-3.0 |
Achuth17/scikit-bio | skbio/tree/tests/test_tree.py | 3 | 40995 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as nptest
from scipy.stats import pearsonr
from six import StringIO
from skbio import DistanceMatrix, TreeNode
from skbio.tree import (DuplicateNodeError, NoLengthError,
TreeError, MissingNodeError, NoParentError)
class TreeTests(TestCase):
    def setUp(self):
        """Prep the self"""
        # Small four-tip tree used by most tests: ((a,b)i1,(c,d)i2)root;
        self.simple_t = TreeNode.read(StringIO(u"((a,b)i1,(c,d)i2)root;"))
        # Hand-built unbalanced tree rooted at 'a'; the dict lets tests grab
        # individual nodes by name.
        nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
        nodes['a'].append(nodes['b'])
        nodes['b'].append(nodes['c'])
        nodes['c'].append(nodes['d'])
        nodes['c'].append(nodes['e'])
        nodes['c'].append(nodes['f'])
        nodes['f'].append(nodes['g'])
        nodes['a'].append(nodes['h'])
        self.TreeNode = nodes
        self.TreeRoot = nodes['a']

        # Deterministic "shuffle" callables for the shuffle() tests: rev_f
        # reverses a list in place, rotate_f rotates it by one position.
        def rev_f(items):
            items.reverse()

        def rotate_f(items):
            tmp = items[-1]
            items[1:] = items[:-1]
            items[0] = tmp

        self.rev_f = rev_f
        self.rotate_f = rotate_f
        # Larger tree with named internal nodes, used by the shuffle tests.
        self.complex_tree = TreeNode.read(StringIO(u"(((a,b)int1,(x,y,(w,z)int"
                                                   "2,(c,d)int3)int4),(e,f)int"
                                                   "5);"))
def test_count(self):
"""Get node counts"""
exp = 7
obs = self.simple_t.count()
self.assertEqual(obs, exp)
exp = 4
obs = self.simple_t.count(tips=True)
self.assertEqual(obs, exp)
def test_copy(self):
"""copy a tree"""
self.simple_t.children[0].length = 1.2
self.simple_t.children[1].children[0].length = 0.5
cp = self.simple_t.copy()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
def test_append(self):
"""Append a node to a tree"""
second_tree = TreeNode.read(StringIO(u"(x,y)z;"))
self.simple_t.append(second_tree)
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z')
self.assertEqual(len(self.simple_t.children), 3)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y')
self.assertEqual(second_tree.parent, self.simple_t)
def test_extend(self):
"""Extend a few nodes"""
second_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
third_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
first_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
fourth_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
self.simple_t.extend([second_tree, third_tree])
first_tree.extend(fourth_tree.children)
self.assertEqual(0, len(fourth_tree.children))
self.assertEqual(first_tree.children[0].name, 'x1')
self.assertEqual(first_tree.children[1].name, 'y1')
self.assertEqual(first_tree.children[2].name, 'x2')
self.assertEqual(first_tree.children[3].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z1')
self.assertEqual(self.simple_t.children[3].name, 'z2')
self.assertEqual(len(self.simple_t.children), 4)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x1')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y1')
self.assertEqual(self.simple_t.children[3].children[0].name, 'x2')
self.assertEqual(self.simple_t.children[3].children[1].name, 'y2')
self.assertIs(second_tree.parent, self.simple_t)
self.assertIs(third_tree.parent, self.simple_t)
def test_extend_empty(self):
"""Extend on the empty case should work"""
self.simple_t.extend([])
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(len(self.simple_t.children), 2)
def test_iter(self):
"""iter wraps children"""
exp = ['i1', 'i2']
obs = [n.name for n in self.simple_t]
self.assertEqual(obs, exp)
def test_gops(self):
"""Basic TreeNode operations should work as expected"""
p = TreeNode()
self.assertEqual(str(p), ';\n')
p.name = 'abc'
self.assertEqual(str(p), 'abc;\n')
p.length = 3
self.assertEqual(str(p), 'abc:3;\n') # don't suppress branch from root
q = TreeNode()
p.append(q)
self.assertEqual(str(p), '()abc:3;\n')
r = TreeNode()
q.append(r)
self.assertEqual(str(p), '(())abc:3;\n')
r.name = 'xyz'
self.assertEqual(str(p), '((xyz))abc:3;\n')
q.length = 2
self.assertEqual(str(p), '((xyz):2)abc:3;\n')
def test_pop(self):
"""Pop off a node"""
second_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
third_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
self.simple_t.extend([second_tree, third_tree])
i1 = self.simple_t.pop(0)
z2 = self.simple_t.pop()
self.assertEqual(i1.name, 'i1')
self.assertEqual(z2.name, 'z2')
self.assertEqual(i1.children[0].name, 'a')
self.assertEqual(i1.children[1].name, 'b')
self.assertEqual(z2.children[0].name, 'x2')
self.assertEqual(z2.children[1].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'z1')
self.assertEqual(len(self.simple_t.children), 2)
def test_remove(self):
"""Remove nodes"""
self.assertTrue(self.simple_t.remove(self.simple_t.children[0]))
self.assertEqual(len(self.simple_t.children), 1)
n = TreeNode()
self.assertFalse(self.simple_t.remove(n))
def test_remove_deleted(self):
"""Remove nodes by function"""
def f(node):
return node.name in ['b', 'd']
self.simple_t.remove_deleted(f)
exp = "((a)i1,(c)i2)root;\n"
obs = str(self.simple_t)
self.assertEqual(obs, exp)
def test_adopt(self):
"""Adopt a node!"""
n1 = TreeNode(name='n1')
n2 = TreeNode(name='n2')
n3 = TreeNode(name='n3')
self.simple_t._adopt(n1)
self.simple_t.children[-1]._adopt(n2)
n2._adopt(n3)
# adopt doesn't update .children
self.assertEqual(len(self.simple_t.children), 2)
self.assertIs(n1.parent, self.simple_t)
self.assertIs(n2.parent, self.simple_t.children[-1])
self.assertIs(n3.parent, n2)
def test_remove_node(self):
"""Remove a node by index"""
n = self.simple_t._remove_node(-1)
self.assertEqual(n.parent, None)
self.assertEqual(len(self.simple_t.children), 1)
self.assertEqual(len(n.children), 2)
self.assertNotIn(n, self.simple_t.children)
def test_prune(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
def test_prune_length(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
for n in self.simple_t.traverse():
n.length = 1.0
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
self.assertEqual(self.simple_t.children[1].length, 2.0)
def test_subset(self):
"""subset should return set of leaves that descends from node"""
t = self.simple_t
self.assertEqual(t.subset(), frozenset('abcd'))
c = t.children[0]
self.assertEqual(c.subset(), frozenset('ab'))
leaf = c.children[1]
self.assertEqual(leaf.subset(), frozenset(''))
def test_subsets(self):
"""subsets should return all subsets descending from a set"""
t = self.simple_t
self.assertEqual(t.subsets(), frozenset(
[frozenset('ab'), frozenset('cd')]))
def test_is_tip(self):
"""see if we're a tip or not"""
self.assertFalse(self.simple_t.is_tip())
self.assertFalse(self.simple_t.children[0].is_tip())
self.assertTrue(self.simple_t.children[0].children[0].is_tip())
def test_is_root(self):
"""see if we're at the root or not"""
self.assertTrue(self.simple_t.is_root())
self.assertFalse(self.simple_t.children[0].is_root())
self.assertFalse(self.simple_t.children[0].children[0].is_root())
def test_root(self):
"""Get the root!"""
root = self.simple_t
self.assertIs(root, self.simple_t.root())
self.assertIs(root, self.simple_t.children[0].root())
self.assertIs(root, self.simple_t.children[1].children[1].root())
def test_invalidate_lookup_caches(self):
root = self.simple_t
root.create_caches()
self.assertNotEqual(root._tip_cache, {})
self.assertNotEqual(root._non_tip_cache, {})
root.invalidate_caches()
self.assertEqual(root._tip_cache, {})
self.assertEqual(root._non_tip_cache, {})
def test_invalidate_attr_caches(self):
tree = TreeNode.read(StringIO(u"((a,b,(c,d)e)f,(g,h)i)root;"))
def f(n):
return [n.name] if n.is_tip() else []
tree.cache_attr(f, 'tip_names')
tree.invalidate_caches()
for n in tree.traverse(include_self=True):
self.assertFalse(hasattr(n, 'tip_names'))
def test_create_caches_duplicate_tip_names(self):
with self.assertRaises(DuplicateNodeError):
TreeNode.read(StringIO(u'(a, a);')).create_caches()
def test_find_all(self):
t = TreeNode.read(StringIO(u"((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;"))
exp = [t.children[0],
t.children[1].children[0],
t.children[1],
t.children[2].children[1]]
obs = t.find_all('c')
self.assertEqual(obs, exp)
identity = t.find_all(t)
self.assertEqual(len(identity), 1)
self.assertEqual(identity[0], t)
identity_name = t.find_all('root')
self.assertEqual(len(identity_name), 1)
self.assertEqual(identity_name[0], t)
exp = [t.children[2],
t.children[0].children[0]]
obs = t.find_all('a')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t.find_all('missing')
def test_find(self):
"""Find a node in a tree"""
t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
exp = t.children[0]
obs = t.find('c')
self.assertEqual(obs, exp)
exp = t.children[0].children[1]
obs = t.find('b')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t.find('does not exist')
def test_find_cache_bug(self):
"""First implementation did not force the cache to be at the root"""
t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f,(g,h)f);"))
exp_tip_cache_keys = set(['a', 'b', 'd', 'e', 'g', 'h'])
exp_non_tip_cache_keys = set(['c', 'f'])
tip_a = t.children[0].children[0]
tip_a.create_caches()
self.assertEqual(tip_a._tip_cache, {})
self.assertEqual(set(t._tip_cache), exp_tip_cache_keys)
self.assertEqual(set(t._non_tip_cache), exp_non_tip_cache_keys)
self.assertEqual(t._non_tip_cache['f'], [t.children[1], t.children[2]])
def test_find_by_id(self):
"""Find a node by id"""
t1 = TreeNode.read(StringIO(u"((,),(,,));"))
t2 = TreeNode.read(StringIO(u"((,),(,,));"))
exp = t1.children[1]
obs = t1.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
exp = t2.children[1]
obs = t2.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t1.find_by_id(100)
def test_find_by_func(self):
"""Find nodes by a function"""
t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
def func(x):
return x.parent == t.find('c')
exp = ['a', 'b']
obs = [n.name for n in t.find_by_func(func)]
self.assertEqual(obs, exp)
def test_ancestors(self):
"""Get all the ancestors"""
exp = ['i1', 'root']
obs = self.simple_t.children[0].children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = ['root']
obs = self.simple_t.children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = []
obs = self.simple_t.ancestors()
self.assertEqual([o.name for o in obs], exp)
def test_siblings(self):
"""Get the siblings"""
exp = []
obs = self.simple_t.siblings()
self.assertEqual(obs, exp)
exp = ['i2']
obs = self.simple_t.children[0].siblings()
self.assertEqual([o.name for o in obs], exp)
exp = ['c']
obs = self.simple_t.children[1].children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
self.simple_t.append(TreeNode(name="foo"))
self.simple_t.append(TreeNode(name="bar"))
exp = ['i1', 'foo', 'bar']
obs = self.simple_t.children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
def test_ascii_art(self):
"""Make some ascii trees"""
# unlabeled internal node
tr = TreeNode.read(StringIO(u"(B:0.2,(C:0.3,D:0.4):0.6)F;"))
obs = tr.ascii_art(show_internal=True, compact=False)
exp = " /-B\n-F-------|\n | /-C\n "\
" \\--------|\n \\-D"
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=True, compact=True)
exp = "-F------- /-B\n \-------- /-C\n \-D"
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=False, compact=False)
exp = " /-B\n---------|\n | /-C\n "\
" \\--------|\n \\-D"
self.assertEqual(obs, exp)
def test_ascii_art_three_children(self):
obs = TreeNode.read(StringIO(u'(a,(b,c,d));')).ascii_art()
self.assertEqual(obs, exp_ascii_art_three_children)
def test_accumulate_to_ancestor(self):
"""Get the distance from a node to its ancestor"""
t = TreeNode.read(StringIO(
u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
a = t.find('a')
b = t.find('b')
exp_to_root = 0.1 + 0.3
obs_to_root = a.accumulate_to_ancestor(t)
self.assertEqual(obs_to_root, exp_to_root)
with self.assertRaises(NoParentError):
a.accumulate_to_ancestor(b)
def test_distance(self):
"""Get the distance between two nodes"""
t = TreeNode.read(StringIO(
u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
tips = sorted([n for n in t.tips()], key=lambda x: x.name)
nptest.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
nptest.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
nptest.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
with self.assertRaises(NoLengthError):
tips[0].distance(tips[3])
nptest.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
nptest.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
nptest.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
with self.assertRaises(NoLengthError):
tips[1].distance(tips[3])
self.assertEqual(tips[2].distance(tips[0]), 1.3)
self.assertEqual(tips[2].distance(tips[1]), 1.4)
self.assertEqual(tips[2].distance(tips[2]), 0.0)
with self.assertRaises(NoLengthError):
tips[2].distance(tips[3])
def test_lowest_common_ancestor(self):
"""TreeNode lowestCommonAncestor should return LCA for set of tips"""
t1 = TreeNode.read(StringIO(u"((a,(b,c)d)e,f,(g,h)i)j;"))
t2 = t1.copy()
t3 = t1.copy()
t4 = t1.copy()
input1 = ['a'] # return self
input2 = ['a', 'b'] # return e
input3 = ['b', 'c'] # return d
input4 = ['a', 'h', 'g'] # return j
exp1 = t1.find('a')
exp2 = t2.find('e')
exp3 = t3.find('d')
exp4 = t4
obs1 = t1.lowest_common_ancestor(input1)
obs2 = t2.lowest_common_ancestor(input2)
obs3 = t3.lowest_common_ancestor(input3)
obs4 = t4.lowest_common_ancestor(input4)
self.assertEqual(obs1, exp1)
self.assertEqual(obs2, exp2)
self.assertEqual(obs3, exp3)
self.assertEqual(obs4, exp4)
# verify multiple calls work
t_mul = t1.copy()
exp_1 = t_mul.find('d')
exp_2 = t_mul.find('i')
obs_1 = t_mul.lowest_common_ancestor(['b', 'c'])
obs_2 = t_mul.lowest_common_ancestor(['g', 'h'])
self.assertEqual(obs_1, exp_1)
self.assertEqual(obs_2, exp_2)
# empty case
with self.assertRaises(ValueError):
t1.lowest_common_ancestor([])
def test_get_max_distance(self):
"""get_max_distance should get max tip distance across tree"""
tree = TreeNode.read(StringIO(
u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
dist, nodes = tree.get_max_distance()
nptest.assert_almost_equal(dist, 1.6)
self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
def test_set_max_distance(self):
"""set_max_distance sets MaxDistTips across tree"""
tree = TreeNode.read(StringIO(
u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
tree._set_max_distance()
tip_a, tip_b = tree.MaxDistTips
self.assertEqual(tip_a[0] + tip_b[0], 1.6)
self.assertEqual(sorted([tip_a[1].name, tip_b[1].name]), ['b', 'e'])
def test_shear(self):
"""Shear the nodes"""
t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
obs = str(t.shear(['G', 'M']))
exp = '(G:3.0,M:3.7);\n'
self.assertEqual(obs, exp)
def test_compare_tip_distances(self):
t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(StringIO(u'(((H:1,G:1,O:1):2,R:3):1,X:4);'))
obs = t.compare_tip_distances(t2)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = pearsonr(m1.flat, m2.flat)[0]
self.assertAlmostEqual(obs, (1 - r) / 2)
def test_compare_tip_distances_sample(self):
t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(StringIO(u'(((H:1,G:1,O:1):2,R:3):1,X:4);'))
obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = pearsonr(m1.flat, m2.flat)[0]
self.assertAlmostEqual(obs, (1 - r) / 2)
# 4 common taxa, still picking H, G, R
s = u'((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
t = TreeNode.read(StringIO(s))
s3 = u'(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
t3 = TreeNode.read(StringIO(s3))
obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
def test_compare_tip_distances_no_common_tips(self):
t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(StringIO(u'(((Z:1,Y:1,X:1):2,W:3):1,V:4);'))
with self.assertRaises(ValueError):
t.compare_tip_distances(t2)
def test_compare_tip_distances_single_common_tip(self):
t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(StringIO(u'(((R:1,Y:1,X:1):2,W:3):1,V:4);'))
self.assertEqual(t.compare_tip_distances(t2), 1)
self.assertEqual(t2.compare_tip_distances(t), 1)
    def test_tip_tip_distances_endpoints(self):
        """Test getting specific tip distances with tip_tip_distances"""
        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
        # Endpoints may be supplied either as TreeNode objects or as names;
        # both forms should produce the same DistanceMatrix.
        nodes = [t.find('H'), t.find('G'), t.find('M')]
        names = ['H', 'G', 'M']
        exp = DistanceMatrix(np.array([[0, 2.0, 6.7],
                                       [2.0, 0, 6.7],
                                       [6.7, 6.7, 0.0]]), ['H', 'G', 'M'])
        obs = t.tip_tip_distances(endpoints=names)
        self.assertEqual(obs, exp)
        obs = t.tip_tip_distances(endpoints=nodes)
        self.assertEqual(obs, exp)
def test_tip_tip_distances_non_tip_endpoints(self):
t = TreeNode.read(StringIO(u'((H:1,G:1)foo:2,(R:0.5,M:0.7):3);'))
with self.assertRaises(ValueError):
t.tip_tip_distances(endpoints=['foo'])
def test_tip_tip_distances_no_length(self):
t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
with self.assertRaises(NoLengthError):
t.tip_tip_distances()
def test_neighbors(self):
"""Get neighbors of a node"""
t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
exp = t.children
obs = t.neighbors()
self.assertEqual(obs, exp)
exp = t.children[0].children + [t]
obs = t.children[0].neighbors()
self.assertEqual(obs, exp)
exp = [t.children[0].children[0]] + [t]
obs = t.children[0].neighbors(ignore=t.children[0].children[1])
self.assertEqual(obs, exp)
exp = [t.children[0]]
obs = t.children[0].children[0].neighbors()
self.assertEqual(obs, exp)
def test_has_children(self):
"""Test if has children"""
t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
self.assertTrue(t.has_children())
self.assertTrue(t.children[0].has_children())
self.assertTrue(t.children[1].has_children())
self.assertFalse(t.children[0].children[0].has_children())
self.assertFalse(t.children[0].children[1].has_children())
self.assertFalse(t.children[1].children[0].has_children())
self.assertFalse(t.children[1].children[1].has_children())
def test_tips(self):
"""Tip traversal of tree"""
exp = ['a', 'b', 'c', 'd']
obs = [n.name for n in self.simple_t.tips()]
self.assertEqual(obs, exp)
obs2 = [n.name for n in self.simple_t.traverse(False, False)]
self.assertEqual(obs2, exp)
def test_pre_and_postorder(self):
"""Pre and post order traversal of the tree"""
exp = ['root', 'i1', 'a', 'b', 'i1', 'i2', 'c', 'd', 'i2', 'root']
obs = [n.name for n in self.simple_t.pre_and_postorder()]
self.assertEqual(obs, exp)
obs2 = [n.name for n in self.simple_t.traverse(True, True)]
self.assertEqual(obs2, exp)
def test_pre_and_postorder_no_children(self):
t = TreeNode('brofist')
# include self
exp = ['brofist']
obs = [n.name for n in t.pre_and_postorder()]
self.assertEqual(obs, exp)
# do not include self
obs = list(t.pre_and_postorder(include_self=False))
self.assertEqual(obs, [])
def test_levelorder(self):
"""Test level order traversal of the tree"""
exp = ['root', 'i1', 'i2', 'a', 'b', 'c', 'd']
obs = [n.name for n in self.simple_t.levelorder()]
self.assertEqual(obs, exp)
def test_index_tree(self):
"""index_tree should produce correct index and node map"""
# test for first tree: contains singleton outgroup
t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
t2 = TreeNode.read(StringIO(u'(((a,b),(c,d)),(e,f));'))
t3 = TreeNode.read(StringIO(u'(((a,b,c),(d)),(e,f));'))
id_1, child_1 = t1.index_tree()
nodes_1 = [n.id for n in t1.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_1, [0, 1, 2, 3, 6, 4, 5, 7, 8])
self.assertEqual(child_1, [(2, 0, 1), (6, 2, 3), (7, 4, 5), (8, 6, 7)])
# test for second tree: strictly bifurcating
id_2, child_2 = t2.index_tree()
nodes_2 = [n.id for n in t2.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_2, [0, 1, 4, 2, 3, 5, 8, 6, 7, 9, 10])
self.assertEqual(child_2, [(4, 0, 1), (5, 2, 3), (8, 4, 5), (9, 6, 7),
(10, 8, 9)])
# test for third tree: contains trifurcation and single-child parent
id_3, child_3 = t3.index_tree()
nodes_3 = [n.id for n in t3.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_3, [0, 1, 2, 4, 3, 5, 8, 6, 7, 9, 10])
self.assertEqual(child_3, [(4, 0, 2), (5, 3, 3), (8, 4, 5), (9, 6, 7),
(10, 8, 9)])
def test_root_at(self):
"""Form a new root"""
t = TreeNode.read(StringIO(u"(((a,b)c,(d,e)f)g,h)i;"))
with self.assertRaises(TreeError):
t.root_at(t.find('h'))
exp = "(a,b,((d,e)f,(h)g)c)root;\n"
rooted = t.root_at('c')
obs = str(rooted)
self.assertEqual(obs, exp)
def test_root_at_midpoint(self):
"""Root at the midpoint"""
tree1 = self.TreeRoot
for n in tree1.traverse():
n.length = 1
result = tree1.root_at_midpoint()
self.assertEqual(result.distance(result.find('e')), 1.5)
self.assertEqual(result.distance(result.find('g')), 2.5)
exp_dist = tree1.tip_tip_distances()
obs_dist = result.tip_tip_distances()
self.assertEqual(obs_dist, exp_dist)
def test_root_at_midpoint_no_lengths(self):
# should get same tree back (a copy)
nwk = u'(a,b)c;\n'
t = TreeNode.read(StringIO(nwk))
obs = t.root_at_midpoint()
self.assertEqual(str(obs), nwk)
def test_compare_subsets(self):
"""compare_subsets should return the fraction of shared subsets"""
t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
t2 = TreeNode.read(StringIO(u'(((H,G),R),M);'))
t4 = TreeNode.read(StringIO(u'(((H,G),(O,R)),X);'))
result = t.compare_subsets(t)
self.assertEqual(result, 0)
result = t2.compare_subsets(t2)
self.assertEqual(result, 0)
result = t.compare_subsets(t2)
self.assertEqual(result, 0.5)
result = t.compare_subsets(t4)
self.assertEqual(result, 1 - 2. / 5)
result = t.compare_subsets(t4, exclude_absent_taxa=True)
self.assertEqual(result, 1 - 2. / 3)
result = t.compare_subsets(self.TreeRoot, exclude_absent_taxa=True)
self.assertEqual(result, 1)
result = t.compare_subsets(self.TreeRoot)
self.assertEqual(result, 1)
def test_compare_rfd(self):
"""compare_rfd should return the Robinson Foulds distance"""
t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
t2 = TreeNode.read(StringIO(u'(((H,G),R),M);'))
t4 = TreeNode.read(StringIO(u'(((H,G),(O,R)),X);'))
obs = t.compare_rfd(t2)
exp = 2.0
self.assertEqual(obs, exp)
self.assertEqual(t.compare_rfd(t2), t2.compare_rfd(t))
obs = t.compare_rfd(t2, proportion=True)
exp = 0.5
self.assertEqual(obs, exp)
with self.assertRaises(ValueError):
t.compare_rfd(t4)
def test_assign_ids(self):
"""Assign IDs to the tree"""
t1 = TreeNode.read(StringIO(u"(((a,b),c),(e,f),(g));"))
t2 = TreeNode.read(StringIO(u"(((a,b),c),(e,f),(g));"))
t3 = TreeNode.read(StringIO(u"((g),(e,f),(c,(a,b)));"))
t1_copy = t1.copy()
t1.assign_ids()
t2.assign_ids()
t3.assign_ids()
t1_copy.assign_ids()
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t2.traverse()])
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t1_copy.traverse()])
self.assertNotEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t3.traverse()])
def test_assign_ids_index_tree(self):
"""assign_ids and index_tree should assign the same IDs"""
t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
t2 = TreeNode.read(StringIO(u'(((a,b),(c,d)),(e,f));'))
t3 = TreeNode.read(StringIO(u'(((a,b,c),(d)),(e,f));'))
t1_copy = t1.copy()
t2_copy = t2.copy()
t3_copy = t3.copy()
t1.assign_ids()
t1_copy.index_tree()
t2.assign_ids()
t2_copy.index_tree()
t3.assign_ids()
t3_copy.index_tree()
self.assertEqual([n.id for n in t1.traverse()],
[n.id for n in t1_copy.traverse()])
self.assertEqual([n.id for n in t2.traverse()],
[n.id for n in t2_copy.traverse()])
self.assertEqual([n.id for n in t3.traverse()],
[n.id for n in t3_copy.traverse()])
def test_unrooted_deepcopy(self):
"""Do an unrooted_copy"""
t = TreeNode.read(StringIO(u"((a,(b,c)d)e,(f,g)h)i;"))
exp = "(b,c,(a,((f,g)h)e)d)root;\n"
obs = t.find('d').unrooted_deepcopy()
self.assertEqual(str(obs), exp)
t_ids = {id(n) for n in t.traverse()}
obs_ids = {id(n) for n in obs.traverse()}
self.assertEqual(t_ids.intersection(obs_ids), set())
def test_descending_branch_length(self):
"""Calculate descending branch_length"""
tr = TreeNode.read(StringIO(u"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4"
",(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
sdbl = tr.descending_branch_length(['A', 'E'])
nptest.assert_almost_equal(tdbl, 8.9)
nptest.assert_almost_equal(sdbl, 2.2)
self.assertRaises(ValueError, tr.descending_branch_length,
['A', 'DNE'])
self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])
tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
":.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
nptest.assert_almost_equal(tdbl, 8.8)
tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
",I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
nptest.assert_almost_equal(tdbl, 7.9)
tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
",I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length(['A', 'D', 'E'])
nptest.assert_almost_equal(tdbl, 2.1)
tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
":.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length(['I', 'D', 'E'])
nptest.assert_almost_equal(tdbl, 6.6)
# test with a situation where we have unnamed internal nodes
tr = TreeNode.read(StringIO(u"(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I"
":.5)J:1.3);"))
tdbl = tr.descending_branch_length()
nptest.assert_almost_equal(tdbl, 7.9)
def test_to_array(self):
"""Convert a tree to arrays"""
t = TreeNode.read(StringIO(
u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
id_index, child_index = t.index_tree()
arrayed = t.to_array()
self.assertEqual(id_index, arrayed['id_index'])
self.assertEqual(child_index, arrayed['child_index'])
exp = np.array([1, 2, 3, 5, 4, 6, 8, 9, 7, 10, np.nan])
obs = arrayed['length']
nptest.assert_equal(obs, exp)
exp = np.array(['a', 'b', 'c', 'd', 'x',
'y', 'e', 'f', 'z', 'z', None])
obs = arrayed['name']
nptest.assert_equal(obs, exp)
exp = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
obs = arrayed['id']
nptest.assert_equal(obs, exp)
def test_to_array_attrs(self):
t = TreeNode.read(StringIO(
u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
id_index, child_index = t.index_tree()
arrayed = t.to_array(attrs=[('name', object)])
# should only have id_index, child_index, and name since we specified
# attrs
self.assertEqual(len(arrayed), 3)
self.assertEqual(id_index, arrayed['id_index'])
self.assertEqual(child_index, arrayed['child_index'])
exp = np.array(['a', 'b', 'c', 'd', 'x',
'y', 'e', 'f', 'z', 'z', None])
obs = arrayed['name']
nptest.assert_equal(obs, exp)
# invalid attrs
with self.assertRaises(AttributeError):
t.to_array(attrs=[('name', object), ('brofist', int)])
def test_from_taxonomy(self):
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
exp = TreeNode.read(StringIO(u"((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
"(((((((3,5)n,(4)q)m)l)k)j)i)h);"))
root = TreeNode.from_taxonomy(input_lineages.items())
self.assertEqual(root.compare_subsets(exp), 0.0)
def test_to_taxonomy(self):
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
tree = TreeNode.from_taxonomy(input_lineages.items())
exp = sorted(input_lineages.items())
obs = [(n.name, lin) for n, lin in tree.to_taxonomy(allow_empty=True)]
self.assertEqual(sorted(obs), exp)
def test_to_taxonomy_filter(self):
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l'], # test jagged
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
tree = TreeNode.from_taxonomy(input_lineages.items())
def f(node, lin):
return 'k' in lin or 'x' in lin
exp = [('2', ['a', 'b', 'c', 'x', 'y']),
('3', ['h', 'i', 'j', 'k', 'l']),
('4', ['h', 'i', 'j', 'k', 'l', 'm', 'q']),
('5', ['h', 'i', 'j', 'k', 'l', 'm', 'n'])]
obs = [(n.name, lin) for n, lin in tree.to_taxonomy(filter_f=f)]
self.assertEqual(sorted(obs), exp)
def test_linkage_matrix(self):
# Ensure matches: http://www.southampton.ac.uk/~re1u06/teaching/upgma/
id_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
linkage = np.asarray([[1.0, 5.0, 1.0, 2.0],
[0.0, 3.0, 8.0, 2.0],
[6.0, 7.0, 12.5, 3.0],
[8.0, 9.0, 16.5, 5.0],
[2.0, 10.0, 29.0, 6.0],
[4.0, 11.0, 34.0, 7.0]])
tree = TreeNode.from_linkage_matrix(linkage, id_list)
self.assertEqual("(E:17.0,(C:14.5,((A:4.0,D:4.0):4.25,(G:6.25,(B:0.5,"
"F:0.5):5.75):2.0):6.25):2.5);\n",
str(tree))
def test_shuffle_invalid_iter(self):
shuffler = self.simple_t.shuffle(n=-1)
with self.assertRaises(ValueError):
next(shuffler)
def test_shuffle_n_2(self):
exp = ["((a,b)i1,(d,c)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((a,b)i1,(d,c)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((a,b)i1,(d,c)i2)root;\n"]
obs_g = self.simple_t.shuffle(k=2, shuffle_f=self.rev_f, n=np.inf)
obs = [str(next(obs_g)) for i in range(5)]
self.assertEqual(obs, exp)
def test_shuffle_n_none(self):
exp = ["((d,c)i1,(b,a)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((d,c)i1,(b,a)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n"]
obs_g = self.simple_t.shuffle(shuffle_f=self.rev_f, n=4)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_complex(self):
exp = ["(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n"]
obs_g = self.complex_tree.shuffle(shuffle_f=self.rev_f,
names=['c', 'd', 'e', 'f'], n=4)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_names(self):
exp = ["((c,a)i1,(b,d)i2)root;\n",
"((b,c)i1,(a,d)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((c,a)i1,(b,d)i2)root;\n"]
obs_g = self.simple_t.shuffle(names=['a', 'b', 'c'],
shuffle_f=self.rotate_f, n=np.inf)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
    def test_shuffle_raises(self):
        """Invalid shuffle arguments raise on the first draw from the generator."""
        # NOTE(review): the exact validation rules live in shuffle(); these
        # assertions only pin the exception types. Validation appears to be
        # lazy (errors surface at next(), not at the shuffle() call).
        with self.assertRaises(ValueError):
            next(self.simple_t.shuffle(k=1))
        # k and names together are rejected here -- presumably mutually
        # exclusive ways of selecting tips; confirm against shuffle()'s docs.
        with self.assertRaises(ValueError):
            next(self.simple_t.shuffle(k=5, names=['a', 'b']))
        # Names not present in the tree cannot be shuffled.
        with self.assertRaises(MissingNodeError):
            next(self.simple_t.shuffle(names=['x', 'y']))
sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
:0.17710)
:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
:0.09853);
"""
node_data_sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
'A':0.17710)
B:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
C:0.09853);
"""
minimal = "();"
no_names = "((,),(,));"
missing_tip_name = "((a,b),(c,));"
empty = '();'
single = '(abc:3);'
double = '(abc:3, def:4);'
onenest = '(abc:3, (def:4, ghi:5):6 );'
nodedata = '(abc:3, (def:4, ghi:5)jkl:6 );'
exp_ascii_art_three_children = """\
/-a
|
---------| /-b
| |
\--------|--c
|
\-d\
"""
# Run the test suite when this module is executed directly (`main` is the test
# runner entry point imported at module level, outside this excerpt).
if __name__ == '__main__':
    main()
| bsd-3-clause |
grandcat/robotics_g7 | explorer/src/explorer/msg/_Stop_EKF.py | 1 | 3225 | """autogenerated by genpy from explorer/Stop_EKF.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Stop_EKF(genpy.Message):
  """Autogenerated ROS message class for ``explorer/Stop_EKF``.

  Generated by genpy from Stop_EKF.msg -- do not edit by hand.

  Fields (see ``_full_text`` for the .msg definition):
    stop: bool -- presumably requests the EKF node to stop; confirm with callers.
    rotation_angle: float32
  """
  _md5sum = "08f794ec9172dc90262fcf16b6e9022c"
  _type = "explorer/Stop_EKF"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """bool stop
float32 rotation_angle
"""
  __slots__ = ['stop','rotation_angle']
  _slot_types = ['bool','float32']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as that is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       stop, rotation_angle

    :param args: complete set of field values, in .msg order
    :param kwds: keyword arguments corresponding to message field names,
    used to set specific fields.
    """
    if args or kwds:
      super(Stop_EKF, self).__init__(*args, **kwds)
      # message fields cannot be None; assign default values for those that are
      if self.stop is None:
        self.stop = False
      if self.rotation_angle is None:
        self.rotation_angle = 0.
    else:
      # no arguments at all: initialize every field to its type's default
      self.stop = False
      self.rotation_angle = 0.
  def _get_types(self):
    """
    Internal API method: return the ROS types of this message's slots.
    """
    return self._slot_types
  def serialize(self, buff):
    """
    Serialize this message into a buffer.
    :param buff: writable buffer, ``StringIO``
    """
    try:
      _x = self
      # wire format '<Bf': little-endian bool (1 byte) + float32 (4 bytes)
      buff.write(_struct_Bf.pack(_x.stop, _x.rotation_angle))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)
  def deserialize(self, str):
    """
    Unpack the serialized message in ``str`` into this message instance.
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      # '<Bf' is a fixed 5-byte payload: 1-byte bool + 4-byte float32
      end += 5
      (_x.stop, _x.rotation_angle,) = _struct_Bf.unpack(str[start:end])
      # struct unpacks 'B' as an int; normalize back to a real bool
      self.stop = bool(self.stop)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    Serialize this message into a buffer, using numpy for array fields
    (this message has none, so the logic matches ``serialize``).
    :param buff: writable buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_struct_Bf.pack(_x.stop, _x.rotation_angle))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)
  def deserialize_numpy(self, str, numpy):
    """
    Unpack the serialized message in ``str`` into this message instance,
    using numpy for array fields (none here; mirrors ``deserialize``).
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      _x = self
      start = end
      end += 5
      (_x.stop, _x.rotation_angle,) = _struct_Bf.unpack(str[start:end])
      self.stop = bool(self.stop)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled (de)serializers shared by the methods above.
_struct_I = genpy.struct_I
_struct_Bf = struct.Struct("<Bf")  # little-endian: bool (1 byte) + float32 (4 bytes)
| gpl-2.0 |
coursemdetw/2014c2 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/_sysconfigdata.py | 731 | 18167 | build_time_vars={'HAVE_SYS_WAIT_H': 1, 'HAVE_UTIL_H': 0, 'HAVE_SYMLINKAT': 1, 'HAVE_LIBSENDFILE': 0, 'SRCDIRS': 'Parser Grammar Objects Python Modules Mac', 'SIZEOF_OFF_T': 8, 'BASECFLAGS': '-Wno-unused-result', 'HAVE_UTIME_H': 1, 'EXTRAMACHDEPPATH': '', 'HAVE_SYS_TIME_H': 1, 'CFLAGSFORSHARED': '-fPIC', 'HAVE_HYPOT': 1, 'PGSRCS': '\\', 'HAVE_LIBUTIL_H': 0, 'HAVE_COMPUTED_GOTOS': 1, 'HAVE_LUTIMES': 1, 'HAVE_MAKEDEV': 1, 'HAVE_REALPATH': 1, 'HAVE_LINUX_TIPC_H': 1, 'MULTIARCH': 'i386-linux-gnu', 'HAVE_GETWD': 1, 'HAVE_GCC_ASM_FOR_X64': 0, 'HAVE_INET_PTON': 1, 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, 'SIZEOF__BOOL': 1, 'HAVE_ZLIB_COPY': 1, 'ASDLGEN': 'python3.3 ../Parser/asdl_c.py', 'GRAMMAR_INPUT': '../Grammar/Grammar', 'HOST_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_SCHED_RR_GET_INTERVAL': 1, 'HAVE_BLUETOOTH_H': 0, 'HAVE_MKFIFO': 1, 'TIMEMODULE_LIB': 0, 'LIBM': '-lm', 'PGENOBJS': '\\ \\', 'PYTHONFRAMEWORK': '', 'GETPGRP_HAVE_ARG': 0, 'HAVE_MMAP': 1, 'SHLIB_SUFFIX': '.so', 'SIZEOF_FLOAT': 4, 'HAVE_RENAMEAT': 1, 'HAVE_LANGINFO_H': 1, 'HAVE_STDLIB_H': 1, 'PY_CORE_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -I. 
-IInclude -I../Include -D_FORTIFY_SOURCE=2 -fPIC -DPy_BUILD_CORE', 'HAVE_BROKEN_PIPE_BUF': 0, 'HAVE_CONFSTR': 1, 'HAVE_SIGTIMEDWAIT': 1, 'HAVE_FTELLO': 1, 'READELF': 'readelf', 'HAVE_SIGALTSTACK': 1, 'TESTTIMEOUT': 3600, 'PYTHONPATH': ':plat-i386-linux-gnu', 'SIZEOF_WCHAR_T': 4, 'LIBOBJS': '', 'HAVE_SYSCONF': 1, 'MAKESETUP': '../Modules/makesetup', 'HAVE_UTIMENSAT': 1, 'HAVE_FCHOWNAT': 1, 'HAVE_WORKING_TZSET': 1, 'HAVE_FINITE': 1, 'HAVE_ASINH': 1, 'HAVE_SETEUID': 1, 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'HAVE_SETGROUPS': 1, 'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o', 'HAVE_MBRTOWC': 1, 'SIZEOF_INT': 4, 'HAVE_STDARG_PROTOTYPES': 1, 'TM_IN_SYS_TIME': 0, 'HAVE_SYS_TIMES_H': 1, 'HAVE_LCHOWN': 1, 'HAVE_SSIZE_T': 1, 'HAVE_PAUSE': 1, 'SYSLIBS': '-lm', 'POSIX_SEMAPHORES_NOT_ENABLED': 0, 'HAVE_DEVICE_MACROS': 1, 'BLDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\', 'HAVE_SYS_UN_H': 1, 'HAVE_SYS_STAT_H': 1, 'VPATH': '..', 'INCLDIRSTOMAKE': '/usr/include /usr/include /usr/include/python3.3m /usr/include/python3.3m', 'HAVE_BROKEN_SEM_GETVALUE': 0, 'HAVE_TIMEGM': 1, 'PACKAGE_VERSION': 0, 'MAJOR_IN_SYSMACROS': 0, 'HAVE_ATANH': 1, 'HAVE_GAI_STRERROR': 1, 'HAVE_SYS_POLL_H': 1, 'SIZEOF_PTHREAD_T': 4, 'SIZEOF_FPOS_T': 16, 'HAVE_CTERMID': 1, 'HAVE_TMPFILE': 1, 'HAVE_SETUID': 1, 'CXX': 'i686-linux-gnu-g++ -pthread', 'srcdir': '..', 'HAVE_UINT32_T': 1, 'HAVE_ADDRINFO': 1, 'HAVE_GETSPENT': 1, 'SIZEOF_DOUBLE': 8, 'HAVE_INT32_T': 1, 'LIBRARY_OBJS_OMIT_FROZEN': '\\', 'HAVE_FUTIMES': 1, 'CONFINCLUDEPY': '/usr/include/python3.3m', 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, 'LIBFFI_INCLUDEDIR': '', 'HAVE_SETGID': 1, 'HAVE_UINT64_T': 1, 
'EXEMODE': 755, 'UNIVERSALSDK': '', 'HAVE_LIBDL': 1, 'HAVE_GETNAMEINFO': 1, 'HAVE_STDINT_H': 1, 'COREPYTHONPATH': ':plat-i386-linux-gnu', 'HAVE_SOCKADDR_STORAGE': 1, 'HAVE_WAITID': 1, 'EXTRAPLATDIR': '@EXTRAPLATDIR@', 'HAVE_ACCEPT4': 1, 'RUNSHARED': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared:', 'EXE': '', 'HAVE_SIGACTION': 1, 'HAVE_CHOWN': 1, 'HAVE_GETLOGIN': 1, 'HAVE_TZNAME': 0, 'PACKAGE_NAME': 0, 'HAVE_GETPGID': 1, 'HAVE_GLIBC_MEMMOVE_BUG': 0, 'BUILD_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_LINUX_CAN_H': 1, 'DYNLOADFILE': 'dynload_shlib.o', 'HAVE_PWRITE': 1, 'BUILDEXE': '', 'HAVE_OPENPTY': 1, 'HAVE_LOCKF': 1, 'HAVE_COPYSIGN': 1, 'HAVE_PREAD': 1, 'HAVE_DLOPEN': 1, 'HAVE_SYS_KERN_CONTROL_H': 0, 'PY_FORMAT_LONG_LONG': '"ll"', 'HAVE_TCSETPGRP': 1, 'HAVE_SETSID': 1, 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, 'HAVE_STRING_H': 1, 'LDLIBRARY': 'libpython3.3m.so', 'INSTALL_SCRIPT': '/usr/bin/install -c', 'HAVE_SYS_XATTR_H': 1, 'HAVE_CURSES_IS_TERM_RESIZED': 1, 'HAVE_TMPNAM_R': 1, 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", 'WANT_SIGFPE_HANDLER': 1, 'HAVE_INT64_T': 1, 'HAVE_STAT_TV_NSEC': 1, 'HAVE_SYS_MKDEV_H': 0, 'HAVE_BROKEN_POLL': 0, 'HAVE_IF_NAMEINDEX': 1, 'HAVE_GETPWENT': 1, 'PSRCS': '\\', 'RANLIB': 'ranlib', 'HAVE_WCSCOLL': 1, 'WITH_NEXT_FRAMEWORK': 0, 'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py', 'HAVE_RL_PRE_INPUT_HOOK': 1, 'PACKAGE_URL': 0, 'SHLIB_EXT': 0, 'HAVE_SYS_LOADAVG_H': 0, 'HAVE_LIBIEEE': 0, 'HAVE_SEM_OPEN': 1, 'HAVE_TERM_H': 1, 'IO_OBJS': '\\', 'IO_H': 'Modules/_io/_iomodule.h', 'HAVE_STATVFS': 1, 'VERSION': '3.3', 'HAVE_GETC_UNLOCKED': 1, 'MACHDEPS': 'plat-i386-linux-gnu @EXTRAPLATDIR@', 'SUBDIRSTOO': 'Include Lib Misc', 'HAVE_SETREUID': 1, 'HAVE_ERFC': 1, 'HAVE_SETRESUID': 1, 'LINKFORSHARED': '-Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions', 'HAVE_SYS_TYPES_H': 1, 'HAVE_GETPAGESIZE': 1, 'HAVE_SETEGID': 1, 'HAVE_PTY_H': 1, 'HAVE_STRUCT_STAT_ST_FLAGS': 0, 'HAVE_WCHAR_H': 1, 'HAVE_FSEEKO': 1, 
'Py_ENABLE_SHARED': 1, 'HAVE_SIGRELSE': 1, 'HAVE_PTHREAD_INIT': 0, 'FILEMODE': 644, 'HAVE_SYS_RESOURCE_H': 1, 'HAVE_READLINKAT': 1, 'PYLONG_BITS_IN_DIGIT': 0, 'LINKCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SETLOCALE': 1, 'HAVE_CHROOT': 1, 'HAVE_OPENAT': 1, 'HAVE_FEXECVE': 1, 'LDCXXSHARED': 'i686-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions', 'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Ext-dummy', 'HAVE_MKNOD': 1, 'PY_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_BROKEN_MBSTOWCS': 0, 'LIBRARY_OBJS': '\\', 'HAVE_LOG1P': 1, 'SIZEOF_VOID_P': 4, 'HAVE_FCHOWN': 1, 'PYTHONFRAMEWORKPREFIX': '', 'HAVE_LIBDLD': 0, 'HAVE_TGAMMA': 1, 'HAVE_ERRNO_H': 1, 'HAVE_IO_H': 0, 'OTHER_LIBTOOL_OPT': '', 'HAVE_POLL_H': 1, 'PY_CPPFLAGS': '-I. -IInclude -I../Include -D_FORTIFY_SOURCE=2', 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', 'GRAMMAR_H': 'Include/graminit.h', 'TANH_PRESERVES_ZERO_SIGN': 1, 'HAVE_GETLOADAVG': 1, 'UNICODE_DEPS': '\\ \\', 'HAVE_GETCWD': 1, 'MANDIR': '/usr/share/man', 'MACHDESTLIB': '/usr/lib/python3.3', 'GRAMMAR_C': 'Python/graminit.c', 'PGOBJS': '\\', 'HAVE_DEV_PTMX': 1, 'HAVE_UINTPTR_T': 1, 'HAVE_SCHED_SETAFFINITY': 1, 'PURIFY': '', 'HAVE_DECL_ISINF': 1, 'HAVE_RL_CALLBACK': 1, 'HAVE_WRITEV': 1, 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, 'HAVE_SYS_AUDIOIO_H': 0, 'EXT_SUFFIX': '.cpython-33m.so', 'SIZEOF_LONG_LONG': 8, 'DLINCLDIR': '.', 'HAVE_PATHCONF': 1, 'HAVE_UNLINKAT': 1, 'MKDIR_P': '/bin/mkdir -p', 'HAVE_ALTZONE': 0, 'SCRIPTDIR': '/usr/lib', 'OPCODETARGETGEN_FILES': '\\', 'HAVE_GETSPNAM': 1, 'HAVE_SYS_TERMIO_H': 0, 'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0, 'HAVE_PTHREAD_H': 1, 'Py_DEBUG': 0, 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, 'X87_DOUBLE_ROUNDING': 1, 'SIZEOF_TIME_T': 4, 'HAVE_DYNAMIC_LOADING': 1, 'HAVE_DIRECT_H': 0, 'SRC_GDB_HOOKS': '../Tools/gdb/libpython.py', 'HAVE_GETADDRINFO': 1, 'HAVE_BROKEN_NICE': 0, 'HAVE_DIRENT_H': 1, 'HAVE_WCSXFRM': 1, 
'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, 'HAVE_FSTATVFS': 1, 'PYTHON': 'python', 'HAVE_OSX105_SDK': 0, 'BINDIR': '/usr/bin', 'TESTPYTHON': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python', 'ARFLAGS': 'rc', 'PLATDIR': 'plat-i386-linux-gnu', 'HAVE_ASM_TYPES_H': 1, 'PY3LIBRARY': 'libpython3.so', 'HAVE_PLOCK': 0, 'FLOCK_NEEDS_LIBBSD': 0, 'WITH_TSC': 0, 'HAVE_LIBREADLINE': 1, 'MACHDEP': 'linux', 'HAVE_SELECT': 1, 'LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_HSTRERROR': 1, 'SOABI': 'cpython-33m', 'HAVE_GETTIMEOFDAY': 1, 'HAVE_LIBRESOLV': 0, 'HAVE_UNSETENV': 1, 'HAVE_TM_ZONE': 1, 'HAVE_GETPGRP': 1, 'HAVE_FLOCK': 1, 'HAVE_SYS_BSDTTY_H': 0, 'SUBDIRS': '', 'PYTHONFRAMEWORKINSTALLDIR': '', 'PACKAGE_BUGREPORT': 0, 'HAVE_CLOCK': 1, 'HAVE_GETPEERNAME': 1, 'SIZEOF_PID_T': 4, 'HAVE_CONIO_H': 0, 'HAVE_FSTATAT': 1, 'HAVE_NETPACKET_PACKET_H': 1, 'HAVE_WAIT3': 1, 'DESTPATH': '', 'HAVE_STAT_TV_NSEC2': 0, 'HAVE_GETRESGID': 1, 'HAVE_UCS4_TCL': 0, 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, 'HAVE_TIMES': 1, 'HAVE_UNAME': 1, 'HAVE_ERF': 1, 'SIZEOF_SHORT': 2, 'HAVE_NCURSES_H': 1, 'HAVE_SYS_SENDFILE_H': 1, 'HAVE_CTERMID_R': 0, 'HAVE_TMPNAM': 1, 'prefix': '/usr', 'HAVE_NICE': 1, 'WITH_THREAD': 1, 'LN': 'ln', 'TESTRUNNER': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python ../Tools/scripts/run_tests.py', 'HAVE_SIGINTERRUPT': 1, 'HAVE_SETPGID': 1, 'RETSIGTYPE': 'void', 'HAVE_SCHED_GET_PRIORITY_MAX': 1, 'HAVE_SYS_SYS_DOMAIN_H': 0, 'HAVE_SYS_DIR_H': 0, 'HAVE__GETPTY': 0, 'HAVE_BLUETOOTH_BLUETOOTH_H': 1, 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, 'HAVE_POLL': 1, 'PYTHON_OBJS': '\\', 'HAVE_WAITPID': 1, 'USE_INLINE': 1, 'HAVE_FUTIMENS': 1, 'USE_COMPUTED_GOTOS': 1, 'MAINCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SOCKETPAIR': 1, 'HAVE_PROCESS_H': 0, 'HAVE_SETVBUF': 1, 'HAVE_FDOPENDIR': 1, 'CONFINCLUDEDIR': '/usr/include', 'BINLIBDEST': '/usr/lib/python3.3', 'HAVE_SYS_IOCTL_H': 1, 'HAVE_SYSEXITS_H': 1, 'LDLAST': '', 'HAVE_SYS_FILE_H': 1, 
'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, 'HAVE_RL_COMPLETION_MATCHES': 1, 'HAVE_TCGETPGRP': 1, 'SIZEOF_SIZE_T': 4, 'HAVE_EPOLL_CREATE1': 1, 'HAVE_SYS_SELECT_H': 1, 'HAVE_CLOCK_GETTIME': 1, 'CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HAVE_SNPRINTF': 1, 'BLDLIBRARY': '-lpython3.3m', 'PARSER_HEADERS': '\\', 'SO': '.so', 'LIBRARY': 'libpython3.3m.a', 'HAVE_FPATHCONF': 1, 'HAVE_TERMIOS_H': 1, 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, 'AST_H': 'Include/Python-ast.h', 'HAVE_GCC_UINT128_T': 0, 'HAVE_ACOSH': 1, 'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o Modules/arraymodule.o Modules/mathmodule.o Modules/_math.o Modules/_struct.o Modules/timemodule.o Modules/_randommodule.o Modules/atexitmodule.o Modules/_elementtree.o Modules/_pickle.o Modules/_datetimemodule.o Modules/_bisectmodule.o Modules/_heapqmodule.o Modules/unicodedata.o Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o Modules/selectmodule.o Modules/socketmodule.o Modules/_posixsubprocess.o Modules/md5module.o Modules/sha1module.o Modules/sha256module.o Modules/sha512module.o Modules/syslogmodule.o Modules/binascii.o Modules/zlibmodule.o Modules/pyexpat.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/_functoolsmodule.o Modules/operator.o Modules/_collectionsmodule.o Modules/itertoolsmodule.o Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o Modules/symtablemodule.o Modules/xxsubtype.o', 'AST_C': 'Python/Python-ast.c', 'HAVE_SYS_NDIR_H': 0, 'DESTDIRS': '/usr /usr/lib /usr/lib/python3.3 /usr/lib/python3.3/lib-dynload', 'HAVE_SIGNAL_H': 1, 'PACKAGE_TARNAME': 0, 'HAVE_GETPRIORITY': 1, 'INCLUDEDIR': '/usr/include', 'HAVE_INTTYPES_H': 1, 'SIGNAL_OBJS': '', 
'HAVE_READV': 1, 'HAVE_SETHOSTNAME': 1, 'MODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'CC': 'i686-linux-gnu-gcc -pthread', 'HAVE_LCHMOD': 0, 'SIZEOF_UINTPTR_T': 4, 'LIBPC': '/usr/lib/i386-linux-gnu/pkgconfig', 'BYTESTR_DEPS': '\\', 'HAVE_MKDIRAT': 1, 'LIBPL': '/usr/lib/python3.3/config-3.3m-i386-linux-gnu', 'HAVE_SHADOW_H': 1, 'HAVE_SYS_EVENT_H': 0, 'INSTALL': '/usr/bin/install -c', 'HAVE_GCC_ASM_FOR_X87': 1, 'HAVE_BROKEN_UNSETENV': 0, 'BASECPPFLAGS': '', 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, 'HAVE_STRUCT_STAT_ST_RDEV': 1, 'HAVE_SEM_UNLINK': 1, 'BUILDPYTHON': 'python', 'HAVE_RL_CATCH_SIGNAL': 1, 'HAVE_DECL_TZNAME': 0, 'RESSRCDIR': 'Mac/Resources/framework', 'HAVE_PTHREAD_SIGMASK': 1, 'HAVE_UTIMES': 1, 'DISTDIRS': 'Include Lib Misc Ext-dummy', 'HAVE_FDATASYNC': 1, 'HAVE_USABLE_WCHAR_T': 0, 'PY_FORMAT_SIZE_T': '"z"', 'HAVE_SCHED_SETSCHEDULER': 1, 'VA_LIST_IS_ARRAY': 0, 'HAVE_LINUX_NETLINK_H': 1, 'HAVE_SETREGID': 1, 'HAVE_STROPTS_H': 1, 'LDVERSION': '3.3m', 'abs_builddir': '/build/buildd/python3.3-3.3.1/build-shared', 'SITEPATH': '', 'HAVE_GETHOSTBYNAME': 0, 'HAVE_SIGPENDING': 1, 'HAVE_KQUEUE': 0, 'HAVE_SYNC': 1, 'HAVE_GETSID': 1, 'HAVE_ROUND': 1, 'HAVE_STRFTIME': 1, 'AST_H_DIR': 'Include', 'HAVE_PIPE2': 1, 'AST_C_DIR': 'Python', 'TESTPYTHONOPTS': '', 'HAVE_DEV_PTC': 0, 'GETTIMEOFDAY_NO_TZ': 0, 'HAVE_NET_IF_H': 1, 'HAVE_SENDFILE': 1, 'HAVE_SETPGRP': 1, 'HAVE_SEM_GETVALUE': 1, 'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'DLLLIBRARY': '', 'PYTHON_FOR_BUILD': './python -E', 'SETPGRP_HAVE_ARG': 0, 'HAVE_INET_ATON': 1, 'INSTALL_SHARED': '/usr/bin/install -c -m 555', 'WITH_DOC_STRINGS': 1, 'OPCODETARGETS_H': '\\', 'HAVE_INITGROUPS': 1, 'HAVE_LINKAT': 1, 'BASEMODLIBS': '', 'SGI_ABI': '', 'HAVE_SCHED_SETPARAM': 1, 'OPT': '-DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes', 'HAVE_POSIX_FADVISE': 1, 'datarootdir': '/usr/share', 'HAVE_MEMRCHR': 1, 'HGTAG': '', 'HAVE_MEMMOVE': 1, 'HAVE_GETRESUID': 1, 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, 'HAVE_LSTAT': 1, 
'AR': 'ar', 'HAVE_WAIT4': 1, 'HAVE_SYS_MODEM_H': 0, 'INSTSONAME': 'libpython3.3m.so.1.0', 'HAVE_SYS_STATVFS_H': 1, 'HAVE_LGAMMA': 1, 'HAVE_PROTOTYPES': 1, 'HAVE_SYS_UIO_H': 1, 'MAJOR_IN_MKDEV': 0, 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', 'HAVE_SYS_DEVPOLL_H': 0, 'HAVE_CHFLAGS': 0, 'HAVE_FSYNC': 1, 'HAVE_FCHMOD': 1, 'INCLUDEPY': '/usr/include/python3.3m', 'HAVE_SEM_TIMEDWAIT': 1, 'LDLIBRARYDIR': '', 'HAVE_STRUCT_TM_TM_ZONE': 1, 'HAVE_CURSES_H': 1, 'TIME_WITH_SYS_TIME': 1, 'HAVE_DUP2': 1, 'ENABLE_IPV6': 1, 'WITH_VALGRIND': 0, 'HAVE_SETITIMER': 1, 'THREADOBJ': 'Python/thread.o', 'LOCALMODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'HAVE_MEMORY_H': 1, 'HAVE_GETITIMER': 1, 'HAVE_C99_BOOL': 1, 'INSTALL_DATA': '/usr/bin/install -c -m 644', 'PGEN': 'Parser/pgen', 'HAVE_GRP_H': 1, 'HAVE_WCSFTIME': 1, 'AIX_GENUINE_CPLUSPLUS': 0, 'HAVE_LIBINTL_H': 1, 'SHELL': '/bin/sh', 'HAVE_UNISTD_H': 1, 'EXTRATESTOPTS': '', 'HAVE_EXECV': 1, 'HAVE_FSEEK64': 0, 'MVWDELCH_IS_EXPRESSION': 1, 'DESTSHARED': '/usr/lib/python3.3/lib-dynload', 'OPCODETARGETGEN': '\\', 'LIBDEST': '/usr/lib/python3.3', 'CCSHARED': '-fPIC', 'HAVE_EXPM1': 1, 'HAVE_DLFCN_H': 1, 'exec_prefix': '/usr', 'HAVE_READLINK': 1, 'WINDOW_HAS_FLAGS': 1, 'HAVE_FTELL64': 0, 'HAVE_STRLCPY': 0, 'MACOSX_DEPLOYMENT_TARGET': '', 'HAVE_SYS_SYSCALL_H': 1, 'DESTLIB': '/usr/lib/python3.3', 'LDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HGVERSION': '', 'PYTHON_HEADERS': '\\', 'HAVE_STRINGS_H': 1, 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, 'HAVE_POSIX_FALLOCATE': 1, 'HAVE_DIRFD': 1, 'HAVE_LOG2': 1, 'HAVE_GETPID': 1, 'HAVE_ALARM': 1, 'MACHDEP_OBJS': '', 'HAVE_SPAWN_H': 1, 'HAVE_FORK': 1, 'HAVE_SETRESGID': 1, 'HAVE_FCHMODAT': 1, 'HAVE_CLOCK_GETRES': 1, 'MACHDEPPATH': ':plat-i386-linux-gnu', 
'STDC_HEADERS': 1, 'HAVE_SETPRIORITY': 1, 'LIBC': '', 'HAVE_SYS_EPOLL_H': 1, 'HAVE_SYS_UTSNAME_H': 1, 'HAVE_PUTENV': 1, 'HAVE_CURSES_RESIZE_TERM': 1, 'HAVE_FUTIMESAT': 1, 'WITH_DYLD': 0, 'INSTALL_PROGRAM': '/usr/bin/install -c', 'LIBS': '-lpthread -ldl -lutil', 'HAVE_TRUNCATE': 1, 'TESTOPTS': '', 'PROFILE_TASK': '../Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck', 'HAVE_CURSES_RESIZETERM': 1, 'ABIFLAGS': 'm', 'HAVE_GETGROUPLIST': 1, 'OBJECT_OBJS': '\\', 'HAVE_MKNODAT': 1, 'HAVE_ST_BLOCKS': 1, 'HAVE_STRUCT_STAT_ST_GEN': 0, 'SYS_SELECT_WITH_SYS_TIME': 1, 'SHLIBS': '-lpthread -ldl -lutil', 'HAVE_GETGROUPS': 1, 'MODULE_OBJS': '\\', 'PYTHONFRAMEWORKDIR': 'no-framework', 'HAVE_FCNTL_H': 1, 'HAVE_LINK': 1, 'HAVE_SIGWAIT': 1, 'HAVE_GAMMA': 1, 'HAVE_SYS_LOCK_H': 0, 'HAVE_FORKPTY': 1, 'HAVE_SOCKADDR_SA_LEN': 0, 'HAVE_TEMPNAM': 1, 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, 'HAVE_MKFIFOAT': 1, 'HAVE_SIGWAITINFO': 1, 'HAVE_FTIME': 1, 'HAVE_EPOLL': 1, 'HAVE_SYS_SOCKET_H': 1, 'HAVE_LARGEFILE_SUPPORT': 1, 'CONFIGURE_CFLAGS': '-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security', 'HAVE_PTHREAD_DESTRUCTOR': 0, 'CONFIGURE_CPPFLAGS': '-D_FORTIFY_SOURCE=2', 'HAVE_SYMLINK': 1, 'HAVE_LONG_LONG': 1, 'HAVE_IEEEFP_H': 0, 'LIBDIR': '/usr/lib', 'HAVE_PTHREAD_KILL': 1, 'TESTPATH': '', 'HAVE_STRDUP': 1, 'POBJS': '\\', 'NO_AS_NEEDED': '-Wl,--no-as-needed', 'HAVE_LONG_DOUBLE': 1, 'HGBRANCH': '', 'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, 'HAVE_FACCESSAT': 1, 'AST_ASDL': '../Parser/Python.asdl', 'CPPFLAGS': '-I. 
-IInclude -I../Include -D_FORTIFY_SOURCE=2', 'HAVE_MKTIME': 1, 'HAVE_NDIR_H': 0, 'PY_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBOBJDIR': 'Python/', 'HAVE_LINUX_CAN_RAW_H': 1, 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, 'PACKAGE_STRING': 0, 'GNULD': 'yes', 'LOG1P_DROPS_ZERO_SIGN': 0, 'HAVE_FTRUNCATE': 1, 'WITH_LIBINTL': 0, 'HAVE_MREMAP': 1, 'HAVE_DECL_ISNAN': 1, 'HAVE_KILLPG': 1, 'SIZEOF_LONG': 4, 'HAVE_DECL_ISFINITE': 1, 'HAVE_IPA_PURE_CONST_BUG': 0, 'WITH_PYMALLOC': 1, 'abs_srcdir': '/build/buildd/python3.3-3.3.1/build-shared/..', 'HAVE_FCHDIR': 1, 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, 'AC_APPLE_UNIVERSAL_BUILD': 0, 'PGENSRCS': '\\ \\', 'DIRMODE': 755, 'HAVE_GETHOSTBYNAME_R': 1, 'HAVE_LCHFLAGS': 0, 'HAVE_SYS_PARAM_H': 1, 'SIZEOF_LONG_DOUBLE': 12, 'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' '--enable-loadable-sqlite-extensions' '--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' '--with-system-expat' '--with-system-ffi' '--with-fpectl' 'CC=i686-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ' 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' 'CPPFLAGS=-D_FORTIFY_SOURCE=2'", 'HAVE_SCHED_H': 1, 'HAVE_KILL': 1}
| gpl-2.0 |
halvertoluke/edx-platform | lms/djangoapps/certificates/urls.py | 12 | 1331 | """
URLs for the certificates app.
"""
from django.conf.urls import patterns, url
from django.conf import settings
from certificates import views
urlpatterns = patterns(
    '',
    # Certificates HTML view end point to render web certs by user and course
    url(
        r'^user/(?P<user_id>[^/]*)/course/{course_id}'.format(course_id=settings.COURSE_ID_PATTERN),
        views.render_html_view,
        name='html_view'
    ),
    # Certificates HTML view end point to render web certs by certificate_uuid
    # (a 32-char lowercase hex string).
    url(
        r'^(?P<certificate_uuid>[0-9a-f]{32})$',
        views.render_cert_by_uuid,
        name='render_cert_by_uuid'
    ),
    # End-points used by student support
    # The views in the lms/djangoapps/support use these end-points
    # to retrieve certificate information and regenerate certificates.
    # NOTE: these two patterns are unanchored (no ^/$), so they match
    # anywhere in the remaining path.
    url(r'search', views.search_by_user, name="search"),
    url(r'regenerate', views.regenerate_certificate_for_user, name="regenerate_certificate_for_user"),
)
# The badge-share tracking redirect is only registered when the Open Badges
# feature flag is enabled.
if settings.FEATURES.get("ENABLE_OPENBADGES", False):
    urlpatterns += (
        url(
            r'^badge_share_tracker/{}/(?P<network>[^/]+)/(?P<student_username>[^/]+)/$'.format(
                settings.COURSE_ID_PATTERN
            ),
            views.track_share_redirect,
            name='badge_share_tracker'
        ),
    )
| agpl-3.0 |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Basecamp/GetAllListItems.py | 5 | 3833 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetAllListItems
# Retrieves all items in a specified To-do list.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetAllListItems(Choreography):
    """Generated Temboo Choreo binding: retrieves all items in a Basecamp To-do list."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetAllListItems Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetAllListItems, self).__init__(temboo_session, '/Library/Basecamp/GetAllListItems')
    def new_input_set(self):
        # Factory for the input container specific to this Choreo.
        return GetAllListItemsInputSet()
    def _make_result_set(self, result, path):
        # Internal: wrap a raw execution result in this Choreo's ResultSet.
        return GetAllListItemsResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Internal: create the execution handle used to poll for results.
        return GetAllListItemsChoreographyExecution(session, exec_id, path)
class GetAllListItemsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetAllListItems
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AccountName(self, value):
        """
        Set the value of the AccountName input for this Choreo. ((required, string) A valid Basecamp account name. This is the first part of the account's URL.)
        """
        super(GetAllListItemsInputSet, self)._set_input('AccountName', value)
    def set_ListID(self, value):
        """
        Set the value of the ListID input for this Choreo. ((required, integer) The ID of the list for which you're returning items.)
        """
        super(GetAllListItemsInputSet, self)._set_input('ListID', value)
    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) The Basecamp account password. Use the value 'X' when specifying an API Key for the Username input.)
        """
        super(GetAllListItemsInputSet, self)._set_input('Password', value)
    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) A Basecamp account username or API Key.)
        """
        super(GetAllListItemsInputSet, self)._set_input('Username', value)
class GetAllListItemsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetAllListItems Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``; kept
        # as-is because this is generated code and renaming would break
        # keyword callers.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response returned from Basecamp.)
        """
        return self._output.get('Response', None)
class GetAllListItemsChoreographyExecution(ChoreographyExecution):
    """Execution handle for GetAllListItems; produces the matching ResultSet."""
    def _make_result_set(self, response, path):
        # Internal: wrap the raw response for this specific Choreo.
        return GetAllListItemsResultSet(response, path)
| apache-2.0 |
kennedyshead/home-assistant | tests/components/elgato/test_init.py | 2 | 1137 | """Tests for the Elgato Key Light integration."""
import aiohttp
from homeassistant.components.elgato.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from tests.components.elgato import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_not_ready(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test the Elgato Key Light configuration entry not ready."""
    # Make the device's info endpoint unreachable so setup cannot complete.
    aioclient_mock.get(
        "http://127.0.0.1:9123/elgato/accessory-info", exc=aiohttp.ClientError
    )
    config_entry = await init_integration(hass, aioclient_mock)
    assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_unload_config_entry(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test the Elgato Key Light configuration entry unloading."""
    config_entry = await init_integration(hass, aioclient_mock)
    # Setup must have populated the integration's data bucket.
    assert hass.data[DOMAIN]
    # Unloading the entry should clean that bucket up again.
    await hass.config_entries.async_unload(config_entry.entry_id)
    await hass.async_block_till_done()
    assert not hass.data.get(DOMAIN)
| apache-2.0 |
foomango/linux-3.7.1 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# CLI: an optional single argument selects a filter -- a pid if it parses as
# an integer, otherwise a process comm name.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except:
		for_comm = sys.argv[1]
# comm -> pid -> syscall id -> count (autovivifying dict from Core)
syscalls = autodict()
def trace_begin():
	# perf callback: invoked once before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# perf callback: invoked once after all events; prints the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf callback for each raw_syscalls:sys_enter event: bump the counter
	# for (comm, pid, syscall id), honoring the optional comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		# autodict returns an empty leaf on first access; initialize it.
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
	# Emit the per-comm/per-pid syscall counts accumulated in `syscalls`,
	# sorted by descending count within each pid. (Python 2 only: relies on
	# dict.iteritems() and tuple-unpacking lambda parameters.)
	if for_comm is not None:
	    print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
	    print "\nsyscall events by comm/pid:\n\n",
	print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
	    "----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
	    pid_keys = syscalls[comm].keys()
	    for pid in pid_keys:
		print "\n%s [%d]\n" % (comm, pid),
		id_keys = syscalls[comm][pid].keys()
		# sort syscalls by count, descending (key maps (id, count) -> (count, id))
		for id, val in sorted(syscalls[comm][pid].iteritems(), \
		    key = lambda(k, v): (v, k), reverse = True):
		    print "  %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
aimas/TuniErp-8.0 | addons/stock_account/stock_account.py | 89 | 19895 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID, api
import logging
_logger = logging.getLogger(__name__)
class stock_inventory(osv.osv):
    """Inventory extension: allow forcing the accounting period used when
    valuing the stock moves created by an inventory."""
    _inherit = "stock.inventory"
    _columns = {
        'period_id': fields.many2one('account.period', 'Force Valuation Period', help="Choose the accounting period where you want to value the stock moves created by the inventory instead of the default one (chosen by the inventory end date)"),
    }
    def post_inventory(self, cr, uid, inv, context=None):
        """Post `inv`, forcing the valuation period in the context if one is set."""
        # Work on a copy so the caller's context is never mutated.
        posting_ctx = dict(context) if context else {}
        if inv.period_id:
            posting_ctx['force_period'] = inv.period_id.id
        return super(stock_inventory, self).post_inventory(cr, uid, inv, context=posting_ctx)
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
    # Locations gain two optional valuation accounts; when set on a virtual
    # location they override the product's generic stock input/output accounts
    # (read in stock_quant._get_accounting_data_for_valuation below).
    _inherit = "stock.location"
    _columns = {
        'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain=[('type', '=', 'other')],
                                                   help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
                                                        "this account will be used to hold the value of products being moved from an internal location "
                                                        "into this location, instead of the generic Stock Output Account set on the product. "
                                                        "This has no effect for internal locations."),
        'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain=[('type', '=', 'other')],
                                                    help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
                                                         "this account will be used to hold the value of products being moved out of this location "
                                                         "and into an internal location, instead of the generic Stock Output Account set on the product. "
                                                         "This has no effect for internal locations."),
    }
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
    _inherit = "stock.quant"

    def _get_inventory_value(self, cr, uid, quant, context=None):
        """Value quants of 'real'-costed products at their recorded cost;
        defer to the standard valuation for other costing methods."""
        # Fixed: the previous test used ``in ('real')``, which is a string
        # membership test (``('real')`` is not a tuple); equality states the
        # intent and cannot match spurious substrings.
        if quant.product_id.cost_method == 'real':
            return quant.cost * quant.qty
        return super(stock_quant, self)._get_inventory_value(cr, uid, quant, context=context)

    @api.cr_uid_ids_context
    def _price_update(self, cr, uid, quant_ids, newprice, context=None):
        '''This function is called at the end of negative quant reconciliation
        and does the accounting entries adjustments and the update of the
        product cost price if needed.
        '''
        if context is None:
            context = {}
        account_period = self.pool['account.period']
        super(stock_quant, self)._price_update(cr, uid, quant_ids, newprice, context=context)
        for quant in self.browse(cr, uid, quant_ids, context=context):
            move = self._get_latest_move(cr, uid, quant, context=context)
            valuation_update = newprice - quant.cost
            # this is where we post accounting entries for adjustment, if needed
            if not quant.company_id.currency_id.is_zero(valuation_update):
                # adjustment journal entry needed, cost has been updated
                period_id = (context.get('force_period') or
                             account_period.find(cr, uid, move.date, context=context)[0])
                period = account_period.browse(cr, uid, period_id, context=context)
                # If neg quant period already closed (likely with manual valuation), skip update
                if period.state != 'done':
                    ctx = dict(context, force_valuation_amount=valuation_update)
                    self._account_entry_move(cr, uid, [quant], move, context=ctx)

            #update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
            #1) the product cost's method is 'real'
            #2) we just fixed a negative quant caused by an outgoing shipment
            if quant.product_id.cost_method == 'real' and quant.location_id.usage != 'internal':
                self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)

    def _account_entry_move(self, cr, uid, quants, move, context=None):
        """
        Accounting Valuation Entries

        quants: browse record list of Quants to create accounting valuation entries for.
                Non-empty, and all quants are supposed to have the same location id
                (they already moved in).
        move: Move to use. browse record
        """
        if context is None:
            context = {}
        location_obj = self.pool.get('stock.location')
        location_from = move.location_id
        location_to = quants[0].location_id
        company_from = location_obj._location_owner(cr, uid, location_from, context=context)
        company_to = location_obj._location_owner(cr, uid, location_to, context=context)

        if move.product_id.valuation != 'real_time':
            return False
        for q in quants:
            if q.owner_id:
                #if the quant isn't owned by the company, we don't make any valuation entry
                return False
            if q.qty <= 0:
                #we don't make any stock valuation for negative quants because the valuation is already made for the counterpart.
                #At that time the valuation will be made at the product cost price and afterward there will be new accounting entries
                #to make the adjustments when we know the real cost price.
                return False

        #in case of routes making the link between several warehouse of the same company, the transit location belongs to this company, so we don't need to create accounting entries
        # Create Journal Entry for products arriving in the company
        if company_to and (move.location_id.usage not in ('internal', 'transit') and move.location_dest_id.usage == 'internal' or company_from != company_to):
            ctx = context.copy()
            ctx['force_company'] = company_to.id
            journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
            if location_from and location_from.usage == 'customer':
                #goods returned from customer
                self._create_account_move_line(cr, uid, quants, move, acc_dest, acc_valuation, journal_id, context=ctx)
            else:
                self._create_account_move_line(cr, uid, quants, move, acc_src, acc_valuation, journal_id, context=ctx)

        # Create Journal Entry for products leaving the company
        if company_from and (move.location_id.usage == 'internal' and move.location_dest_id.usage not in ('internal', 'transit') or company_from != company_to):
            ctx = context.copy()
            ctx['force_company'] = company_from.id
            journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
            if location_to and location_to.usage == 'supplier':
                #goods returned to supplier
                self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_src, journal_id, context=ctx)
            else:
                self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_dest, journal_id, context=ctx)

    def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None):
        """Create the quant, then post real-time valuation entries for it."""
        quant = super(stock_quant, self)._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=force_location_from, force_location_to=force_location_to, context=context)
        if move.product_id.valuation == 'real_time':
            self._account_entry_move(cr, uid, [quant], move, context)
        return quant

    def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
        """Move the quants, then post real-time valuation entries for them."""
        res = super(stock_quant, self).move_quants_write(cr, uid, quants, move, location_dest_id, dest_package_id, context=context)
        if move.product_id.valuation == 'real_time':
            self._account_entry_move(cr, uid, quants, move, context=context)
        return res

    def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
        """
        Return the accounts and journal to use to post Journal Entries for the real-time
        valuation of the quant.

        :param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :returns: journal_id, source account, destination account, valuation account
        :raise: osv.except_osv() is any mandatory account or journal is not defined.
        """
        product_obj = self.pool.get('product.template')
        accounts = product_obj.get_product_accounts(cr, uid, move.product_id.product_tmpl_id.id, context)
        # A location-level valuation account (see stock_location above)
        # overrides the product's generic input/output account.
        if move.location_id.valuation_out_account_id:
            acc_src = move.location_id.valuation_out_account_id.id
        else:
            acc_src = accounts['stock_account_input']

        if move.location_dest_id.valuation_in_account_id:
            acc_dest = move.location_dest_id.valuation_in_account_id.id
        else:
            acc_dest = accounts['stock_account_output']

        acc_valuation = accounts.get('property_stock_valuation_account_id', False)
        journal_id = accounts['stock_journal']
        return journal_id, acc_src, acc_dest, acc_valuation

    def _prepare_account_move_line(self, cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=None):
        """
        Generate the account.move.line values to post to track the stock valuation difference due to the
        processing of the given quant.
        """
        if context is None:
            context = {}
        currency_obj = self.pool.get('res.currency')
        if context.get('force_valuation_amount'):
            valuation_amount = context.get('force_valuation_amount')
        else:
            if move.product_id.cost_method == 'average':
                valuation_amount = cost if move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal' else move.product_id.standard_price
            else:
                valuation_amount = cost if move.product_id.cost_method == 'real' else move.product_id.standard_price
        #the standard_price of the product may be in another decimal precision, or not compatible with the coinage of
        #the company currency... so we need to use round() before creating the accounting entries.
        valuation_amount = currency_obj.round(cr, uid, move.company_id.currency_id, valuation_amount * qty)
        partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
        debit_line_vals = {
                    'name': move.name,
                    'product_id': move.product_id.id,
                    'quantity': qty,
                    'product_uom_id': move.product_id.uom_id.id,
                    'ref': move.picking_id and move.picking_id.name or False,
                    'date': move.date,
                    'partner_id': partner_id,
                    'debit': valuation_amount > 0 and valuation_amount or 0,
                    'credit': valuation_amount < 0 and -valuation_amount or 0,
                    'account_id': debit_account_id,
        }
        credit_line_vals = {
                    'name': move.name,
                    'product_id': move.product_id.id,
                    'quantity': qty,
                    'product_uom_id': move.product_id.uom_id.id,
                    'ref': move.picking_id and move.picking_id.name or False,
                    'date': move.date,
                    'partner_id': partner_id,
                    'credit': valuation_amount > 0 and valuation_amount or 0,
                    'debit': valuation_amount < 0 and -valuation_amount or 0,
                    'account_id': credit_account_id,
        }
        return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]

    def _create_account_move_line(self, cr, uid, quants, move, credit_account_id, debit_account_id, journal_id, context=None):
        #group quants by cost
        quant_cost_qty = {}
        for quant in quants:
            if quant_cost_qty.get(quant.cost):
                quant_cost_qty[quant.cost] += quant.qty
            else:
                quant_cost_qty[quant.cost] = quant.qty
        move_obj = self.pool.get('account.move')
        for cost, qty in quant_cost_qty.items():
            move_lines = self._prepare_account_move_line(cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=context)
            # Fixed: short-circuit so the period lookup only runs when no
            # period is forced; using it as a ``dict.get`` default evaluated
            # ``find()`` (a DB query that may raise) unconditionally.
            period_id = context.get('force_period') or self.pool.get('account.period').find(cr, uid, context=context)[0]
            move_obj.create(cr, uid, {'journal_id': journal_id,
                                      'line_id': move_lines,
                                      'period_id': period_id,
                                      'date': fields.date.context_today(self, cr, uid, context=context),
                                      'ref': move.picking_id.name}, context=context)

    #def _reconcile_single_negative_quant(self, cr, uid, to_solve_quant, quant, quant_neg, qty, context=None):
    #    move = self._get_latest_move(cr, uid, to_solve_quant, context=context)
    #    quant_neg_position = quant_neg.negative_dest_location_id.usage
    #    remaining_solving_quant, remaining_to_solve_quant = super(stock_quant, self)._reconcile_single_negative_quant(cr, uid, to_solve_quant, quant, quant_neg, qty, context=context)
    #    #update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
    #    #1) there isn't any negative quant anymore
    #    #2) the product cost's method is 'real'
    #    #3) we just fixed a negative quant caused by an outgoing shipment
    #    if not remaining_to_solve_quant and move.product_id.cost_method == 'real' and quant_neg_position != 'internal':
    #        self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
    #    return remaining_solving_quant, remaining_to_solve_quant
class stock_move(osv.osv):
    _inherit = "stock.move"

    def action_done(self, cr, uid, ids, context=None):
        """Process the moves, adapting product cost prices before (average
        costing of incoming moves) and after (real costing of outgoing moves)
        the standard processing."""
        self.product_price_update_before_done(cr, uid, ids, context=context)
        res = super(stock_move, self).action_done(cr, uid, ids, context=context)
        self.product_price_update_after_done(cr, uid, ids, context=context)
        return res

    def _store_average_cost_price(self, cr, uid, move, context=None):
        """Store the average cost of ``move``'s quants (``move`` is a browse
        record) on the move's price_unit and the product's standard_price."""
        product_obj = self.pool.get('product.product')
        # Generator expression instead of building a throwaway list.
        if any(q.qty <= 0 for q in move.quant_ids):
            #if there is a negative quant, the standard price shouldn't be updated
            return
        #Note: here we can't store a quant.cost directly as we may have moved out 2 units (1 unit to 5€ and 1 unit to 7€) and in case of a product return of 1 unit, we can't know which of the 2 costs has to be used (5€ or 7€?). So at that time, thanks to the average valuation price we are storing we will svaluate it at 6€
        average_valuation_price = 0.0
        for q in move.quant_ids:
            average_valuation_price += q.qty * q.cost
        average_valuation_price = average_valuation_price / move.product_qty
        # Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
        # ``context or {}`` guards against the default context=None (the
        # previous ``dict(context, ...)`` raised TypeError in that case).
        ctx = dict(context or {}, force_company=move.company_id.id)
        product_obj.write(cr, SUPERUSER_ID, [move.product_id.id], {'standard_price': average_valuation_price}, context=ctx)
        self.write(cr, uid, [move.id], {'price_unit': average_valuation_price}, context=context)

    def product_price_update_before_done(self, cr, uid, ids, context=None):
        """Recompute the weighted-average standard price of 'average'-costed
        products for moves coming in from supplier locations."""
        product_obj = self.pool.get('product.product')
        # tmpl_dict accumulates quantities already processed in this call so
        # successive moves of the same template chain their averages.
        tmpl_dict = {}
        for move in self.browse(cr, uid, ids, context=context):
            #adapt standard price on incomming moves if the product cost_method is 'average'
            if (move.location_id.usage == 'supplier') and (move.product_id.cost_method == 'average'):
                product = move.product_id
                prod_tmpl_id = move.product_id.product_tmpl_id.id
                qty_available = move.product_id.product_tmpl_id.qty_available
                if tmpl_dict.get(prod_tmpl_id):
                    product_avail = qty_available + tmpl_dict[prod_tmpl_id]
                else:
                    tmpl_dict[prod_tmpl_id] = 0
                    product_avail = qty_available
                if product_avail <= 0:
                    new_std_price = move.price_unit
                else:
                    # Get the standard price
                    amount_unit = product.standard_price
                    new_std_price = ((amount_unit * product_avail) + (move.price_unit * move.product_qty)) / (product_avail + move.product_qty)
                tmpl_dict[prod_tmpl_id] += move.product_qty
                # Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
                ctx = dict(context or {}, force_company=move.company_id.id)
                product_obj.write(cr, SUPERUSER_ID, [product.id], {'standard_price': new_std_price}, context=ctx)

    def product_price_update_after_done(self, cr, uid, ids, context=None):
        '''
        This method adapts the price on the product when necessary
        '''
        for move in self.browse(cr, uid, ids, context=context):
            #adapt standard price on outgoing moves if the product cost_method is 'real', so that a return
            #or an inventory loss is made using the last value used for an outgoing valuation.
            if move.product_id.cost_method == 'real' and move.location_dest_id.usage != 'internal':
                #store the average price of the move on the move and product form
                self._store_average_cost_price(cr, uid, move, context=context)
| agpl-3.0 |
MpApQ/kernel_huawei | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
	# Called once by perf after the last event: report unhandled event counts.
	print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
	# Handler for irq:softirq_entry: print the common fields, the
	# non-argument fields, then the vector rendered via symbol_str
	# (presumably mapping vec to its symbolic softirq name — from Core).
	print_header(event_name, common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)

	print_uncommon(context)

	print "vec=%s\n" % \
	(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
	# Handler for kmem:kmalloc: print the common fields, the non-argument
	# fields, then the allocation details with gfp_flags decoded via
	# flag_str (from the perf Core helpers).
	print_header(event_name, common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)

	print_uncommon(context)

	print "call_site=%u, ptr=%u, bytes_req=%u, " \
	"bytes_alloc=%u, gfp_flags=%s\n" % \
	(call_site, ptr, bytes_req, bytes_alloc,
	flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
	# Count events that have no dedicated handler.  ``unhandled`` is a perf
	# ``autodict`` (imported from Core); on the first occurrence the +=
	# raises TypeError — presumably because the missing key materializes as
	# a non-numeric placeholder — so the counter is seeded with 1 instead.
	try:
		unhandled[event_name] += 1
	except TypeError:
		unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Print the fields common to every event; the trailing comma keeps the
	# cursor on the same line so per-event fields can be appended.
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
	# Print trace fields that perf does not pass as handler arguments; the
	# common_* accessors come from perf_trace_context.
	print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
		% (common_pc(context), trace_flag_str(common_flags(context)), \
		common_lock_depth(context))
def print_unhandled():
	# Dump a table of event name -> count for events with no handler;
	# silently return when everything was handled.
	keys = unhandled.keys()
	if not keys:
		return

	print "\nunhandled events:\n\n",

	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
	"-----------"),

	for event_name in keys:
		print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
charbeljc/OCB | openerp/addons/test_inherit/tests/test_inherit.py | 118 | 3182 | # -*- coding: utf-8 -*-
from openerp.tests import common
class test_inherits(common.TransactionCase):
    """Tests of field/model extension via _inherits and _inherit."""

    def test_00_inherits(self):
        """ Check that a many2one field with delegate=True adds an entry in _inherits """
        daughter = self.env['test.inherit.daughter']

        self.assertEqual(daughter._inherits, {'test.inherit.mother': 'template_id'})

    def test_10_access_from_child_to_parent_model(self):
        """ check whether added field in model is accessible from children models (_inherits) """
        # This test checks if the new added column of a parent model
        # is accessible from the child model. This test has been written
        # to verify the purpose of the inheritance computing of the class
        # in the openerp.osv.orm._build_model.
        mother = self.env['test.inherit.mother']
        daughter = self.env['test.inherit.daughter']

        self.assertIn('field_in_mother', mother._fields)
        self.assertIn('field_in_mother', daughter._fields)

    def test_20_field_extension(self):
        """ check the extension of a field in an inherited model """
        mother = self.env['test.inherit.mother']
        daughter = self.env['test.inherit.daughter']

        # the field mother.name must have required=True and "Bar" as default
        field = mother._fields['name']
        self.assertTrue(field.required)
        self.assertEqual(field.default(mother), "Bar")
        self.assertEqual(mother._defaults.get('name'), "Bar")
        self.assertEqual(mother.default_get(['name']), {'name': "Bar"})

        # the field daughter.name must have required=False and "Baz" as default
        field = daughter._fields['name']
        self.assertFalse(field.required)
        # Fixed copy-paste: the daughter field's default is evaluated against
        # the daughter model, not the mother.
        self.assertEqual(field.default(daughter), "Baz")
        self.assertEqual(daughter._defaults.get('name'), "Baz")
        self.assertEqual(daughter.default_get(['name']), {'name': "Baz"})

        # the field daughter.template_id should have
        # comodel_name='test.inherit.mother', string='Template', required=True
        field = daughter._fields['template_id']
        self.assertEqual(field.comodel_name, 'test.inherit.mother')
        self.assertEqual(field.string, "Template")
        self.assertTrue(field.required)

    def test_30_depends_extension(self):
        """ check that @depends on overridden compute methods extends dependencies """
        mother = self.env['test.inherit.mother']
        field = mother._fields['surname']

        # the field dependencies are added
        self.assertItemsEqual(field.depends, ['name', 'field_in_mother'])

    def test_40_selection_extension(self):
        """ check that attribute selection_add=... extends selection on fields. """
        mother = self.env['test.inherit.mother']

        # the extra values are added, both in the field and the column
        self.assertEqual(mother._fields['state'].selection,
                         [('a', 'A'), ('b', 'B'), ('c', 'C'), ('d', 'D')])
        self.assertEqual(mother._columns['state'].selection,
                         [('a', 'A'), ('b', 'B'), ('c', 'C'), ('d', 'D')])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gchaimovitz/CouchPotatoServer | libs/tornado/curl_httpclient.py | 64 | 20382 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Non-blocking HTTP client implementation using pycurl."""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import logging
import pycurl
import threading
import time
from io import BytesIO
from tornado import httputil
from tornado import ioloop
from tornado import stack_context
from tornado.escape import utf8, native_str
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
curl_log = logging.getLogger('tornado.curl_httpclient')
class CurlAsyncHTTPClient(AsyncHTTPClient):
    def initialize(self, io_loop, max_clients=10, defaults=None):
        """Set up the curl multi handle, a fixed pool of ``max_clients`` easy
        handles, the pending-request queue, and a 1s watchdog callback that
        compensates for libcurl notification bugs."""
        super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
        self._multi = pycurl.CurlMulti()
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [self._curl_create() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._requests = collections.deque()
        self._fds = {}
        self._timeout = None

        # libcurl has bugs that sometimes cause it to not report all
        # relevant file descriptors and timeouts to TIMERFUNCTION/
        # SOCKETFUNCTION. Mitigate the effects of such bugs by
        # forcing a periodic scan of all active requests.
        self._force_timeout_callback = ioloop.PeriodicCallback(
            self._handle_force_timeout, 1000, io_loop=io_loop)
        self._force_timeout_callback.start()

        # Work around a bug in libcurl 7.29.0: Some fields in the curl
        # multi object are initialized lazily, and its destructor will
        # segfault if it is destroyed without having been used. Add
        # and remove a dummy handle to make sure everything is
        # initialized.
        dummy_curl_handle = pycurl.Curl()
        self._multi.add_handle(dummy_curl_handle)
        self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._force_timeout_callback.stop()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
for curl in self._curls:
curl.close()
self._multi.close()
super(CurlAsyncHTTPClient, self).close()
    def fetch_impl(self, request, callback):
        # Queue the request, try to start it on a free handle immediately,
        # and schedule a zero-delay timeout so libcurl gets driven promptly.
        self._requests.append((request, callback))
        self._process_queue()
        self._set_timeout(0)
    def _handle_socket(self, event, fd, multi, data):
        """Called by libcurl when it wants to change the file descriptors
        it cares about.

        ``event`` is one of the pycurl.POLL_* constants and is translated to
        an IOLoop event mask below; ``self._fds`` mirrors the registrations.
        """
        event_map = {
            pycurl.POLL_NONE: ioloop.IOLoop.NONE,
            pycurl.POLL_IN: ioloop.IOLoop.READ,
            pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
            pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
        }
        if event == pycurl.POLL_REMOVE:
            if fd in self._fds:
                self.io_loop.remove_handler(fd)
                del self._fds[fd]
        else:
            ioloop_event = event_map[event]
            # libcurl sometimes closes a socket and then opens a new
            # one using the same FD without giving us a POLL_NONE in
            # between. This is a problem with the epoll IOLoop,
            # because the kernel can tell when a socket is closed and
            # removes it from the epoll automatically, causing future
            # update_handler calls to fail. Since we can't tell when
            # this has happened, always use remove and re-add
            # instead of update.
            if fd in self._fds:
                self.io_loop.remove_handler(fd)
            self.io_loop.add_handler(fd, self._handle_events,
                                     ioloop_event)
            self._fds[fd] = ioloop_event
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ:
action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE:
action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._multi.socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
    def _handle_timeout(self):
        """Called by IOLoop when the requested timeout has passed."""
        with stack_context.NullContext():
            self._timeout = None
            # Drive libcurl until it stops asking for an immediate re-call.
            while True:
                try:
                    ret, num_handles = self._multi.socket_action(
                        pycurl.SOCKET_TIMEOUT, 0)
                except pycurl.error as e:
                    ret = e.args[0]
                if ret != pycurl.E_CALL_MULTI_PERFORM:
                    break
            self._finish_pending_requests()

        # In theory, we shouldn't have to do this because curl will
        # call _set_timeout whenever the timeout changes. However,
        # sometimes after _handle_timeout we will need to reschedule
        # immediately even though nothing has changed from curl's
        # perspective. This is because when socket_action is
        # called with SOCKET_TIMEOUT, libcurl decides internally which
        # timeouts need to be processed by using a monotonic clock
        # (where available) while tornado uses python's time.time()
        # to decide when timeouts have occurred. When those clocks
        # disagree on elapsed time (as they will whenever there is an
        # NTP adjustment), tornado might call _handle_timeout before
        # libcurl is ready. After each timeout, resync the scheduled
        # timeout with libcurl's current state.
        new_timeout = self._multi.timeout()
        if new_timeout >= 0:
            self._set_timeout(new_timeout)
    def _handle_force_timeout(self):
        """Called by IOLoop periodically to ask libcurl to process any
        events it may have forgotten about.

        This is the 1s watchdog installed in initialize(); socket_all scans
        every active transfer rather than a single descriptor.
        """
        with stack_context.NullContext():
            while True:
                try:
                    ret, num_handles = self._multi.socket_all()
                except pycurl.error as e:
                    ret = e.args[0]
                if ret != pycurl.E_CALL_MULTI_PERFORM:
                    break
            self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
self._curl_setup_request(curl, request, curl.info["buffer"],
curl.info["headers"])
self._multi.add_handle(curl)
if not started:
break
    def _finish(self, curl, curl_error=None, curl_message=None):
        """Recycle a completed easy handle: detach it from the multi handle,
        return it to the free pool, and invoke the stored request callback
        with an HTTPResponse (carrying an error when the transfer failed)."""
        info = curl.info
        curl.info = None
        self._multi.remove_handle(curl)
        self._free_list.append(curl)
        buffer = info["buffer"]
        if curl_error:
            # Failed transfer: wrap the libcurl error and discard the buffer.
            error = CurlError(curl_error, curl_message)
            code = error.code
            effective_url = None
            buffer.close()
            buffer = None
        else:
            error = None
            code = curl.getinfo(pycurl.HTTP_CODE)
            effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
            buffer.seek(0)
        # the various curl timings are documented at
        # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
        time_info = dict(
            queue=info["curl_start_time"] - info["request"].start_time,
            namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
            connect=curl.getinfo(pycurl.CONNECT_TIME),
            pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
            starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
            total=curl.getinfo(pycurl.TOTAL_TIME),
            redirect=curl.getinfo(pycurl.REDIRECT_TIME),
        )
        try:
            info["callback"](HTTPResponse(
                request=info["request"], code=code, headers=info["headers"],
                buffer=buffer, effective_url=effective_url, error=error,
                reason=info['headers'].get("X-Http-Reason", None),
                request_time=time.time() - info["curl_start_time"],
                time_info=time_info))
        except Exception:
            self.handle_callback_exception(info["callback"])
    def handle_callback_exception(self, callback):
        # Delegate exception reporting for a failed response callback to the
        # IOLoop's standard handler.
        self.io_loop.handle_callback_exception(callback)
def _curl_create(self):
curl = pycurl.Curl()
if curl_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)
return curl
    def _curl_setup_request(self, curl, request, buffer, headers):
        """Translate an HTTPRequest into setopt() calls on ``curl``.

        ``buffer`` receives the response body and ``headers`` is filled via
        the header callback; both are created by the caller (_process_queue)
        and stored in ``curl.info``.
        """
        curl.setopt(pycurl.URL, native_str(request.url))

        # libcurl's magic "Expect: 100-continue" behavior causes delays
        # with servers that don't support it (which include, among others,
        # Google's OpenID endpoint). Additionally, this behavior has
        # a bug in conjunction with the curl_multi_socket_action API
        # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
        # which increases the delays. It's more trouble than it's worth,
        # so just turn off the feature (yes, setting Expect: to an empty
        # value is the official way to disable this)
        if "Expect" not in request.headers:
            request.headers["Expect"] = ""

        # libcurl adds Pragma: no-cache by default; disable that too
        if "Pragma" not in request.headers:
            request.headers["Pragma"] = ""

        curl.setopt(pycurl.HTTPHEADER,
                    ["%s: %s" % (native_str(k), native_str(v))
                     for k, v in request.headers.get_all()])

        curl.setopt(pycurl.HEADERFUNCTION,
                    functools.partial(self._curl_header_callback,
                                      headers, request.header_callback))
        if request.streaming_callback:
            write_function = lambda chunk: self.io_loop.add_callback(
                request.streaming_callback, chunk)
        else:
            write_function = buffer.write
        if bytes is str:  # py2
            curl.setopt(pycurl.WRITEFUNCTION, write_function)
        else:  # py3
            # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
            # a fork/port. That version has a bug in which it passes unicode
            # strings instead of bytes to the WRITEFUNCTION. This means that
            # if you use a WRITEFUNCTION (which tornado always does), you cannot
            # download arbitrary binary data. This needs to be fixed in the
            # ported pycurl package, but in the meantime this lambda will
            # make it work for downloading (utf8) text.
            curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
        curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
        curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
        curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
        curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
        if request.user_agent:
            curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
        else:
            curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
        if request.network_interface:
            curl.setopt(pycurl.INTERFACE, request.network_interface)
        if request.decompress_response:
            curl.setopt(pycurl.ENCODING, "gzip,deflate")
        else:
            curl.setopt(pycurl.ENCODING, "none")
        if request.proxy_host and request.proxy_port:
            curl.setopt(pycurl.PROXY, request.proxy_host)
            curl.setopt(pycurl.PROXYPORT, request.proxy_port)
            if request.proxy_username:
                credentials = '%s:%s' % (request.proxy_username,
                                         request.proxy_password)
                curl.setopt(pycurl.PROXYUSERPWD, credentials)
        else:
            # Clear any proxy settings left over from a previous transfer on
            # this recycled handle.
            curl.setopt(pycurl.PROXY, '')
            curl.unsetopt(pycurl.PROXYUSERPWD)
        if request.validate_cert:
            curl.setopt(pycurl.SSL_VERIFYPEER, 1)
            curl.setopt(pycurl.SSL_VERIFYHOST, 2)
        else:
            curl.setopt(pycurl.SSL_VERIFYPEER, 0)
            curl.setopt(pycurl.SSL_VERIFYHOST, 0)
        if request.ca_certs is not None:
            curl.setopt(pycurl.CAINFO, request.ca_certs)
        else:
            # There is no way to restore pycurl.CAINFO to its default value
            # (Using unsetopt makes it reject all certificates).
            # I don't see any way to read the default value from python so it
            # can be restored later. We'll have to just leave CAINFO untouched
            # if no ca_certs file was specified, and require that if any
            # request uses a custom ca_certs file, they all must.
            pass
        if request.allow_ipv6 is False:
            # Curl behaves reasonably when DNS resolution gives an ipv6 address
            # that we can't reach, so allow ipv6 unless the user asks to disable.
            curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
        else:
            curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)

        # Set the request method through curl's irritating interface which makes
        # up names for almost every single method
        curl_options = {
            "GET": pycurl.HTTPGET,
            "POST": pycurl.POST,
            "PUT": pycurl.UPLOAD,
            "HEAD": pycurl.NOBODY,
        }
        custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
        for o in curl_options.values():
            curl.setopt(o, False)
        if request.method in curl_options:
            curl.unsetopt(pycurl.CUSTOMREQUEST)
            curl.setopt(curl_options[request.method], True)
        elif request.allow_nonstandard_methods or request.method in custom_methods:
            curl.setopt(pycurl.CUSTOMREQUEST, request.method)
        else:
            raise KeyError('unknown method ' + request.method)

        # Handle curl's cryptic options for every individual HTTP method
        if request.method == "GET":
            if request.body is not None:
                raise ValueError('Body must be None for GET request')
        elif request.method in ("POST", "PUT") or request.body:
            if request.body is None:
                raise ValueError(
                    'Body must not be None for "%s" request'
                    % request.method)

            request_buffer = BytesIO(utf8(request.body))

            def ioctl(cmd):
                # Allow libcurl to rewind the body (e.g. on redirect/retry).
                if cmd == curl.IOCMD_RESTARTREAD:
                    request_buffer.seek(0)
            curl.setopt(pycurl.READFUNCTION, request_buffer.read)
            curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
            if request.method == "POST":
                curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
            else:
                curl.setopt(pycurl.UPLOAD, True)
                curl.setopt(pycurl.INFILESIZE, len(request.body))

        if request.auth_username is not None:
            userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')

            if request.auth_mode is None or request.auth_mode == "basic":
                curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            elif request.auth_mode == "digest":
                curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
            else:
                raise ValueError("Unsupported auth_mode %s" % request.auth_mode)

            curl.setopt(pycurl.USERPWD, native_str(userpwd))
            curl_log.debug("%s %s (username: %r)", request.method, request.url,
                           request.auth_username)
        else:
            curl.unsetopt(pycurl.USERPWD)
            curl_log.debug("%s %s", request.method, request.url)

        if request.client_cert is not None:
            curl.setopt(pycurl.SSLCERT, request.client_cert)

        if request.client_key is not None:
            curl.setopt(pycurl.SSLKEY, request.client_key)

        if threading.activeCount() > 1:
            # libcurl/pycurl is not thread-safe by default. When multiple threads
            # are used, signals should be disabled. This has the side effect
            # of disabling DNS timeouts in some environments (when libcurl is
            # not linked against ares), so we don't do it when there is only one
            # thread. Applications that use many short-lived threads may need
            # to set NOSIGNAL manually in a prepare_curl_callback since
            # there may not be any other threads running at the time we call
            # threading.activeCount.
            curl.setopt(pycurl.NOSIGNAL, 1)
        if request.prepare_curl_callback is not None:
            request.prepare_curl_callback(curl)
def _curl_header_callback(self, headers, header_callback, header_line):
    """Process one raw header line delivered by curl.

    Forwards the raw line to ``header_callback`` (if given) on the IO
    loop, resets ``headers`` when a new status line arrives, and feeds
    every other non-empty line into ``headers``.
    """
    line = native_str(header_line)
    if header_callback is not None:
        self.io_loop.add_callback(header_callback, line)
    # curl hands us each line with its end-of-line characters attached.
    line = line.strip()
    if line.startswith("HTTP/"):
        # A (possibly intermediate) status line begins a fresh header set.
        headers.clear()
        try:
            _, _, reason = httputil.parse_response_start_line(line)
        except httputil.HTTPInputError:
            return
        line = "X-Http-Reason: %s" % reason
    if line:
        headers.parse_line(line)
def _curl_debug(self, debug_type, debug_msg):
    """Route pycurl debug events to the curl logger.

    Per pycurl's DEBUGFUNCTION, ``debug_type`` 0 is informational text,
    1/2 are header traffic, 4 is outgoing data; type 3 (incoming data)
    is deliberately not logged.
    """
    markers = ('I', '<', '>', '<', '>')
    if debug_type == 0:
        curl_log.debug('%s', debug_msg.strip())
    elif debug_type in (1, 2):
        marker = markers[debug_type]
        for line in debug_msg.splitlines():
            curl_log.debug('%s %s', marker, line)
    elif debug_type == 4:
        curl_log.debug('%s %r', markers[debug_type], debug_msg)
class CurlError(HTTPError):
    """An HTTPError raised for a curl-level failure.

    Always reported with HTTP code 599; the underlying curl error
    number is preserved on the ``errno`` attribute.
    """

    def __init__(self, errno, message):
        super(CurlError, self).__init__(599, message)
        self.errno = errno
if __name__ == "__main__":
    # When run directly, use the curl-based implementation for the
    # command-line client entry point.
    AsyncHTTPClient.configure(CurlAsyncHTTPClient)
    main()
| gpl-3.0 |
Ryex/airtime | python_apps/media-monitor/mm2/media/monitor/handler.py | 10 | 2337 | # -*- coding: utf-8 -*-
from pydispatch import dispatcher
import abc
from log import Loggable
from ..saas.thread import getsig
import pure as mmp
# Defines the handle interface
class Handles(object):
    """Abstract interface for objects that respond to dispatched events."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def handle(self, sender, event, *args, **kwargs):
        """React to *event* sent by *sender*."""
        pass
# TODO : Investigate whether weak reffing in dispatcher.connect could possibly
# cause a memory leak
class ReportHandler(Handles):
    """
    A handler that, besides handling events, can report problem files
    through the report_problem_file routine when things go wrong.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, signal, weak=False):
        self.signal = getsig(signal)
        self.report_signal = getsig("badfile")

        # Small closure adapter so dispatcher simply forwards to
        # self.handle for every event on our signal.
        def forward(sender, event):
            self.handle(sender, event)

        dispatcher.connect(forward, signal=self.signal,
                           sender=dispatcher.Any, weak=weak)

    def report_problem_file(self, event, exception=None):
        dispatcher.send(signal=self.report_signal, sender=self,
                        event=event, exception=exception)
class ProblemFileHandler(Handles, Loggable):
    """
    Responsible for answering to events passed through the 'badfile'
    signal. Moves the problem file passed to the designated directory.
    """
    def __init__(self, channel, **kwargs):
        # The channel supplies both the dispatcher signal to listen on
        # and the destination directory for problem files.
        self.channel = channel
        self.signal = getsig(self.channel.signal)
        self.problem_dir = self.channel.path
        def dummy(sender, event, exception):
            self.handle(sender, event, exception)
        # weak=False keeps the closure alive for the lifetime of the
        # dispatcher connection.
        dispatcher.connect(dummy, signal=self.signal, sender=dispatcher.Any,
                           weak=False)
        # Make sure the destination directory exists before any move.
        mmp.create_dir( self.problem_dir )
        self.logger.info("Initialized problem file handler. Problem dir: '%s'" %
                self.problem_dir)
    def handle(self, sender, event, exception=None):
        # TODO : use the exception parameter for something
        self.logger.info("Received problem file: '%s'. Supposed to move it to \
problem dir", event.path)
        try: mmp.move_to_dir(dir_path=self.problem_dir, file_path=event.path)
        except Exception as e:
            # Best effort only: a failed move is logged and otherwise
            # ignored so monitoring keeps running.
            self.logger.info("Could not move file: '%s' to problem dir: '%s'" %
                             (event.path, self.problem_dir))
            self.logger.info("Exception: %s" % str(e))
| agpl-3.0 |
wwj718/edx-platform | lms/djangoapps/mobile_api/video_outlines/serializers.py | 46 | 8899 | """
Serializer for video outline
"""
from rest_framework.reverse import reverse
from xmodule.modulestore.mongo.base import BLOCK_TYPES_WITH_CHILDREN
from xmodule.modulestore.django import modulestore
from courseware.access import has_access
from courseware.courses import get_course_by_id
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.module_utils import get_dynamic_descriptor_children
from edxval.api import (
get_video_info_for_course_and_profiles, ValInternalError
)
class BlockOutline(object):
    """
    Serializes course videos, pulling data from VAL and the video modules.
    """
    def __init__(self, course_id, start_block, block_types, request, video_profiles):
        """Create a BlockOutline using `start_block` as a starting point."""
        self.start_block = start_block
        # Mapping of block category -> summary function used when a block
        # of that type is yielded during iteration.
        self.block_types = block_types
        self.course_id = course_id
        self.request = request  # needed for making full URLS
        self.local_cache = {}
        try:
            # Pre-fetch all VAL video data for the course in one call.
            self.local_cache['course_videos'] = get_video_info_for_course_and_profiles(
                unicode(course_id), video_profiles
            )
        except ValInternalError:  # pragma: nocover
            # Fall back to an empty mapping if VAL is unavailable.
            self.local_cache['course_videos'] = {}

    def __iter__(self):
        def parent_or_requested_block_type(usage_key):
            """
            Returns whether the usage_key's block_type is one of self.block_types or a parent type.
            """
            return (
                usage_key.block_type in self.block_types or
                usage_key.block_type in BLOCK_TYPES_WITH_CHILDREN
            )

        def create_module(descriptor):
            """
            Factory method for creating and binding a module for the given descriptor.
            """
            field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                self.course_id, self.request.user, descriptor, depth=0,
            )
            course = get_course_by_id(self.course_id)
            return get_module_for_descriptor(
                self.request.user, self.request, descriptor, field_data_cache, self.course_id, course=course
            )

        with modulestore().bulk_operations(self.course_id):
            child_to_parent = {}
            # Depth-first traversal of the course tree rooted at start_block.
            stack = [self.start_block]
            while stack:
                curr_block = stack.pop()

                if curr_block.hide_from_toc:
                    # For now, if the 'hide_from_toc' setting is set on the block, do not traverse down
                    # the hierarchy. The reason being is that these blocks may not have human-readable names
                    # to display on the mobile clients.
                    # Eventually, we'll need to figure out how we want these blocks to be displayed on the
                    # mobile clients. As they are still accessible in the browser, just not navigatable
                    # from the table-of-contents.
                    continue

                if curr_block.location.block_type in self.block_types:
                    if not has_access(self.request.user, 'load', curr_block, course_key=self.course_id):
                        continue

                    summary_fn = self.block_types[curr_block.category]
                    block_path = list(path(curr_block, child_to_parent, self.start_block))
                    unit_url, section_url = find_urls(self.course_id, curr_block, child_to_parent, self.request)

                    yield {
                        "path": block_path,
                        "named_path": [b["name"] for b in block_path],
                        "unit_url": unit_url,
                        "section_url": section_url,
                        "summary": summary_fn(self.course_id, curr_block, self.request, self.local_cache)
                    }

                if curr_block.has_children:
                    children = get_dynamic_descriptor_children(
                        curr_block,
                        self.request.user.id,
                        create_module,
                        usage_key_filter=parent_or_requested_block_type
                    )
                    # Push children in reverse so they come off the stack
                    # (and are yielded) in their original order.
                    for block in reversed(children):
                        stack.append(block)
                        child_to_parent[block] = curr_block
def path(block, child_to_parent, start_block):
    """Return ancestor metadata for *block*, ordered outermost-first.

    Walks up the child->parent mapping, skipping *start_block* itself,
    collecting a dict of name/category/id per ancestor; the chain is
    returned reversed so the root-most ancestor comes first.
    """
    ancestors = []
    node = block
    while node in child_to_parent:
        node = child_to_parent[node]
        if node is not start_block:
            ancestors.append({
                # to be consistent with other edx-platform clients, return
                # the defaulted display name
                'name': node.display_name_with_default_escaped,
                'category': node.category,
                'id': unicode(node.location)
            })
    return reversed(ancestors)
def find_urls(course_id, block, child_to_parent, request):
    """
    Find the section and unit urls for a block.

    Returns:
        unit_url, section_url:
            unit_url (str): The url of a unit
            section_url (str): The url of a section
    """
    # Collect the ancestor chain, then reverse it so it reads root-first
    # (index 0 is the traversal root; deeper ancestors follow).
    block_path = []
    while block in child_to_parent:
        block = child_to_parent[block]
        block_path.append(block)

    block_list = list(reversed(block_path))
    block_count = len(block_list)

    chapter_id = block_list[1].location.block_id if block_count > 1 else None
    section = block_list[2] if block_count > 2 else None

    position = None
    if block_count > 3:
        # 1-based index of the unit within its section, passed as the
        # 'position' kwarg of the courseware_position URL.
        position = 1
        for block in section.children:
            if block.name == block_list[3].url_name:
                break
            position += 1

    kwargs = {'course_id': unicode(course_id)}

    # Fall back to progressively less specific URLs depending on how
    # deep in the hierarchy the block lives.
    if chapter_id is None:
        course_url = reverse("courseware", kwargs=kwargs, request=request)
        return course_url, course_url

    kwargs['chapter'] = chapter_id
    if section is None:
        chapter_url = reverse("courseware_chapter", kwargs=kwargs, request=request)
        return chapter_url, chapter_url

    kwargs['section'] = section.url_name
    section_url = reverse("courseware_section", kwargs=kwargs, request=request)
    if position is None:
        return section_url, section_url

    kwargs['position'] = position
    unit_url = reverse("courseware_position", kwargs=kwargs, request=request)
    return unit_url, section_url
def video_summary(video_profiles, course_id, video_descriptor, request, local_cache):
    """
    returns summary dict for the given video module
    """
    # Fields that can be reported regardless of where the video is hosted.
    always_available_data = {
        "name": video_descriptor.display_name,
        "category": video_descriptor.category,
        "id": unicode(video_descriptor.scope_ids.usage_id),
        "only_on_web": video_descriptor.only_on_web,
    }

    if video_descriptor.only_on_web:
        # Web-only videos expose no playable data to mobile clients.
        ret = {
            "video_url": None,
            "video_thumbnail_url": None,
            "duration": 0,
            "size": 0,
            "transcripts": {},
            "language": None,
        }
        ret.update(always_available_data)
        return ret

    # Get encoded videos
    video_data = local_cache['course_videos'].get(video_descriptor.edx_video_id, {})

    # Get highest priority video to populate backwards compatible field
    # (video_profiles is iterated in order; the first matching profile wins).
    default_encoded_video = {}
    if video_data:
        for profile in video_profiles:
            default_encoded_video = video_data['profiles'].get(profile, {})
            if default_encoded_video:
                break

    if default_encoded_video:
        video_url = default_encoded_video['url']
    # Then fall back to VideoDescriptor fields for video URLs
    elif video_descriptor.html5_sources:
        video_url = video_descriptor.html5_sources[0]
    else:
        video_url = video_descriptor.source

    # Get duration/size, else default
    duration = video_data.get('duration', None)
    size = default_encoded_video.get('file_size', 0)

    # Transcripts... one REST URL per available translation language.
    transcripts_info = video_descriptor.get_transcripts_info()
    transcript_langs = video_descriptor.available_translations(transcripts_info, verify_assets=False)

    transcripts = {
        lang: reverse(
            'video-transcripts-detail',
            kwargs={
                'course_id': unicode(course_id),
                'block_id': video_descriptor.scope_ids.usage_id.block_id,
                'lang': lang
            },
            request=request,
        )
        for lang in transcript_langs
    }

    ret = {
        "video_url": video_url,
        "video_thumbnail_url": None,
        "duration": duration,
        "size": size,
        "transcripts": transcripts,
        "language": video_descriptor.get_default_transcript_language(transcripts_info),
        "encoded_videos": video_data.get('profiles')
    }
    ret.update(always_available_data)
    return ret
| agpl-3.0 |
FiveEye/ml-notebook | dlp/ch6_2_pretrained_embedding.py | 1 | 2633 | import os
imdb_dir = '/home/han/code/data/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')

# Processing the labels of the raw IMDB data
labels = []
texts = []

for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname))
            texts.append(f.read())
            f.close()
            # Label 0 for negative reviews, 1 for positive ones.
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)

# Tokenizing the text of the raw IMDB data
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

maxlen = 100                 # truncate/pad every review to 100 tokens
training_samples = 10000     # reviews used for training
validation_samples = 10000   # reviews used for validation
max_words = 10000            # vocabulary size (most frequent words kept)

tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

word_index = tokenizer.word_index

data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)

# Shuffle before splitting: the raw data is ordered neg-then-pos.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]

x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples : training_samples + validation_samples]
y_val = labels[training_samples : training_samples + validation_samples]

# Parsing the GloVe word-embedding file
glove_dir = '/home/han/code/models/glove.6B'

embeddings_index = {}
f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))
for line in f:
    # Each line is: word followed by its 100 embedding coefficients.
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()

# preparing the glove matrix
embedding_dim = 100

embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # Words absent from GloVe keep their all-zero row.
            embedding_matrix[i] = embedding_vector
        else:
            print("Not found ", word)

# build model
from keras.models import Sequential
from keras.layers import Embedding, Dense, Flatten, LSTM

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(LSTM(32))
#model.add(Flatten())
#model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Load the pretrained GloVe weights and freeze the embedding layer.
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False

print(model.summary())

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))
mbernasocchi/inasafe | safe/common/parameters/resource_parameter.py | 8 | 2162 | # coding=utf-8
"""Resource Parameter."""
import os
import sys
# Make the vendored ``parameters`` package importable regardless of the
# current working directory: resolve its location relative to this file
# and add it to sys.path exactly once.
PARAMETERS_DIR = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__), '..', '..', '..', 'safe_extras',
        'parameters'))
if PARAMETERS_DIR not in sys.path:
    sys.path.append(PARAMETERS_DIR)
from parameters.float_parameter import FloatParameter # NOQA
from parameters.unit import Unit # NOQA
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
class ResourceParameter(FloatParameter):
    """A float parameter specialised for the resources used in InaSAFE
    minimum needs.

    Adds a supply ``frequency`` and a measurement ``unit`` on top of the
    plain float value.

    :param guid: The unique reference to use when addressing this value.
    :type guid: str, None
    """

    def __init__(self, guid=None):
        super(ResourceParameter, self).__init__(guid)
        self._frequency = ''
        self._unit = Unit()

    @property
    def frequency(self):
        """The frequency that the resource needs to be supplied getter.

        :returns: The frequency.
        :rtype: str
        """
        return self._frequency

    @frequency.setter
    def frequency(self, frequency):
        """Set the frequency that the resource needs to be supplied.

        :param frequency: The frequency of the resource.
        :type frequency: str
        """
        self._frequency = frequency

    # pylint: disable=W0221
    @property
    def unit(self):
        """Property for the unit for the parameter.

        :returns: The unit of the parameter.
        :rtype: Unit
        """
        return self._unit

    @unit.setter
    def unit(self, unit):
        """Setter for unit for the parameter.

        :param unit: Unit for parameter
        :type unit: Unit
        """
        self._unit = unit
    # pylint: enable=W0221

    def serialize(self):
        """Convert the parameter into a dictionary.

        :return: The parameter dictionary.
        :rtype: dict
        """
        pickle = super(ResourceParameter, self).serialize()
        pickle['frequency'] = self.frequency
        pickle['unit'] = self._unit.serialize()
        return pickle
| gpl-3.0 |
mano3m/CouchPotatoServer | libs/dateutil/tz.py | 217 | 32988 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
from six import string_types, PY3
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
def tzname_in_python2(myfunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was
    changed to unicode strings.  On Python 3 the wrapped function is
    called unchanged; on Python 2 its result is encoded to bytes.
    """
    # Local import keeps the module's import block untouched.
    from functools import wraps

    # functools.wraps preserves the wrapped function's __name__ and
    # __doc__, so decorated tzname() methods remain introspectable.
    @wraps(myfunc)
    def inner_func(*args, **kwargs):
        if PY3:
            return myfunc(*args, **kwargs)
        else:
            return myfunc(*args, **kwargs).encode()
    return inner_func
# Shared zero-length offset returned wherever no UTC/DST shift applies.
ZERO = datetime.timedelta(0)
# Proleptic-Gregorian ordinal of the Unix epoch (1970-01-01); used to turn
# naive datetimes into epoch-relative second counts without time.mktime.
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
    """tzinfo implementation for the UTC timezone (fixed zero offset)."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return "UTC"

    def __eq__(self, other):
        # Equal to any other tzutc, or to a tzoffset with a zero offset.
        if isinstance(other, tzutc):
            return True
        return isinstance(other, tzoffset) and other._offset == ZERO

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """tzinfo with a fixed, named offset from UTC.

    ``offset`` is given in seconds east of UTC.
    """

    def __init__(self, name, offset):
        self._name = name
        self._offset = datetime.timedelta(seconds=offset)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # A fixed-offset zone never observes DST.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._name

    def __eq__(self, other):
        return isinstance(other, tzoffset) and self._offset == other._offset

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        total_seconds = self._offset.days * 86400 + self._offset.seconds
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name), total_seconds)

    __reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
    """tzinfo reflecting the operating system's local time settings.

    Offsets are captured from the ``time`` module at class-definition
    time; the DST decision for a given datetime is made by _isdst().
    """

    _std_offset = datetime.timedelta(seconds=-time.timezone)
    if time.daylight:
        _dst_offset = datetime.timedelta(seconds=-time.altzone)
    else:
        _dst_offset = _std_offset

    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not.
        #
        # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
        #                          dt.minute, dt.second, dt.weekday(), 0, -1))
        # return time.localtime(timestamp).tm_isdst
        #
        # The code above yields the following result:
        #
        #>>> import tz, datetime
        #>>> t = tz.tzlocal()
        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        #'BRDT'
        #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
        #'BRST'
        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        #'BRST'
        #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
        #'BRDT'
        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        #'BRDT'
        #
        # Here is a more stable implementation:
        #
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        return time.localtime(timestamp+time.timezone).tm_isdst

    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return False
        # (An unreachable ``return True`` that followed this statement
        # was removed; the comparison below is the final result.)
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
    """tzinfo implementation backed by a binary TZif timezone file."""

    # http://www.twinsun.com/tz/tz-link.htm
    # ftp://ftp.iana.org/tz/tz*.tar.gz

    def __init__(self, fileobj):
        # Accept either a path or an open binary file object; remember a
        # printable name for __repr__ and pickling.
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
            # The number of UTC/local indicators stored in the file.
            ttisgmtcnt,

            # The number of standard/wall indicators stored in the file.
            ttisstdcnt,

            # The number of leap seconds for which data is
            # stored in the file.
            leapcnt,

            # The number of "transition times" for which data
            # is stored in the file.
            timecnt,

            # The number of "local time types" for which data
            # is stored in the file (must not be zero).
            typecnt,

            # The number of characters of "time zone
            # abbreviation strings" stored in the file.
            charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values of type long, sorted in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as returned by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff of type long, in a standard byte
        # order, followed by a one-byte value for tt_isdst
        # and a one-byte value for tt_abbrind. In each
        # structure, tt_gmtoff gives the number of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by localtime(3), and
        # tt_abbrind serves as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in standard byte order; the
        # first value of each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs; the second gives the total number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst
            for tti in self._ttinfo_list:
                if not tti.isdst:
                    self._ttinfo_before = tti
                    break
            else:
                self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

    def _find_ttinfo(self, dt, laststd=0):
        """Return the _ttinfo in effect at naive wall-time datetime *dt*.

        With ``laststd`` true, return the most recent standard-time
        _ttinfo at or before *dt* instead.
        """
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        idx = 0
        for trans in self._trans_list:
            if timestamp < trans:
                break
            idx += 1
        else:
            # dt is after the last known transition.
            return self._ttinfo_std
        if idx == 0:
            # dt is before the first known transition.
            return self._ttinfo_before
        if laststd:
            while idx > 0:
                tti = self._trans_idx[idx-1]
                if not tti.isdst:
                    return tti
                idx -= 1
            else:
                return self._ttinfo_std
        else:
            return self._trans_idx[idx-1]

    def utcoffset(self, dt):
        if not self._ttinfo_std:
            return ZERO
        return self._find_ttinfo(dt).delta

    def dst(self, dt):
        if not self._ttinfo_dst:
            return ZERO
        tti = self._find_ttinfo(dt)
        if not tti.isdst:
            return ZERO

        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.delta-self._find_ttinfo(dt, laststd=1).delta

        # An alternative for that would be:
        #
        # return self._ttinfo_dst.offset-self._ttinfo_std.offset
        #
        # However, this class stores historical changes in the
        # dst offset, so I belive that this wouldn't be the right
        # way to implement this.

    @tzname_in_python2
    def tzname(self, dt):
        if not self._ttinfo_std:
            return None
        return self._find_ttinfo(dt).abbr

    def __eq__(self, other):
        if not isinstance(other, tzfile):
            return False
        return (self._trans_list == other._trans_list and
                self._trans_idx == other._trans_idx and
                self._ttinfo_list == other._ttinfo_list)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._filename))

    def __reduce__(self):
        # Pickling works by re-reading the file, so it must still exist.
        if not os.path.isfile(self._filename):
            raise ValueError("Unpickable %s class" % self.__class__.__name__)
        return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
    """tzinfo built from explicit STD/DST abbreviations, offsets (in
    seconds) and relativedelta-style transition rules.

    When DST is requested but no rules are given, the defaults are the
    historical US rules: first Sunday of April to last Sunday of October.
    """
    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):
        global relativedelta
        if not relativedelta:
            # Imported lazily to avoid a circular import at module load.
            from dateutil import relativedelta
        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr
        if stdoffset is not None:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        else:
            self._std_offset = ZERO
        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # Default DST offset: one hour ahead of standard time.
            self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO
        if dstabbr and start is None:
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start
        if dstabbr and end is None:
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end

    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr

    def _isdst(self, dt):
        # No start rule means this zone never observes DST.
        if not self._start_delta:
            return False
        year = datetime.datetime(dt.year, 1, 1)
        start = year+self._start_delta
        end = year+self._end_delta
        dt = dt.replace(tzinfo=None)
        # The second branch handles rules where DST wraps the year
        # boundary (start falls after end within the calendar year).
        if start < end:
            return dt >= start and dt < end
        else:
            return dt >= start or dt < end

    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return False
        return (self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr and
                self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._start_delta == other._start_delta and
                self._end_delta == other._end_delta)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzstr(tzrange):
    """tzinfo parsed from a POSIX ``TZ``-style string (e.g. "EST5EDT")."""
    def __init__(self, s):
        global parser
        if not parser:
            # Imported lazily to avoid a circular import at module load.
            from dateutil import parser
        self._s = s

        res = parser._parsetz(s)
        if res is None:
            raise ValueError("unknown string format")

        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC"):
            res.stdoffset *= -1

        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)

        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)

    def _delta(self, x, isend=0):
        # Translate one parsed transition spec into a relativedelta.
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    kwargs["day"] = 1
                else:
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday

        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)

        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200

        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset-self._std_offset
            kwargs["seconds"] -= delta.seconds+delta.days*86400

        return relativedelta.relativedelta(**kwargs)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
    """One STANDARD/DAYLIGHT component of a VTIMEZONE definition.

    Offsets are given in seconds and stored as timedeltas.
    """
    def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
                 tzname=None, rrule=None):
        from_delta = datetime.timedelta(seconds=tzoffsetfrom)
        to_delta = datetime.timedelta(seconds=tzoffsetto)
        self.tzoffsetfrom = from_delta
        self.tzoffsetto = to_delta
        # Size of the clock jump at a transition into this component.
        self.tzoffsetdiff = to_delta - from_delta
        self.isdst = isdst
        self.tzname = tzname
        self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
    """tzinfo assembled from the STANDARD/DAYLIGHT components of a VTIMEZONE.

    Keeps a small (10-entry) MRU cache of datetime -> component lookups,
    since consecutive calls tend to query the same datetimes.
    """
    def __init__(self, tzid, comps=None):
        # Use a None sentinel instead of a mutable default argument so
        # instances never share a list object.
        self._tzid = tzid
        self._comps = comps if comps is not None else []
        self._cachedate = []
        self._cachecomp = []
    def _find_comp(self, dt):
        """Return the component (STANDARD or DAYLIGHT) in effect at *dt*."""
        if len(self._comps) == 1:
            return self._comps[0]
        dt = dt.replace(tzinfo=None)
        try:
            return self._cachecomp[self._cachedate.index(dt)]
        except ValueError:
            pass
        lastcomp = None
        lastcompdt = None
        for comp in self._comps:
            if not comp.isdst:
                # Handle the extra hour in DST -> STD
                compdt = comp.rrule.before(dt - comp.tzoffsetdiff, inc=True)
            else:
                compdt = comp.rrule.before(dt, inc=True)
            if compdt and (not lastcompdt or lastcompdt < compdt):
                lastcompdt = compdt
                lastcomp = comp
        if not lastcomp:
            # RFC says nothing about what to do when a given
            # time is before the first onset date. We'll look for the
            # first standard component, or the first component, if
            # none is found.
            for comp in self._comps:
                if not comp.isdst:
                    lastcomp = comp
                    break
            else:
                # BUG FIX: the original did "lastcomp = comp[0]", indexing
                # the loop variable (a component object, not subscriptable)
                # and raising TypeError; fall back to the first component.
                lastcomp = self._comps[0]
        # Maintain the bounded MRU cache (most recent first).
        self._cachedate.insert(0, dt)
        self._cachecomp.insert(0, lastcomp)
        if len(self._cachedate) > 10:
            self._cachedate.pop()
            self._cachecomp.pop()
        return lastcomp
    def utcoffset(self, dt):
        return self._find_comp(dt).tzoffsetto
    def dst(self, dt):
        comp = self._find_comp(dt)
        if comp.isdst:
            return comp.tzoffsetdiff
        else:
            return ZERO
    @tzname_in_python2
    def tzname(self, dt):
        return self._find_comp(dt).tzname
    def __repr__(self):
        return "<tzicalvtz %s>" % repr(self._tzid)
    __reduce__ = object.__reduce__
class tzical(object):
    """Timezones extracted from the VTIMEZONE sections of an iCalendar
    (RFC 2445) file or file-like object.

    :param fileobj: a path string or an open file-like object containing
        iCalendar data.  When a path is given, the file is opened and
        closed by this constructor (the original leaked the handle).
    """
    def __init__(self, fileobj):
        global rrule
        if not rrule:
            from dateutil import rrule
        opened_here = False
        if isinstance(fileobj, string_types):
            self._s = fileobj
            # ical should be encoded in UTF-8 with CRLF
            fileobj = open(fileobj, 'r')
            opened_here = True
        elif hasattr(fileobj, "name"):
            self._s = fileobj.name
        else:
            self._s = repr(fileobj)
        self._vtz = {}
        try:
            self._parse_rfc(fileobj.read())
        finally:
            # Close only the handle we opened ourselves.
            if opened_here:
                fileobj.close()
    def keys(self):
        """Return the list of TZIDs defined in the parsed data."""
        return list(self._vtz.keys())
    def get(self, tzid=None):
        """Return the tzinfo for *tzid*.

        *tzid* may be omitted only when exactly one timezone is defined;
        otherwise a ValueError is raised.
        """
        if tzid is None:
            keys = list(self._vtz.keys())
            if len(keys) == 0:
                raise ValueError("no timezones defined")
            elif len(keys) > 1:
                raise ValueError("more than one timezone available")
            tzid = keys[0]
        return self._vtz.get(tzid)
    def _parse_offset(self, s):
        """Parse a UTC offset of the form [+-]HHMM or [+-]HHMMSS into seconds."""
        s = s.strip()
        if not s:
            raise ValueError("empty offset")
        if s[0] in ('+', '-'):
            signal = (-1, +1)[s[0] == '+']
            s = s[1:]
        else:
            signal = +1
        if len(s) == 4:
            return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
        elif len(s) == 6:
            return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
        else:
            raise ValueError("invalid offset: " + s)
    def _parse_rfc(self, s):
        """Parse iCalendar text, populating self._vtz with one
        _tzicalvtz per VTIMEZONE section."""
        lines = s.splitlines()
        if not lines:
            raise ValueError("empty string")
        # Unfold continuation lines (RFC 2445 section 4.1: a line starting
        # with a space continues the previous line).
        i = 0
        while i < len(lines):
            line = lines[i].rstrip()
            if not line:
                del lines[i]
            elif i > 0 and line[0] == " ":
                lines[i-1] += line[1:]
                del lines[i]
            else:
                i += 1
        tzid = None
        comps = []
        invtz = False
        comptype = None
        for line in lines:
            if not line:
                continue
            name, value = line.split(':', 1)
            parms = name.split(';')
            if not parms:
                raise ValueError("empty property name")
            name = parms[0].upper()
            parms = parms[1:]
            if invtz:
                if name == "BEGIN":
                    if value in ("STANDARD", "DAYLIGHT"):
                        # Process component
                        pass
                    else:
                        raise ValueError("unknown component: "+value)
                    # Reset the per-component accumulation state.
                    comptype = value
                    founddtstart = False
                    tzoffsetfrom = None
                    tzoffsetto = None
                    rrulelines = []
                    tzname = None
                elif name == "END":
                    if value == "VTIMEZONE":
                        if comptype:
                            raise ValueError("component not closed: "+comptype)
                        if not tzid:
                            raise ValueError("mandatory TZID not found")
                        if not comps:
                            raise ValueError("at least one component is needed")
                        # Process vtimezone
                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
                        invtz = False
                    elif value == comptype:
                        if not founddtstart:
                            raise ValueError("mandatory DTSTART not found")
                        if tzoffsetfrom is None:
                            raise ValueError("mandatory TZOFFSETFROM not found")
                        if tzoffsetto is None:
                            # BUG FIX: the original reported "TZOFFSETFROM"
                            # here, masking which property was missing.
                            raise ValueError("mandatory TZOFFSETTO not found")
                        # Process component
                        rr = None
                        if rrulelines:
                            rr = rrule.rrulestr("\n".join(rrulelines),
                                                compatible=True,
                                                ignoretz=True,
                                                cache=True)
                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
                                              (comptype == "DAYLIGHT"),
                                              tzname, rr)
                        comps.append(comp)
                        comptype = None
                    else:
                        raise ValueError("invalid component end: "+value)
                elif comptype:
                    if name == "DTSTART":
                        rrulelines.append(line)
                        founddtstart = True
                    elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
                        rrulelines.append(line)
                    elif name == "TZOFFSETFROM":
                        if parms:
                            raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
                        tzoffsetfrom = self._parse_offset(value)
                    elif name == "TZOFFSETTO":
                        if parms:
                            raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
                        tzoffsetto = self._parse_offset(value)
                    elif name == "TZNAME":
                        if parms:
                            raise ValueError("unsupported TZNAME parm: "+parms[0])
                        tzname = value
                    elif name == "COMMENT":
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
                else:
                    if name == "TZID":
                        if parms:
                            raise ValueError("unsupported TZID parm: "+parms[0])
                        tzid = value
                    elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
            elif name == "BEGIN" and value == "VTIMEZONE":
                tzid = None
                comps = []
                invtz = True
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
    # Candidate tz files (relative names are searched in TZPATHS) and
    # zoneinfo directories used by gettz() on POSIX platforms.
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
    # Windows ships no on-disk zoneinfo database; gettz() falls back to
    # tzwin / the bundled zoneinfo package instead.
    TZFILES = []
    TZPATHS = []
def gettz(name=None):
    """Return a tzinfo for *name*, or None if nothing matches.

    Resolution order: the TZ environment variable (when *name* is empty),
    system tz files, a named zone in TZPATHS, the Windows registry (tzwin),
    the bundled zoneinfo database, TZ-style strings, and finally the
    "GMT"/"UTC" and local-zone-name aliases.
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        # No usable name: read the system's local time definition.
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # POSIX allows TZ=":America/New_York"; strip the leading colon.
            # BUG FIX: the original used name[:-1], which dropped the LAST
            # character of the zone name instead of the leading ":".
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    # Zone names may use '_' where the user typed a space.
                    filepath = filepath.replace(' ', '_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin:
                    try:
                        tz = tzwin(name)
                    except OSError:
                        pass
                if not tz:
                    from dateutil.zoneinfo import gettz
                    tz = gettz(name)
                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz
# vim:ts=4:sw=4:et
| gpl-3.0 |
facebookexperimental/eden | eden/scm/edenscm/hgext/hggit/__init__.py | 2 | 16871 | # git.py - git server bridge
#
# Copyright 2008 Scott Chacon <schacon at gmail dot com>
# also some code (and help) borrowed from durin42
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
"""push and pull from a Git server
This extension lets you communicate (push and pull) with a Git server.
This way you can use Git hosting for your project or collaborate with a
project that is in Git. A bridger of worlds, this plugin be.
Try hg clone git:// or hg clone git+ssh://
For more information and instructions, see :hg:`help git`
"""
# global modules
import os
import shutil
import sys
import warnings
from bisect import insort
from bindings import nodemap as nodemapmod
from edenscm.mercurial import (
bundlerepo,
cmdutil,
demandimport,
discovery,
extensions,
help,
hg,
localrepo,
manifest,
phases,
pycompat,
revset,
scmutil,
templatekw,
ui as hgui,
util as hgutil,
)
from edenscm.mercurial.error import LookupError
from edenscm.mercurial.i18n import _
from edenscm.mercurial.node import hex, nullid
# local modules
from . import compat, gitrepo, hgrepo, overlay, util, verify
from .git_handler import GitHandler
# Disable DeprecationWarning from newer dulwich since hggit also supports older
# dulwich.
warnings.filterwarnings(r"ignore", r"", DeprecationWarning, r"edenscm.hgext.hggit")
warnings.filterwarnings(
r"ignore", r"", DeprecationWarning, r"edenscm.mercurial.pycompat"
)
try:
from edenscm.mercurial import exchange
exchange.push # existed in first iteration of this file
except (AttributeError, ImportError):
# We only *use* the exchange module in hg 3.2+, so this is safe
pass
try:
# pyre-fixme[21]: Could not find name `ignore` in `edenscm.mercurial`.
from edenscm.mercurial import ignore
# pyre-fixme[16]: Module `mercurial` has no attribute `ignore`.
ignore.readpats
ignoremod = True
except (AttributeError, ImportError):
# The ignore module disappeared in Mercurial 3.5
ignoremod = False
baseset = set
try:
baseset = revset.baseset
except AttributeError:
# baseset was added in hg 3.0
pass
demandimport.ignore.extend(["collections"])
__version__ = "0.8.10"
testedwith = (
"2.8.2 2.9.2 3.0.2 3.1.2 3.2.4 3.3.3 3.4.2 3.5.2 3.6.3 3.7.3"
"3.8.4 3.9.2 4.0.2 4.1.3 4.2.3 4.3.3 4.4"
)
buglink = "https://bitbucket.org/durin42/hg-git/issues"
cmdtable = {}
configtable = {}
try:
from edenscm.mercurial import registrar
command = registrar.command(cmdtable)
configitem = registrar.configitem(configtable)
compat.registerconfigs(configitem)
except (ImportError, AttributeError):
command = cmdutil.command(cmdtable)
# support for `hg clone git://github.com/defunkt/facebox.git`
# also hg clone git+ssh://git@github.com/schacon/simplegit.git
for _scheme in util.gitschemes:
hg.schemes[_scheme] = gitrepo
# support for `hg clone localgitrepo`
_oldlocal = hg.schemes["file"]
try:
urlcls = hgutil.url
except AttributeError:
    class urlcls(object):
        """Minimal stand-in for hgutil.url on very old Mercurial versions:
        only localpath() is provided, which is all this extension needs."""
        def __init__(self, path):
            self.p = hgutil.drop_scheme("file", path)
        def localpath(self):
            return self.p
def _isgitdir(path):
    """True if the given file path is a git repo."""
    if os.path.exists(os.path.join(path, ".hg")):
        # Mercurial metadata wins: treat it as an hg repo, not git.
        return False
    if os.path.exists(os.path.join(path, ".git")):
        # Full (non-bare) git checkout.
        return True
    # Bare repositories keep HEAD/objects/refs at the top level.
    bare_markers = ("HEAD", "objects", "refs")
    return all(os.path.exists(os.path.join(path, m)) for m in bare_markers)
def _local(path):
    """Scheme handler for plain paths: route git directories and git-style
    ssh URIs (which Mercurial mistakes for file paths) to the git repo
    class; everything else goes to Mercurial's original handler."""
    localpath = urlcls(path).localpath()
    if _isgitdir(localpath) or util.isgitsshuri(localpath):
        return gitrepo
    return _oldlocal(path)
hg.schemes["file"] = _local
# we need to wrap this so that git-like ssh paths are not prepended with a
# local filesystem path. ugh.
def _url(orig, path, **kwargs):
    """Wrapper for hgutil.url: strip any local filesystem prefix that was
    prepended to a git-style ssh path (user@host:repo)."""
    # we'll test for 'git@' then use our heuristic method to determine if it's
    # a git uri
    if not (path.startswith(pycompat.ossep) and ":" in path):
        return orig(path, **kwargs)
    # the file path will be everything up until the last slash right before the
    # ':'
    lastsep = path.rindex(pycompat.ossep, None, path.index(":")) + 1
    gituri = path[lastsep:]
    if util.isgitsshuri(gituri):
        return orig(gituri, **kwargs)
    return orig(path, **kwargs)
extensions.wrapfunction(hgutil, "url", _url)
def _httpgitwrapper(orig):
    # we should probably test the connection but for now, we just keep it
    # simple and check for a url ending in '.git'
    def httpgitscheme(uri):
        """Pick the git repo class for '.git' URLs, else defer to *orig*."""
        return gitrepo if uri.endswith(".git") else orig
    return httpgitscheme
hg.schemes["https"] = _httpgitwrapper(hg.schemes["https"])
hg.schemes["http"] = _httpgitwrapper(hg.schemes["http"])
hgdefaultdest = hg.defaultdest
def defaultdest(source):
    """Compute the default clone destination, stripping a trailing ".git".

    The original also looped over util.gitschemes first, but every branch
    of that loop reduced to the same "strip '.git' and delegate" behavior
    as the plain endswith check, so the loop was dead code and is removed.
    """
    if source.endswith(".git"):
        return hgdefaultdest(source[:-4])
    return hgdefaultdest(source)
hg.defaultdest = defaultdest
def getversion():
    """return version with dependencies for hg --version -v"""
    # Imported lazily so that merely loading this module does not require
    # dulwich until the version string is actually requested.
    import dulwich
    dulver = ".".join(str(i) for i in dulwich.__version__)
    return __version__ + (" (dulwich %s)" % dulver)
# defend against tracebacks if we specify -r in 'hg pull'
def safebranchrevs(orig, lrepo, repo, branches, revs):
    """Wrap hg.addbranchrevs: drop a checkout rev that does not exist in
    the local changelog (which can happen when pulling from git)."""
    revs, co = orig(lrepo, repo, branches, revs)
    if (
        isinstance(co, int)
        and hgutil.safehasattr(lrepo, "changelog")
        and co not in lrepo.changelog
    ):
        # The requested checkout rev is unknown locally; fall back to the
        # default checkout target instead of letting hg traceback.
        co = None
    return revs, co
if getattr(hg, "addbranchrevs", False):
extensions.wrapfunction(hg, "addbranchrevs", safebranchrevs)
def extsetup(ui):
    """Extension setup: register the gitnode template keyword, the git
    revsets, the 'git' help topic, and wrap exchange/manifest hooks."""
    templatekw.keywords.update({"gitnode": gitnodekw})
    revset.symbols.update({"fromgit": revset_fromgit, "gitnode": revset_gitnode})
    helpdir = os.path.join(os.path.dirname(__file__), "help")
    entry = (
        ["git"],
        _("Working with Git Repositories"),
        # Mercurial >= 3.6: doc(ui)
        lambda *args: open(os.path.join(helpdir, "git.rst")).read(),
    )
    insort(help.helptable, entry)
    # Mercurial >= 3.2
    extensions.wrapfunction(exchange, "pull", exchangepull)
    # Mercurial >= 3.4
    extensions.wrapfunction(manifest.manifestdict, "diff", overlay.wrapmanifestdictdiff)
def reposetup(ui, repo):
    """Swap in a git-aware repository subclass for local (non-git) repos."""
    if isinstance(repo, gitrepo.gitrepo):
        return
    repo.__class__ = hgrepo.generate_repo_subclass(repo.__class__)
@command("external-sync", [], _("REMOTE HEAD LIMIT"))
def externalsync(ui, repo, remote, head, limit):
limit = int(limit)
repo.ui.status(
_("importing up to %d commits from %s in %s\n") % (limit, remote, head)
)
with repo.wlock(), repo.lock():
refs = repo.githandler.fetch_pack(remote, [head])
refs = repo.githandler.filter_refs(refs, [head])
imported = repo.githandler.import_git_objects(refs, limit)
repo.ui.status(_("imported %s commits\n") % imported)
@command("gimport")
def gimport(ui, repo, remote_name=None):
"""import commits from Git to Mercurial"""
with repo.wlock(), repo.lock():
repo.githandler.import_commits(remote_name)
@command("gexport")
def gexport(ui, repo):
"""export commits from Mercurial to Git"""
repo.githandler.export_commits()
@command("gclear")
def gclear(ui, repo):
"""clear out the Git cached data
Strips all Git-related metadata from the repo, including the mapping
between Git and Mercurial changesets. This is an irreversible
destructive operation that may prevent further interaction with
other clones.
"""
repo.ui.status(_("clearing out the git cache data\n"))
repo.githandler.clear()
@command(
    "gverify", [("r", "rev", "", _("revision to verify"), _("REV"))], _("[-r REV]")
)
def gverify(ui, repo, **opts):
    """verify that a Mercurial rev matches the corresponding Git rev

    Given a Mercurial revision that has a corresponding Git revision in the map,
    this attempts to answer whether that revision has the same contents as the
    corresponding Git revision.
    """
    # Defaults to the working directory parent when no -r is given.
    ctx = scmutil.revsingle(repo, opts.get("rev"), ".")
    return verify.verify(ui, repo, ctx)
@command("git-cleanup")
def git_cleanup(ui, repo):
"""clean up Git commit map after history editing"""
items = repo.githandler._map.items()
if ui.configbool("hggit", "indexedlognodemap", False):
dir = repo.sharedvfs.join(repo.githandler.map_file + "-log")
tempdir = dir + ".temp"
if os.path.exists(tempdir):
hgutil.removedirs(tempdir)
nodemap = nodemapmod.nodemap(tempdir)
for gitsha, hgsha in items:
if hgsha in repo:
nodemap.add(gitsha, hgsha)
nodemap.flush()
with repo.wlock():
tempdir2 = dir + ".temp2"
hgutil.rename(dir, tempdir2)
hgutil.rename(tempdir, dir)
shutil.rmtree(tempdir2)
new_map = []
for gitsha, hgsha in items:
if hgsha in repo:
new_map.append(pycompat.encodeutf8("%s %s\n" % (hex(gitsha), hex(hgsha))))
wlock = repo.wlock()
try:
f = repo.sharedvfs(GitHandler.map_file, "wb")
list(map(f.write, new_map))
finally:
wlock.release()
ui.status(_("git commit map cleaned\n"))
@command("git-updatemeta")
def gitupdatemeta(ui, repo):
"""Reads git hashes from the latest hg commits and adds them to the git-hg
mapping."""
with repo.wlock(), repo.lock():
stack = repo.heads()
githandler = repo.githandler
parents = repo.changelog.parents
clrev = repo.changelog.rev
clrevision = repo.changelog.changelogrevision
phase = repo._phasecache.phase
public = phases.public
seen = set(stack)
seen.add(nullid)
while stack:
node = stack.pop()
hgsha = hex(node)
gitsha = githandler.map_git_get(hgsha)
# If the gitsha is not already known, add it if we can
if gitsha is None:
gitsha = None
commitdata = clrevision(node)
if (
commitdata._rawextra is not None
and b"convert_revision" in commitdata._rawextra
):
gitsha = commitdata.extra.get("convert_revision")
# If there is no git sha, it may be a local commit. Just walk past
# it.
if gitsha:
githandler.map_set(gitsha, hgsha)
if gitsha or phase(repo, clrev(node)) != public:
for pnode in parents(node):
if pnode not in seen:
seen.add(pnode)
stack.append(pnode)
githandler.save_map(githandler.map_file)
def findcommonoutgoing(orig, repo, other, *args, **kwargs):
    """Wrap discovery.findcommonoutgoing: for git peers, compute the common
    incoming set from the git refs before delegating."""
    if isinstance(other, gitrepo.gitrepo):
        heads = repo.githandler.get_refs(other.path)[0]
        # Normalize positional args into keywords so we can inject
        # 'commoninc' regardless of how the caller invoked us.
        kw = {}
        kw.update(kwargs)
        for val, k in zip(args, ("onlyheads", "force", "commoninc", "portable")):
            kw[k] = val
        force = kw.get("force", False)
        commoninc = kw.get("commoninc", None)
        if commoninc is None:
            commoninc = discovery.findcommonincoming(
                repo, other, heads=heads, force=force
            )
            kw["commoninc"] = commoninc
        return orig(repo, other, **kw)
    return orig(repo, other, *args, **kwargs)
extensions.wrapfunction(discovery, "findcommonoutgoing", findcommonoutgoing)
def getremotechanges(orig, ui, repo, other, *args, **opts):
    """Wrap bundlerepo.getremotechanges so incoming from a git peer goes
    through the git handler."""
    if isinstance(other, gitrepo.gitrepo):
        if args:
            revs = args[0]
        else:
            revs = opts.get("onlyheads", opts.get("revs"))
        r, c, cleanup = repo.githandler.getremotechanges(other, revs)
        # ugh. This is ugly even by mercurial API compatibility standards
        # Older hg versions (without 'onlyheads' in the signature) do the
        # cleanup themselves, so hand them None.
        if sys.version_info[0] >= 3:
            if "onlyheads" not in orig.__code__.co_varnames:
                cleanup = None
        else:
            if "onlyheads" not in orig.func_code.co_varnames:
                cleanup = None
        return r, c, cleanup
    return orig(ui, repo, other, *args, **opts)
extensions.wrapfunction(bundlerepo, "getremotechanges", getremotechanges)
def peer(orig, uiorrepo, *args, **opts):
    """After creating a peer, remember the originating local repository on
    git peers so the git handler can reach it later."""
    created = orig(uiorrepo, *args, **opts)
    if isinstance(created, gitrepo.gitrepo) and isinstance(
        uiorrepo, localrepo.localrepository
    ):
        created.localrepo = uiorrepo
    return created
extensions.wrapfunction(hg, "peer", peer)
def isvalidlocalpath(orig, self, path):
    """Wrapper for ui.path._isvalidlocalpath: also accept git directories
    (bare or not) as valid local repository paths."""
    return orig(self, path) or _isgitdir(path)
if hgutil.safehasattr(hgui, "path") and hgutil.safehasattr(
hgui.path, "_isvalidlocalpath"
):
extensions.wrapfunction(hgui.path, "_isvalidlocalpath", isvalidlocalpath)
@util.transform_notgit
def exchangepull(orig, repo, remote, heads=None, force=False, bookmarks=(), **kwargs):
    """Wrap exchange.pull: pulls from a git peer go through the git handler
    inside a pull operation (with a transaction manager when available)."""
    if isinstance(remote, gitrepo.gitrepo):
        # transaction manager is present in Mercurial >= 3.3
        try:
            trmanager = getattr(exchange, "transactionmanager")
        except AttributeError:
            trmanager = None
        pullop = exchange.pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
        if trmanager:
            pullop.trmanager = trmanager(repo, "pull", remote.url())
        # NOTE(review): both locks are taken before the try, so a failure
        # acquiring 'lock' would leak 'wlock' - long-standing behavior here.
        wlock = repo.wlock()
        lock = repo.lock()
        try:
            pullop.cgresult = repo.githandler.fetch(remote.path, heads)
            if trmanager:
                pullop.trmanager.close()
            else:
                pullop.closetransaction()
            return pullop
        finally:
            if trmanager:
                pullop.trmanager.release()
            else:
                pullop.releasetransaction()
            lock.release()
            wlock.release()
    else:
        return orig(repo, remote, heads, force, bookmarks=bookmarks, **kwargs)
@util.transform_notgit
def exchangepush(orig, repo, remote, force=False, revs=None, bookmarks=(), **kwargs):
    """Wrap exchange.push: pushes to a git peer go through the git handler,
    wrapped in a push operation for API compatibility."""
    if isinstance(remote, gitrepo.gitrepo):
        # opargs is in Mercurial >= 3.6
        opargs = kwargs.get("opargs")
        if opargs is None:
            opargs = {}
        pushop = exchange.pushoperation(repo, remote, force, revs, bookmarks, **opargs)
        pushop.cgresult = repo.githandler.push(remote.path, revs, force)
        return pushop
    else:
        return orig(repo, remote, force, revs, bookmarks=bookmarks, **kwargs)
if not hgutil.safehasattr(localrepo.localrepository, "push"):
# Mercurial >= 3.2
extensions.wrapfunction(exchange, "push", exchangepush)
def revset_fromgit(repo, subset, x):
    """``fromgit()``
    Select changesets that originate from Git.
    """
    revset.getargs(x, 0, 0, "fromgit takes no arguments")
    git = repo.githandler
    node = repo.changelog.node
    # A rev is "from git" when the hg->git map has an entry for it.
    return baseset(
        (r for r in subset if git.map_git_get(hex(node(r))) is not None), repo=repo
    )
def revset_gitnode(repo, subset, x):
    """``gitnode(hash)``
    Select the changeset that originates in the given Git revision. The hash
    may be abbreviated: `gitnode(a5b)` selects the revision whose Git hash
    starts with `a5b`. Aborts if multiple changesets match the abbreviation.
    """
    args = revset.getargs(x, 1, 1, "gitnode takes one argument")
    rev = revset.getstring(args[0], "the argument to gitnode() must be a hash")
    git = repo.githandler
    node = repo.changelog.node
    def matches(r):
        # Prefix match against the mapped git sha, if any.
        gitnode = git.map_git_get(hex(node(r)))
        if gitnode is None:
            return False
        return gitnode.startswith(rev)
    result = baseset((r for r in subset if matches(r)), repo=repo)
    # len() can never be negative, so the original "0 <= len(result) < 2"
    # reduces to this; zero or one match is acceptable.
    if len(result) < 2:
        return result
    raise LookupError(rev, git.map_file, _("ambiguous identifier"))
def gitnodekw(**args):
    """:gitnode: String. The Git changeset identification hash, as a 40 char
    hexadecimal digit string."""
    ctx = args["ctx"]
    repo = args["repo"]
    # Commits converted from git carry the hash in their extras.
    fromextra = ctx.extra().get("convert_revision", "")
    if fromextra:
        return fromextra
    # Otherwise consult the git handler's hg->git map.
    mapped = repo.githandler.map_git_get(ctx.hex())
    return mapped if mapped is not None else ""
| gpl-2.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/contrib/gis/gdal/driver.py | 221 | 2469 | # prerequisites imports
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Dr_* routines are relevant here.
class Driver(GDALBase):
    "Wraps an OGR Data Source Driver."

    # Case-insensitive aliases for OGR Drivers.
    _alias = {'esri' : 'ESRI Shapefile',
              'shp' : 'ESRI Shapefile',
              'shape' : 'ESRI Shapefile',
              'tiger' : 'TIGER',
              'tiger/line' : 'TIGER',
              }

    def __init__(self, dr_input):
        "Initializes an OGR driver on either a string or integer input."
        if isinstance(dr_input, six.string_types):
            # If a string name of the driver was passed in
            # Drivers must be registered with OGR before lookup by name.
            self._register()

            # Checking the alias dictionary (case-insensitive) to see if an alias
            #  exists for the given driver.
            if dr_input.lower() in self._alias:
                name = self._alias[dr_input.lower()]
            else:
                name = dr_input

            # Attempting to get the OGR driver by the string name.
            # force_bytes: the C API expects a byte string, not unicode.
            dr = capi.get_driver_by_name(force_bytes(name))
        elif isinstance(dr_input, int):
            # Integer input is an index into OGR's registered driver list.
            self._register()
            dr = capi.get_driver(dr_input)
        elif isinstance(dr_input, c_void_p):
            # Already a raw pointer to an OGR driver; use it as-is.
            dr = dr_input
        else:
            raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input)))

        # Making sure we get a valid pointer to the OGR Driver
        if not dr:
            raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input))
        self.ptr = dr

    def __str__(self):
        "Returns the string name of the OGR Driver."
        return capi.get_driver_name(self.ptr)

    def _register(self):
        "Attempts to register all the data source drivers."
        # Only register all if the driver count is 0 (or else all drivers
        # will be registered over and over again)
        if not self.driver_count: capi.register_all()

    # Driver properties
    @property
    def driver_count(self):
        "Returns the number of OGR data source drivers registered."
        return capi.get_driver_count()
| apache-2.0 |
sgiavasis/nipype | nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py | 6 | 1063 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import Apas2Aseg
def test_Apas2Aseg_inputs():
    # Auto-generated spec check (see file header: DO NOT EDIT by hand).
    # Each entry mirrors the trait metadata declared on Apas2Aseg.input_spec;
    # the nose-style yields compare every metadata key/value pair.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='--i %s',
    mandatory=True,
    ),
    out_file=dict(argstr='--o %s',
    mandatory=True,
    ),
    subjects_dir=dict(),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = Apas2Aseg.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Apas2Aseg_outputs():
    # Auto-generated spec check (see file header: DO NOT EDIT by hand).
    # Verifies the trait metadata declared on Apas2Aseg.output_spec.
    output_map = dict(out_file=dict(argstr='%s',
    ),
    )
    outputs = Apas2Aseg.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
tavendo/AutobahnPython | autobahn/xbr/_eip712_channel_open.py | 3 | 5767 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from ._eip712_base import sign, recover, is_address, is_signature, is_eth_privkey, is_bytes16, \
is_block_number, is_chain_id
def _create_eip712_channel_open(chainId: int, verifyingContract: bytes, ctype: int, openedAt: int,
                                marketId: bytes, channelId: bytes, actor: bytes, delegate: bytes,
                                marketmaker: bytes, recipient: bytes, amount: int) -> dict:
    """Build the EIP712 typed-data structure for an XBR channel-open message.

    :param chainId: Blockchain chain ID the signature is scoped to.
    :param verifyingContract: Address of the verifying contract.
    :param ctype: Channel type code.
    :param openedAt: Block number at which the channel is opened.
    :param marketId: Market UUID (16 bytes).
    :param channelId: Channel UUID (16 bytes).
    :param actor: Address of the acting market participant.
    :param delegate: Address of the participant's delegate.
    :param marketmaker: Address of the market maker.
    :param recipient: Payout recipient address.
    :param amount: Channel amount (token base units).
    :return: EIP712 typed-data dict ready for signing/recovery.
    """
    assert is_chain_id(chainId)
    assert is_address(verifyingContract)
    assert type(ctype) == int
    assert is_block_number(openedAt)
    assert is_bytes16(marketId)
    assert is_bytes16(channelId)
    assert is_address(actor)
    assert is_address(delegate)
    assert is_address(marketmaker)
    assert is_address(recipient)
    assert type(amount) == int

    # (field name, EIP712 type) pairs for the primary type, in ABI order.
    fields = [
        ('chainId', 'uint256'),
        ('verifyingContract', 'address'),
        ('ctype', 'uint8'),
        ('openedAt', 'uint256'),
        ('marketId', 'bytes16'),
        ('channelId', 'bytes16'),
        ('actor', 'address'),
        ('delegate', 'address'),
        ('marketmaker', 'address'),
        ('recipient', 'address'),
        ('amount', 'uint256'),
    ]

    message = {
        'chainId': chainId,
        'verifyingContract': verifyingContract,
        'ctype': ctype,
        'openedAt': openedAt,
        'marketId': marketId,
        'channelId': channelId,
        'actor': actor,
        'delegate': delegate,
        'marketmaker': marketmaker,
        'recipient': recipient,
        'amount': amount
    }

    return {
        'types': {
            'EIP712Domain': [
                {'name': 'name', 'type': 'string'},
                {'name': 'version', 'type': 'string'},
            ],
            'EIP712ChannelOpen': [{'name': n, 'type': t} for n, t in fields],
        },
        'primaryType': 'EIP712ChannelOpen',
        'domain': {
            'name': 'XBR',
            'version': '1',
        },
        'message': message,
    }
def sign_eip712_channel_open(eth_privkey: bytes, chainId: int, verifyingContract: bytes, ctype: int,
                             openedAt: int, marketId: bytes, channelId: bytes, actor: bytes, delegate: bytes,
                             marketmaker: bytes, recipient: bytes, amount: int) -> bytes:
    """Sign an EIP712 XBR channel-open message.

    :param eth_privkey: Ethereum private key of the signer (raw 32 bytes).
    :type eth_privkey: bytes

    Remaining parameters describe the channel being opened; see
    ``_create_eip712_channel_open`` for their meaning.

    :return: The signature according to EIP712 (32+32+1 raw bytes).
    :rtype: bytes
    """
    assert is_eth_privkey(eth_privkey)

    data = _create_eip712_channel_open(chainId, verifyingContract, ctype, openedAt, marketId, channelId,
                                       actor, delegate, marketmaker, recipient, amount)
    return sign(eth_privkey, data)
def recover_eip712_channel_open(chainId: int, verifyingContract: bytes, ctype: int, openedAt: int,
                                marketId: bytes, channelId: bytes, actor: bytes, delegate: bytes,
                                marketmaker: bytes, recipient: bytes, amount: int, signature: bytes) -> bytes:
    """
    Recover the signer address the given EIP712 signature was signed with.

    The channel parameters must match those used at signing time; see
    ``_create_eip712_channel_open`` for their meaning.

    :param signature: EIP712 signature to recover from (65 raw bytes).
    :return: The (computed) signer address the signature was signed with.
    :rtype: bytes
    """
    assert is_signature(signature)

    data = _create_eip712_channel_open(chainId, verifyingContract, ctype, openedAt, marketId, channelId,
                                       actor, delegate, marketmaker, recipient, amount)
    return recover(data, signature)
| mit |
varunnaganathan/django | tests/template_tests/test_library.py | 413 | 3753 | from django.template import Library
from django.template.base import Node
from django.test import TestCase
class FilterRegistrationTests(TestCase):
    """Exercise every supported calling convention of Library.filter."""

    def setUp(self):
        self.library = Library()

    def test_filter(self):
        # Bare decorator: registered under the function's own name.
        @self.library.filter
        def func():
            return ''
        self.assertEqual(self.library.filters['func'], func)

    def test_filter_parens(self):
        # Decorator factory with no arguments behaves like the bare form.
        @self.library.filter()
        def func():
            return ''
        self.assertEqual(self.library.filters['func'], func)

    def test_filter_name_arg(self):
        @self.library.filter('name')
        def func():
            return ''
        self.assertEqual(self.library.filters['name'], func)

    def test_filter_name_kwarg(self):
        @self.library.filter(name='name')
        def func():
            return ''
        self.assertEqual(self.library.filters['name'], func)

    def test_filter_call(self):
        # Direct (non-decorator) registration.
        def func():
            return ''
        self.library.filter('name', func)
        self.assertEqual(self.library.filters['name'], func)

    def test_filter_invalid(self):
        msg = "Unsupported arguments to Library.filter: (None, '')"
        with self.assertRaisesMessage(ValueError, msg):
            self.library.filter(None, '')
class InclusionTagRegistrationTests(TestCase):
    """Exercise Library.inclusion_tag registration (default and named)."""

    def setUp(self):
        self.library = Library()

    def test_inclusion_tag(self):
        @self.library.inclusion_tag('template.html')
        def func():
            return ''
        self.assertIn('func', self.library.tags)

    def test_inclusion_tag_name(self):
        @self.library.inclusion_tag('template.html', name='name')
        def func():
            return ''
        self.assertIn('name', self.library.tags)
class SimpleTagRegistrationTests(TestCase):
    """Exercise every supported calling convention of Library.simple_tag."""

    def setUp(self):
        self.library = Library()

    def test_simple_tag(self):
        # Bare decorator form.
        @self.library.simple_tag
        def func():
            return ''
        self.assertIn('func', self.library.tags)

    def test_simple_tag_parens(self):
        @self.library.simple_tag()
        def func():
            return ''
        self.assertIn('func', self.library.tags)

    def test_simple_tag_name_kwarg(self):
        @self.library.simple_tag(name='name')
        def func():
            return ''
        self.assertIn('name', self.library.tags)

    def test_simple_tag_invalid(self):
        # Unlike Library.filter, simple_tag rejects direct registration.
        msg = "Invalid arguments provided to simple_tag"
        with self.assertRaisesMessage(ValueError, msg):
            self.library.simple_tag('invalid')
class TagRegistrationTests(TestCase):
    """Registration of compiler tags via ``Library.tag``.

    A tag function receives ``(parser, token)`` and must return a ``Node``.
    The registration forms mirror those of ``Library.filter``.
    """

    def setUp(self):
        # Fresh registry per test so registrations do not leak between tests.
        self.library = Library()

    def test_tag(self):
        # Bare decorator: registered under the function's own __name__.
        @self.library.tag
        def func(parser, token):
            return Node()
        self.assertEqual(self.library.tags['func'], func)

    def test_tag_parens(self):
        # Decorator factory with no arguments behaves like the bare form.
        @self.library.tag()
        def func(parser, token):
            return Node()
        self.assertEqual(self.library.tags['func'], func)

    def test_tag_name_arg(self):
        # Explicit positional name overrides the function's __name__.
        @self.library.tag('name')
        def func(parser, token):
            return Node()
        self.assertEqual(self.library.tags['name'], func)

    def test_tag_name_kwarg(self):
        # Same as above, but the name is passed as a keyword argument.
        @self.library.tag(name='name')
        def func(parser, token):
            return Node()
        self.assertEqual(self.library.tags['name'], func)

    def test_tag_call(self):
        # Non-decorator usage: register an existing callable directly.
        def func(parser, token):
            return Node()
        self.library.tag('name', func)
        self.assertEqual(self.library.tags['name'], func)

    def test_tag_invalid(self):
        # (None, '') is not a valid (name, function) pair.
        msg = "Unsupported arguments to Library.tag: (None, '')"
        with self.assertRaisesMessage(ValueError, msg):
            self.library.tag(None, '')
| bsd-3-clause |
pikeBishop/OMP_gpxReport | geotiler/tests/cache/test_redis.py | 1 | 3155 | #
# GeoTiler - library to create maps using tiles from a map provider
#
# Copyright (C) 2014 by Artur Wroblewski <wrobell@pld-linux.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice (restored, based on setup.py file from
# https://github.com/stamen/modestmaps-py):
#
# Copyright (C) 2007-2013 by Michal Migurski and other contributors
# License: BSD
#
"""
Redis cache unit tests.
"""
from geotiler.cache.redis import RedisCache
import unittest
from unittest import mock
class RedisCacheTestCase(unittest.TestCase):
    """
    Redis cache unit tests.

    RedisCache is used as a decorator factory: ``cache(f)`` returns a
    wrapped version of ``f`` that consults the Redis client before calling
    the wrapped downloader function.  All collaborators are MagicMocks, so
    these tests only verify the call protocol, not real Redis behaviour.
    """

    def test_wrapper(self):
        """
        Test Redis cache wrapper

        The decorated function must expose the original via __wrapped__.
        """
        client = mock.MagicMock()
        downloader = mock.MagicMock()
        f = lambda host, path, query: True
        cache = RedisCache(client, downloader)
        fc = cache(f)
        self.assertEqual(f, fc.__wrapped__)

    def test_updating_cache(self):
        """
        Test Redis cache update

        Check that valid paramaters are passed to underlying function and
        that cache got updated
        """
        client = mock.MagicMock()
        downloader = mock.MagicMock()
        data = mock.MagicMock()
        downloader.f.return_value = data
        cache = RedisCache(client, downloader)
        fc = cache(downloader.f) # function f with cachinig capability

        # Cache miss: the wrapper must call through and store the result.
        client.exists.return_value = False
        value = fc('host', 'path', 'query')
        self.assertEqual(data, value)
        # NOTE(review): the wrapped method is expected to be called with the
        # downloader instance prepended -- confirm against RedisCache itself.
        downloader.f.assert_called_once_with(
            downloader, 'host', 'path', 'query'
        )
        # The result is stored with an expiry (setex) keyed on the args tuple.
        client.setex.assert_called_once_with(
            ('host', 'path', 'query'),
            data,
            cache.timeout
        )

    def test_cache_use(self):
        """
        Test Redis cache use

        Verify that value is fetched from Redis cache on cache hit
        """
        client = mock.MagicMock()
        data = mock.MagicMock() # data returned from cache
        downloader = mock.MagicMock()
        cache = RedisCache(client, downloader)
        fc = cache(downloader.f) # function f with cachinig capability

        # Cache hit: the wrapped function must NOT be called.
        client.exists.return_value = True # cache hit
        client.get.return_value = data # return data from cache
        value = fc('host', 'path', 'query')
        self.assertEqual(data, value)
        self.assertFalse(downloader.f.called)
        client.get.assert_called_once_with(('host', 'path', 'query'))
# vim: sw=4:et:ai
| gpl-2.0 |
fanjunwei/depot_tools | third_party/logilab/common/ureports/docbook_writer.py | 93 | 5706 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""HTML formatting drivers for ureports"""
__docformat__ = "restructuredtext en"
from six.moves import range
from logilab.common.ureports import HTMLWriter
class DocbookWriter(HTMLWriter):
    """Format ureport layouts as DocBook XML.

    Subclasses HTMLWriter so it can reuse its traversal machinery, but
    overrides every visit_* hook to emit DocBook elements instead of HTML.
    """

    def begin_format(self, layout):
        """begin to format a layout"""
        # super(HTMLWriter, self) deliberately skips HTMLWriter in the MRO
        # so that no HTML prolog is emitted; only the base writer setup runs.
        super(HTMLWriter, self).begin_format(layout)
        if self.snippet is None:
            self.writeln('<?xml version="1.0" encoding="ISO-8859-1"?>')
            self.writeln("""
<book xmlns:xi='http://www.w3.org/2001/XInclude'
lang='fr'>
""")

    def end_format(self, layout):
        """finished to format a layout"""
        if self.snippet is None:
            self.writeln('</book>')

    def visit_section(self, layout):
        """display a section (using <chapter> (level 0) or <section>)"""
        if self.section == 0:
            tag = "chapter"
        else:
            tag = "section"
        self.section += 1
        self.writeln(self._indent('<%s%s>' % (tag, self.handle_attrs(layout))))
        self.format_children(layout)
        self.writeln(self._indent('</%s>'% tag))
        self.section -= 1

    def visit_title(self, layout):
        """display a title using <title>"""
        self.write(self._indent(' <title%s>' % self.handle_attrs(layout)))
        self.format_children(layout)
        self.writeln('</title>')

    def visit_table(self, layout):
        """display a table (using <table>/<tgroup>/<row>/<entry>)"""
        self.writeln(self._indent(' <table%s><title>%s</title>' \
            % (self.handle_attrs(layout), layout.title)))
        self.writeln(self._indent(' <tgroup cols="%s">'% layout.cols))
        for i in range(layout.cols):
            self.writeln(self._indent(' <colspec colname="c%s" colwidth="1*"/>' % i))
        table_content = self.get_table_content(layout)
        # write headers: column headers use the first row, row-check headers
        # use the last row; the header row is then removed from the body.
        if layout.cheaders:
            self.writeln(self._indent(' <thead>'))
            self._write_row(table_content[0])
            self.writeln(self._indent(' </thead>'))
            table_content = table_content[1:]
        elif layout.rcheaders:
            self.writeln(self._indent(' <thead>'))
            self._write_row(table_content[-1])
            self.writeln(self._indent(' </thead>'))
            table_content = table_content[:-1]
        # write body
        self.writeln(self._indent(' <tbody>'))
        for i in range(len(table_content)):
            row = table_content[i]
            self.writeln(self._indent(' <row>'))
            for j in range(len(row)):
                cell = row[j] or ' '
                self.writeln(self._indent(' <entry>%s</entry>' % cell))
            self.writeln(self._indent(' </row>'))
        self.writeln(self._indent(' </tbody>'))
        self.writeln(self._indent(' </tgroup>'))
        self.writeln(self._indent(' </table>'))

    def _write_row(self, row):
        """write content of row (using <row> <entry>)"""
        self.writeln(' <row>')
        for j in range(len(row)):
            cell = row[j] or ' '
            self.writeln(' <entry>%s</entry>' % cell)
        self.writeln(self._indent(' </row>'))

    def visit_list(self, layout):
        """display a list (using <itemizedlist>)"""
        self.writeln(self._indent(' <itemizedlist%s>' % self.handle_attrs(layout)))
        for row in list(self.compute_content(layout)):
            self.writeln(' <listitem><para>%s</para></listitem>' % row)
        self.writeln(self._indent(' </itemizedlist>'))

    def visit_paragraph(self, layout):
        """display links (using <para>)"""
        self.write(self._indent(' <para>'))
        self.format_children(layout)
        self.writeln('</para>')

    def visit_span(self, layout):
        """display links (using <p>)"""
        #TODO: translate in docbook
        self.write('<literal %s>' % self.handle_attrs(layout))
        self.format_children(layout)
        self.write('</literal>')

    def visit_link(self, layout):
        """display links (using <ulink>)"""
        self.write('<ulink url="%s"%s>%s</ulink>' % (layout.url,
                                                     self.handle_attrs(layout),
                                                     layout.label))

    def visit_verbatimtext(self, layout):
        """display verbatim text (using <programlisting>)"""
        self.writeln(self._indent(' <programlisting>'))
        # Escape XML special characters.  '&' must be replaced first so the
        # entities inserted for '<' are not themselves re-escaped.
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
        self.writeln(self._indent(' </programlisting>'))

    def visit_text(self, layout):
        """add some text"""
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))

    def _indent(self, string):
        """correctly indent string according to section"""
        return ' ' * 2*(self.section) + string
| bsd-3-clause |
pandegroup/vs-utils | vs_utils/utils/tests/test_image_utils.py | 3 | 2212 | """
Tests for image utilities.
"""
import numpy as np
from PIL import Image
import tempfile
import unittest
from vs_utils.utils import image_utils
class TestImageUtils(unittest.TestCase):
    """
    Test image_utils.

    All tests work on a random 5x5 RGB uint8 pixel array created in setUp.
    """

    def setUp(self):
        """
        Set up tests.
        """
        # Random 5x5 RGB image; uint8 is what PIL expects for mode 'RGB'.
        pixels = np.random.randint(255, size=(5, 5, 3))
        self.pixels = np.asarray(pixels, dtype='uint8')

    def test_get_pixels(self):
        """
        Read pixels from image.
        """
        im = Image.fromarray(self.pixels, mode='RGB')
        assert np.array_equal(self.pixels, image_utils.get_pixels(im))

    def test_load_file(self):
        """
        Load an image from a file.
        """
        # PNG round-trip must be lossless, so pixel data compares equal.
        im = Image.fromarray(self.pixels, mode='RGB')
        _, filename = tempfile.mkstemp(suffix='.png')
        im.save(filename)
        im = image_utils.load(filename)
        assert np.array_equal(self.pixels, image_utils.get_pixels(im))

    def test_load_string(self):
        """
        Load an image from binary string.
        """
        im = Image.fromarray(self.pixels, mode='RGB')
        _, filename = tempfile.mkstemp(suffix='.png')
        im.save(filename)
        # NOTE(review): the PNG is read in text mode (no 'rb'), which only
        # works under Python 2 string semantics -- confirm target version.
        with open(filename) as f:
            string = f.read()
        im = image_utils.load(string)
        assert np.array_equal(self.pixels, image_utils.get_pixels(im))

    def test_downscale(self):
        """
        Downscale image while maintaining aspect ratio.
        """
        # 20x16 downscaled to max dimension 10 keeps the 5:4 aspect ratio.
        pixels = np.random.randint(255, size=(20, 16, 3))
        pixels = np.asarray(pixels, dtype='uint8')
        im = Image.fromarray(pixels, mode='RGB')
        im = image_utils.downscale(im, 10)
        assert im.size == (8, 10)

    def test_pad(self):
        """Pad an image."""
        # PIL's size is (width, height), hence the swapped assertion.
        im = Image.fromarray(self.pixels, mode='RGB')
        im = image_utils.pad(im, (7, 8))
        assert im.size == (8, 7)

    def test_pad_fail(self):
        """
        Attempt to pad an image with desired size smaller than original size.

        NOTE(review): the try/return/raise dance asserts that pad() raises
        AssertionError; self.assertRaises would express this more directly.
        """
        im = Image.fromarray(self.pixels, mode='RGB')
        try:
            image_utils.pad(im, (4, 3))
        except AssertionError:
            return True
        raise AssertionError
| gpl-3.0 |
seanfisk/buzzword-bingo-server | djangorestframework/status.py | 1 | 1455 | """
Descriptive HTTP status codes, for code readability.
See RFC 2616 - Sec 10: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
Also see django.core.handlers.wsgi.STATUS_CODE_TEXT
"""
# Informational - 1xx (RFC 2616 Sec 10.1)
HTTP_100_CONTINUE = 100
HTTP_101_SWITCHING_PROTOCOLS = 101

# Successful - 2xx (RFC 2616 Sec 10.2)
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_202_ACCEPTED = 202
HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_204_NO_CONTENT = 204
HTTP_205_RESET_CONTENT = 205
HTTP_206_PARTIAL_CONTENT = 206

# Redirection - 3xx (RFC 2616 Sec 10.3)
HTTP_300_MULTIPLE_CHOICES = 300
HTTP_301_MOVED_PERMANENTLY = 301
HTTP_302_FOUND = 302
HTTP_303_SEE_OTHER = 303
HTTP_304_NOT_MODIFIED = 304
HTTP_305_USE_PROXY = 305
HTTP_306_RESERVED = 306
HTTP_307_TEMPORARY_REDIRECT = 307

# Client Error - 4xx (RFC 2616 Sec 10.4)
HTTP_400_BAD_REQUEST = 400
HTTP_401_UNAUTHORIZED = 401
HTTP_402_PAYMENT_REQUIRED = 402
HTTP_403_FORBIDDEN = 403
HTTP_404_NOT_FOUND = 404
HTTP_405_METHOD_NOT_ALLOWED = 405
HTTP_406_NOT_ACCEPTABLE = 406
HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_408_REQUEST_TIMEOUT = 408
HTTP_409_CONFLICT = 409
HTTP_410_GONE = 410
HTTP_411_LENGTH_REQUIRED = 411
HTTP_412_PRECONDITION_FAILED = 412
HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_414_REQUEST_URI_TOO_LONG = 414
HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_417_EXPECTATION_FAILED = 417

# Server Error - 5xx (RFC 2616 Sec 10.5)
HTTP_500_INTERNAL_SERVER_ERROR = 500
HTTP_501_NOT_IMPLEMENTED = 501
HTTP_502_BAD_GATEWAY = 502
HTTP_503_SERVICE_UNAVAILABLE = 503
HTTP_504_GATEWAY_TIMEOUT = 504
HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505
| bsd-3-clause |
captiosus/treadmill | treadmill/infra/utils/cli_callbacks.py | 2 | 1663 | from treadmill.infra import vpc
import click
import re
import pkg_resources
_IPA_PASSWORD_RE = re.compile('.{8,}')
def convert_to_vpc_id(ctx, param, value):
    """Click callback: translate a VPC name into its id.

    Falsy values (option not supplied) are passed through unchanged.
    """
    return vpc.VPC.get_id_from_name(value) if value else value
def validate_vpc_name(ctx, param, value):
    """Click callback: reject a VPC name that already resolves to a VPC id."""
    existing_id = vpc.VPC.get_id_from_name(value)
    if not existing_id:
        return value
    raise click.BadParameter(
        'VPC %s already exists with name: %s' %
        (existing_id, value)
    )
def validate_ipa_password(ctx, param, value):
    """Click callback: prompt for the IPA admin password when absent, then
    enforce the minimum-length pattern (_IPA_PASSWORD_RE, 8+ characters)."""
    if not value:
        value = click.prompt(
            'IPA admin password ', hide_input=True, confirmation_prompt=True
        )
    if _IPA_PASSWORD_RE.match(value):
        return value
    raise click.BadParameter(
        'Password must be greater than 8 characters.'
    )
def validate_domain(ctx, param, value):
    """Click callback: require a domain with exactly one dot (e.g. example.com)."""
    if value.count(".") == 1:
        return value
    raise click.BadParameter('Valid domain like example.com')
def ipa_password_prompt(ctx, param, value):
    """Click callback: fall back to a hidden interactive prompt when the
    IPA admin password was not supplied."""
    if value:
        return value
    return click.prompt('IPA admin password ', hide_input=True)
def current_release_version(ctx, param, value):
    """Click callback: read Treadmill's release version from the packaged
    VERSION.txt resource; fail with BadParameter when absent or empty."""
    try:
        raw = pkg_resources.resource_string(
            'treadmill',
            'VERSION.txt'
        )
    except Exception:
        # Missing/unreadable resource is treated like an empty file below.
        raw = None
    if not raw:
        raise click.BadParameter('No version specified in VERSION.txt')
    return raw.decode('utf-8').strip()
| apache-2.0 |
charris/numpy | numpy/lib/tests/test_mixins.py | 17 | 7030 | import numbers
import operator
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
# NOTE: This class should be kept as an exact copy of the example from the
# docstring for NDArrayOperatorsMixin.  Do not reformat or re-document it
# here: any change must be mirrored in numpy.lib.mixins.NDArrayOperatorsMixin
# (and vice versa) or the published example and this test will drift apart.
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    # One might also consider adding the built-in list type to this
    # list, to support operations like np.add(array_like, list)
    _HANDLED_TYPES = (np.ndarray, numbers.Number)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        out = kwargs.get('out', ())
        for x in inputs + out:
            # Only support operations with instances of _HANDLED_TYPES.
            # Use ArrayLike instead of type(self) for isinstance to
            # allow subclasses that don't override __array_ufunc__ to
            # handle ArrayLike objects.
            if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
                return NotImplemented

        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(x.value if isinstance(x, ArrayLike) else x
                       for x in inputs)
        if out:
            kwargs['out'] = tuple(
                x.value if isinstance(x, ArrayLike) else x
                for x in out)
        result = getattr(ufunc, method)(*inputs, **kwargs)

        if type(result) is tuple:
            # multiple return values
            return tuple(type(self)(x) for x in result)
        elif method == 'at':
            # no return value
            return None
        else:
            # one return value
            return type(self)(result)

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.value)
def wrap_array_like(result):
    """Wrap a ufunc result in ArrayLike; a tuple result (multi-output
    ufunc) is wrapped element-wise.  Exact ``tuple`` type only, matching
    ArrayLike.__array_ufunc__."""
    if type(result) is not tuple:
        return ArrayLike(result)
    return tuple(ArrayLike(item) for item in result)
def _assert_equal_type_and_value(result, expected, err_msg=None):
assert_equal(type(result), type(expected), err_msg=err_msg)
if isinstance(result, tuple):
assert_equal(len(result), len(expected), err_msg=err_msg)
for result_item, expected_item in zip(result, expected):
_assert_equal_type_and_value(result_item, expected_item, err_msg)
else:
assert_equal(result.value, expected.value, err_msg=err_msg)
assert_equal(getattr(result.value, 'dtype', None),
getattr(expected.value, 'dtype', None), err_msg=err_msg)
# Every binary operator the mixin is expected to forward, covering
# comparisons, arithmetic (including divmod/pow), shifts and bitwise ops.
_ALL_BINARY_OPERATORS = [
    operator.lt,
    operator.le,
    operator.eq,
    operator.ne,
    operator.gt,
    operator.ge,
    operator.add,
    operator.sub,
    operator.mul,
    operator.truediv,
    operator.floordiv,
    operator.mod,
    divmod,
    pow,
    operator.lshift,
    operator.rshift,
    operator.and_,
    operator.xor,
    operator.or_,
]
class TestNDArrayOperatorsMixin:
    """Behavioural tests for NDArrayOperatorsMixin via the ArrayLike example:
    operator forwarding, in-place ops, opt-out via __array_ufunc__ = None,
    subclass precedence, and multi-output/at ufunc methods."""

    def test_array_like_add(self):

        def check(result):
            _assert_equal_type_and_value(result, ArrayLike(0))

        # Every combination of ArrayLike / scalar / ndarray operands must
        # produce an ArrayLike result.
        check(ArrayLike(0) + 0)
        check(0 + ArrayLike(0))

        check(ArrayLike(0) + np.array(0))
        check(np.array(0) + ArrayLike(0))

        check(ArrayLike(np.array(0)) + 0)
        check(0 + ArrayLike(np.array(0)))

        check(ArrayLike(np.array(0)) + np.array(0))
        check(np.array(0) + ArrayLike(np.array(0)))

    def test_inplace(self):
        # In-place on an ArrayLike...
        array_like = ArrayLike(np.array([0]))
        array_like += 1
        _assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))

        # ...and in-place on a plain ndarray with an ArrayLike rhs.
        array = np.array([0])
        array += ArrayLike(1)
        _assert_equal_type_and_value(array, ArrayLike(np.array([1])))

    def test_opt_out(self):

        class OptOut:
            """Object that opts out of __array_ufunc__."""
            __array_ufunc__ = None

            def __add__(self, other):
                return self

            def __radd__(self, other):
                return self

        array_like = ArrayLike(1)
        opt_out = OptOut()

        # supported operations: the mixin must defer to OptOut's __add__.
        assert_(array_like + opt_out is opt_out)
        assert_(opt_out + array_like is opt_out)

        # not supported
        with assert_raises(TypeError):
            # don't use the Python default, array_like = array_like + opt_out
            array_like += opt_out
        with assert_raises(TypeError):
            array_like - opt_out
        with assert_raises(TypeError):
            opt_out - array_like

    def test_subclass(self):

        class SubArrayLike(ArrayLike):
            """Should take precedence over ArrayLike."""

        # Results take the subclass type regardless of operand order.
        x = ArrayLike(0)
        y = SubArrayLike(1)
        _assert_equal_type_and_value(x + y, y)
        _assert_equal_type_and_value(y + x, y)

    def test_object(self):
        # Plain objects are not in _HANDLED_TYPES, so ops must raise.
        x = ArrayLike(0)
        obj = object()
        with assert_raises(TypeError):
            x + obj
        with assert_raises(TypeError):
            obj + x
        with assert_raises(TypeError):
            x += obj

    def test_unary_methods(self):
        array = np.array([-1, 0, 1, 2])
        array_like = ArrayLike(array)
        for op in [operator.neg,
                   operator.pos,
                   abs,
                   operator.invert]:
            _assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))

    def test_forward_binary_methods(self):
        # ArrayLike op scalar must match ndarray op scalar, wrapped.
        array = np.array([-1, 0, 1, 2])
        array_like = ArrayLike(array)
        for op in _ALL_BINARY_OPERATORS:
            expected = wrap_array_like(op(array, 1))
            actual = op(array_like, 1)
            err_msg = 'failed for operator {}'.format(op)
            _assert_equal_type_and_value(expected, actual, err_msg=err_msg)

    def test_reflected_binary_methods(self):
        # scalar op ArrayLike exercises the reflected (__r*__) paths.
        for op in _ALL_BINARY_OPERATORS:
            expected = wrap_array_like(op(2, 1))
            actual = op(2, ArrayLike(1))
            err_msg = 'failed for operator {}'.format(op)
            _assert_equal_type_and_value(expected, actual, err_msg=err_msg)

    def test_matmul(self):
        array = np.array([1, 2], dtype=np.float64)
        array_like = ArrayLike(array)
        expected = ArrayLike(np.float64(5))
        _assert_equal_type_and_value(expected, np.matmul(array_like, array))
        _assert_equal_type_and_value(
            expected, operator.matmul(array_like, array))
        _assert_equal_type_and_value(
            expected, operator.matmul(array, array_like))

    def test_ufunc_at(self):
        # ufunc.at mutates in place and returns None.
        array = ArrayLike(np.array([1, 2, 3, 4]))
        assert_(np.negative.at(array, np.array([0, 1])) is None)
        _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))

    def test_ufunc_two_outputs(self):
        # Multi-output ufuncs must return a tuple of wrapped values.
        mantissa, exponent = np.frexp(2 ** -3)
        expected = (ArrayLike(mantissa), ArrayLike(exponent))
        _assert_equal_type_and_value(
            np.frexp(ArrayLike(2 ** -3)), expected)
        _assert_equal_type_and_value(
            np.frexp(ArrayLike(np.array(2 ** -3))), expected)
| bsd-3-clause |
dlebauer/plantcv | lib/plantcv/dev/roi_multi_objects.py | 1 | 3484 | import cv2
import numpy as np
from plantcv import print_image
### Find Objects Partially Inside Region of Interest or Cut Objects to Region of Interest
def roi_objects(img,roi_type,roi_contour, roi_hierarchy,object_contour, obj_hierarchy, device, debug=False):
    """Filter detected objects against a region of interest.

    roi_type 'partial' keeps any object that touches/overlaps the ROI;
    'cutto' clips objects to the ROI boundary.  Returns
    (device, kept contours, hierarchy, mask, object area in pixels).

    NOTE(review): the invalid-roi_type branch calls fatal_error(), which is
    not imported in this module as-is -- it must be imported from plantcv.
    """
    # img = img to display kept objects
    # roi_type = 'cutto' or 'partial' (for partially inside)
    # roi_contour = contour of roi, output from "View and Ajust ROI" function
    # roi_hierarchy = contour of roi, output from "View and Ajust ROI" function
    # object_contour = contours of objects, output from "Identifying Objects" fuction
    # obj_hierarchy = hierarchy of objects, output from "Identifying Objects" fuction
    # device = device number.  Used to count steps in the pipeline
    device +=1

    # Working canvases sized like the input image (3-channel).
    if len(np.shape(img))==3:
        ix,iy,iz=np.shape(img)
    else:
        ix,iy=np.shape(img)
    size = ix,iy,3
    background = np.zeros(size, dtype=np.uint8)
    ori_img=np.copy(img)
    w_back=background+255
    background1 = np.zeros(size, dtype=np.uint8)
    background2 = np.zeros(size, dtype=np.uint8)

    # Allows user to find all objects that are completely inside or overlapping with ROI
    if roi_type=='partial':
        for c,cnt in enumerate(object_contour):
            length=(len(cnt)-1)
            stack=np.vstack(cnt)
            test=[]
            keep=False
            # Keep the contour if ANY of its points lies inside/on the ROI
            # (pointPolygonTest returns -1 only for points strictly outside).
            for i in range(0,length):
                pptest=cv2.pointPolygonTest(roi_contour[0], (stack[i][0],stack[i][1]), False)
                if int(pptest)!=-1:
                    keep=True
            if keep==True:
                # Child contours (holes, hierarchy parent > -1) are drawn
                # white; top-level contours black on the white canvas.
                if obj_hierarchy[0][c][3]>-1:
                    cv2.drawContours(w_back,object_contour,c, (255,255,255),-1, lineType=8,hierarchy=obj_hierarchy)
                else:
                    cv2.drawContours(w_back,object_contour,c, (0,0,0),-1, lineType=8,hierarchy=obj_hierarchy)
            else:
                cv2.drawContours(w_back,object_contour,c, (255,255,255),-1, lineType=8,hierarchy=obj_hierarchy)

        # Invert so kept objects are non-zero, then re-extract contours.
        kept=cv2.cvtColor(w_back, cv2.COLOR_RGB2GRAY )
        kept_obj= cv2.bitwise_not(kept)
        mask=np.copy(kept_obj)
        obj_area=cv2.countNonZero(kept_obj)
        kept_cnt,hierarchy=cv2.findContours(kept_obj,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(ori_img,kept_cnt,-1, (0,255,0),-1, lineType=8,hierarchy=hierarchy)
        cv2.drawContours(ori_img,roi_contour,-1, (255,0,0),5, lineType=8,hierarchy=roi_hierarchy)

    # Allows uer to cut objects to the ROI (all objects completely outside ROI will not be kept)
    elif roi_type=='cutto':
        # Intersect the object mask with the filled ROI polygon.
        cv2.drawContours(background1,object_contour,-1, (255,255,255),-1, lineType=8,hierarchy=obj_hierarchy)
        roi_points=np.vstack(roi_contour[0])
        cv2.fillPoly(background2,[roi_points], (255,255,255))
        obj_roi=cv2.multiply(background1,background2)
        kept_obj=cv2.cvtColor(obj_roi, cv2.COLOR_RGB2GRAY)
        mask=np.copy(kept_obj)
        obj_area=cv2.countNonZero(kept_obj)
        kept_cnt,hierarchy = cv2.findContours(kept_obj,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(w_back,kept_cnt,-1, (0,0,0),-1)
        cv2.drawContours(ori_img,kept_cnt,-1, (0,255,0),-1, lineType=8,hierarchy=hierarchy)
        cv2.drawContours(ori_img,roi_contour,-1, (255,0,0),5, lineType=8,hierarchy=roi_hierarchy)
    else:
        fatal_error('ROI Type' + str(roi_type) + ' is not "cutto" or "partial"!')

    if debug:
        print_image(w_back, (str(device) + '_roi_objects.png'))
        print_image(ori_img, (str(device) + '_obj_on_img.png'))
        print_image(mask, (str(device) + '_roi_mask.png'))
        #print ('Object Area=', obj_area)

    return device, kept_cnt, hierarchy, mask, obj_area
subodhchhabra/airflow | airflow/hooks/__init__.py | 8 | 3247 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os as _os
import sys
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinary below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
# Imports the hooks dynamically while keeping the package API clean,
# abstracting the underlying modules
_hooks = {
'base_hook': ['BaseHook'],
'hive_hooks': [
'HiveCliHook',
'HiveMetastoreHook',
'HiveServer2Hook',
],
'hdfs_hook': ['HDFSHook'],
'webhdfs_hook': ['WebHDFSHook'],
'pig_hook': ['PigCliHook'],
'mysql_hook': ['MySqlHook'],
'postgres_hook': ['PostgresHook'],
'presto_hook': ['PrestoHook'],
'samba_hook': ['SambaHook'],
'sqlite_hook': ['SqliteHook'],
'S3_hook': ['S3Hook'],
'zendesk_hook': ['ZendeskHook'],
'http_hook': ['HttpHook'],
'druid_hook': [
'DruidHook',
'DruidDbApiHook',
],
'jdbc_hook': ['JdbcHook'],
'dbapi_hook': ['DbApiHook'],
'mssql_hook': ['MsSqlHook'],
'oracle_hook': ['OracleHook'],
'slack_hook': ['SlackHook'],
}
if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
from airflow.utils.helpers import AirflowImporter
airflow_importer = AirflowImporter(sys.modules[__name__], _hooks)
def _integrate_plugins():
    """Integrate plugins to the context.

    Registers each plugin hooks module in sys.modules and injects it into
    this module's namespace, so `airflow.hooks.<plugin_module>` imports work.
    """
    from airflow.plugins_manager import hooks_modules
    for hooks_module in hooks_modules:
        sys.modules[hooks_module.__name__] = hooks_module
        globals()[hooks_module._name] = hooks_module

        ##########################################################
        # TODO FIXME Remove in Airflow 2.0
        # Legacy direct imports: also expose each hook class at this
        # module's top level, wrapped with a deprecation warning.
        if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
            from zope.deprecation import deprecated as _deprecated
            for _hook in hooks_module._objects:
                hook_name = _hook.__name__
                globals()[hook_name] = _hook
                _deprecated(
                    hook_name,
                    "Importing plugin hook '{i}' directly from "
                    "'airflow.hooks' has been deprecated. Please "
                    "import from 'airflow.hooks.[plugin_module]' "
                    "instead. Support for direct imports will be dropped "
                    "entirely in Airflow 2.0.".format(i=hook_name))
| apache-2.0 |
guition/Nocturn-RS4L | Live/src/__init__.py | 1 | 1246 | #
# Copyright (C) 2009 Guillermo Ruiz Troyano
#
# This file is part of Nocturn Remote Script for Live (Nocturn RS4L).
#
# Nocturn RS4L is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nocturn RS4L is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nocturn RS4L. If not, see <http://www.gnu.org/licenses/>.
#
# Contact info:
# Guillermo Ruiz Troyano, ruiztroyano@gmail.com
#
import Live
from Nocturn import Nocturn
#import MidiRemoteScript
#from apihelper import print_api
def create_instance(c_instance):
    """Entry point: build and return the Nocturn control-surface script.

    c_instance is the controller object handed in by the host (Live);
    it is passed straight through to Nocturn.
    """
    # Commented-out print_api calls are leftover introspection helpers used
    # during development to dump the Live / MidiRemoteScript APIs to disk.
    #print_api(Live, "Live", "/Users/Guillermo/Desarrollo/Control MIDI/LiveAPI/API/")
    #print_api(c_instance, "c_instance", "/Users/Guillermo/Desktop/")
    #print_api(MidiRemoteScript, "MidiRemoteScript", "/Users/Guillermo/Desktop/")
    return Nocturn(c_instance)
| gpl-3.0 |
jeffFranklin/iam-resttools | resttools/dao_implementation/nws.py | 1 | 2539 | """
Contains NWS DAO implementations.
"""
from resttools.mock.mock_http import MockHTTP
import re
from resttools.dao_implementation.live import get_con_pool, get_live_url
from resttools.dao_implementation.mock import get_mockdata_url
import logging
logger = logging.getLogger(__name__)
class File(object):
    """
    The File DAO implementation returns generally static content. Use this
    DAO with this configuration:

    Both getURL and postURL answer from mock data files; a 404 from the
    mock store is rewritten into the NWS "no record matched" JSON payload.
    """
    # Default pool size; overridden per-instance by conf['MAX_POOL_SIZE'].
    _max_pool_size = 5

    def __init__(self, conf):
        self._conf = conf
        if 'MAX_POOL_SIZE' in conf:
            self._max_pool_size = conf['MAX_POOL_SIZE']

    def _mock_response(self, url, headers):
        # Shared by getURL/postURL: look up mock data and normalize 404s
        # into the NWS-style error body callers expect.
        response = get_mockdata_url("nws", self._conf, url, headers)
        if response.status == 404:
            logger.debug('status 404')
            response.data = '{"error": {"code": "7000","message": "No record matched"}}'
        return response

    def getURL(self, url, headers):
        logger.debug('file nws get url: ' + url)
        return self._mock_response(url, headers)

    def postURL(self, url, headers, body):
        # NOTE: the request body is ignored by the mock implementation.
        logger.debug('file nws post url: ' + url)
        return self._mock_response(url, headers)
class Live(object):
    """
    This DAO provides real data. It requires further configuration, (conf):
    HOST, KEY_FILE, CERT_FILE, CA_FILE and optionally MAX_POOL_SIZE.
    """
    _max_pool_size = 5

    def __init__(self, conf):
        self._conf = conf
        if 'MAX_POOL_SIZE' in conf:
            self._max_pool_size = conf['MAX_POOL_SIZE']

    # Connection pool shared by ALL Live instances (class attribute),
    # created lazily on the first request.
    # NOTE(review): a second instance with a different conf silently reuses
    # the first instance's pool -- confirm this is intended.
    pool = None

    def getURL(self, url, headers):
        # Lazily build the shared pool on first use.
        if Live.pool is None:
            Live.pool = self._get_pool()
        return get_live_url(Live.pool, 'GET',
                            self._conf['HOST'],
                            url, headers=headers,
                            service_name='nws')

    def postURL(self, url, headers, body):
        if Live.pool is None:
            Live.pool = self._get_pool()
        return get_live_url(Live.pool, 'POST',
                            self._conf['HOST'],
                            url, headers=headers, body=body,
                            service_name='nws')

    def _get_pool(self):
        # NOTE(review): verify_https=False disables HTTPS certificate
        # verification even though client cert/CA files are configured --
        # confirm this is deliberate.
        return get_con_pool(self._conf['HOST'],
                            self._conf['KEY_FILE'],
                            self._conf['CERT_FILE'],
                            self._conf['CA_FILE'],
                            max_pool_size=self._max_pool_size, verify_https=False)
| apache-2.0 |
dpshelio/sunpy | sunpy/net/dataretriever/tests/test_eve.py | 2 | 3757 | import pytest
from sunpy.time import parse_time
from sunpy.time.timerange import TimeRange
from sunpy.net.vso import VSOClient
from sunpy.net.vso.attrs import Time, Instrument, Source, Level
from sunpy.net.dataretriever.client import QueryResponse
import sunpy.net.dataretriever.sources.eve as eve
from sunpy.net.fido_factory import UnifiedResponse
from sunpy.net import Fido
from sunpy.net import attrs as a
LCClient = eve.EVEClient()
@pytest.mark.remote_data
@pytest.mark.parametrize("timerange,url_start,url_end", [
(TimeRange('2012/4/21', '2012/4/21'),
'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120421_EVE_L0CS_DIODES_1m.txt',
'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120421_EVE_L0CS_DIODES_1m.txt'
),
(TimeRange('2012/5/5', '2012/5/6'),
'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120505_EVE_L0CS_DIODES_1m.txt',
'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120506_EVE_L0CS_DIODES_1m.txt',
),
(TimeRange('2012/7/7', '2012/7/14'),
'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120707_EVE_L0CS_DIODES_1m.txt',
'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120714_EVE_L0CS_DIODES_1m.txt',
)
])
def test_get_url_for_time_range(timerange, url_start, url_end):
urls = LCClient._get_url_for_timerange(timerange)
assert isinstance(urls, list)
assert urls[0] == url_start
assert urls[-1] == url_end
def test_can_handle_query():
    """EVEClient claims queries for Instrument('eve') at Level(0) only;
    a bare time range or an explicit Source attribute is rejected."""
    ans1 = eve.EVEClient._can_handle_query(
        Time('2012/8/9', '2012/8/10'), Instrument('eve'), Level(0))
    assert ans1 is True
    # No instrument specified: the client must decline.
    ans2 = eve.EVEClient._can_handle_query(Time('2012/7/7', '2012/7/7'))
    assert ans2 is False
    # An extra Source attribute makes the query unanswerable by this client.
    ans3 = eve.EVEClient._can_handle_query(
        Time('2012/8/9', '2012/8/10'), Instrument('eve'), Source('sdo'))
    assert ans3 is False
@pytest.mark.remote_data
def test_query():
qr1 = LCClient.search(Time('2012/8/9', '2012/8/10'), Instrument('eve'))
assert isinstance(qr1, QueryResponse)
assert len(qr1) == 2
assert qr1.time_range().start == parse_time('2012/08/09')
assert qr1.time_range().end == parse_time('2012/08/11') # includes end.
@pytest.mark.remote_data
@pytest.mark.parametrize("time,instrument", [
(Time('2012/11/27', '2012/11/27'), Instrument('eve')),
])
def test_get(time, instrument):
qr1 = LCClient.search(time, instrument)
res = LCClient.fetch(qr1)
assert len(res) == len(qr1)
@pytest.mark.remote_data
@pytest.mark.parametrize(
'query',
[(a.Time('2012/10/4', '2012/10/6') & a.Instrument('eve') & a.Level(0))])
def test_fido(query):
qr = Fido.search(query)
client = qr.get_response(0).client
assert isinstance(qr, UnifiedResponse)
assert isinstance(client, eve.EVEClient)
response = Fido.fetch(qr)
assert len(response) == qr._numfile
@pytest.mark.remote_data
@pytest.mark.parametrize(
    'time',
    [(a.Time('2012/10/4', '2012/10/6')), (a.Time('2012/11/27', '2012/11/27'))])
def test_levels(time):
    """
    Test the correct handling of level 0 / 1.
    The default should be level 1 from VSO, level 0 comes from EVEClient.
    """
    eve_a = a.Instrument('EVE')
    qr = Fido.search(time, eve_a)
    client = qr.get_response(0).client
    assert isinstance(client, VSOClient)
    qr = Fido.search(time, eve_a, a.Level(0))
    client = qr.get_response(0).client
    assert isinstance(client, eve.EVEClient)
    qr = Fido.search(time, eve_a, a.Level(0) | a.Level(1))
    # Loop variable renamed to 'resp': the previous comprehension reused the
    # name 'a', shadowing the sunpy.net.attrs alias inside the comprehension.
    clients = {type(resp.client) for resp in qr.responses}
    assert clients.symmetric_difference({VSOClient, eve.EVEClient}) == set()
| bsd-2-clause |
sh4wn/vispy | vispy/scene/widgets/viewbox.py | 20 | 6727 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .widget import Widget
from ..subscene import SubScene
from ..cameras import make_camera, BaseCamera
from ...ext.six import string_types
from ...visuals.filters import Clipper
class ViewBox(Widget):
    """ Provides a rectangular widget to which its subscene is rendered.

    Three classes work together when using a ViewBox:

    * The :class:`SubScene` class describes a "world" coordinate system and
      the entities that live inside it.
    * ViewBox is a "window" through which we view the
      subscene. Multiple ViewBoxes may view the same subscene.
    * :class:`Camera` describes both the perspective from which the
      subscene is rendered, and the way user interaction affects that
      perspective.

    In general it is only necessary to create the ViewBox; a SubScene and
    Camera will be generated automatically.

    Parameters
    ----------
    camera : instance of Camera | str | None
        The camera through which to view the SubScene. If None, then a
        PanZoomCamera (2D interaction) is used. If str, then the string is
        used as the argument to :func:`make_camera`.
    **kwargs : dict
        Extra keyword arguments to pass to `Widget`.
    """
    def __init__(self, camera=None, **kwargs):
        # Attributes must exist before Widget.__init__ runs, because the base
        # constructor may trigger callbacks that read them.
        self._camera = None
        self._scene = None
        Widget.__init__(self, **kwargs)
        self.interactive = True
        # Each viewbox has an internal scene node, which has a transform that
        # represents the transformation imposed by camera.
        if self.name is not None:
            name = str(self.name) + "_Scene"
        else:
            name = None
        self._scene = SubScene(name=name, parent=self)
        self._scene._clipper = Clipper()
        self._scene.clip_children = True
        # Keep the clip rectangle in sync with this widget's geometry.
        self.transforms.changed.connect(self._update_scene_clipper)
        # Camera is a helper object that handles scene transformation
        # and user interaction.
        if camera is None:
            camera = 'base'
        if isinstance(camera, string_types):
            self.camera = make_camera(camera, parent=self.scene)
        elif isinstance(camera, BaseCamera):
            self.camera = camera
        else:
            raise TypeError('Argument "camera" must be None, str, or Camera.')
    @property
    def camera(self):
        """ Get/set the Camera in use by this ViewBox

        If a string is given (e.g. 'panzoom', 'turntable', 'fly'). A
        corresponding camera is selected if it already exists in the
        scene, otherwise a new camera is created.

        The camera object is made a child of the scene (if it is not
        already in the scene).

        Multiple cameras can exist in one scene, although only one can
        be active at a time. A single camera can be used by multiple
        viewboxes at the same time.
        """
        return self._camera
    @camera.setter
    def camera(self, cam):
        if isinstance(cam, string_types):
            # Try to select an existing camera
            for child in self.scene.children:
                if isinstance(child, BaseCamera):
                    # e.g. 'PanZoomCamera' -> 'panzoom'
                    this_cam_type = child.__class__.__name__.lower()[:-6]
                    if this_cam_type == cam:
                        self.camera = child
                        return
            else:
                # for/else: reached only when no existing camera matched
                # (a match returns from the loop above).
                # No such camera yet, create it then
                self.camera = make_camera(cam)
        elif isinstance(cam, BaseCamera):
            # Ensure that the camera is in the scene
            if not self.is_in_scene(cam):
                cam.parent = self.scene
            # Disconnect / connect
            if self._camera is not None:
                self._camera._viewbox_unset(self)
            self._camera = cam
            if self._camera is not None:
                self._camera._viewbox_set(self)
            # Update view
            cam.view_changed()
        else:
            raise ValueError('Not a camera object.')
    def is_in_scene(self, node):
        """Get whether the given node is inside the scene of this viewbox.

        Parameters
        ----------
        node : instance of Node
            The node.
        """
        return self.scene.is_child(node)
    def get_scene_bounds(self, dim=None):
        """Get the total bounds based on the visuals present in the scene

        Parameters
        ----------
        dim : int | None
            Dimension to return.

        Returns
        -------
        bounds : list | tuple
            If ``dim is None``, Returns a list of 3 tuples, otherwise
            the bounds for the requested dimension.
        """
        # todo: handle sub-children
        # todo: handle transformations
        # Init: start from an "empty" interval so any real bound replaces it.
        bounds = [(np.inf, -np.inf), (np.inf, -np.inf), (np.inf, -np.inf)]
        # Get bounds of all children
        for ob in self.scene.children:
            if hasattr(ob, 'bounds'):
                for axis in (0, 1, 2):
                    if (dim is not None) and dim != axis:
                        continue
                    b = ob.bounds(axis)
                    if b is not None:
                        b = min(b), max(b)  # Ensure correct order
                        bounds[axis] = (min(bounds[axis][0], b[0]),
                                        max(bounds[axis][1], b[1]))
        # Set defaults: axes with no contributing visual fall back to (-1, 1).
        for axis in (0, 1, 2):
            if any(np.isinf(bounds[axis])):
                bounds[axis] = -1, 1
        if dim is not None:
            return bounds[dim]
        else:
            return bounds
    @property
    def scene(self):
        """ The root node of the scene viewed by this ViewBox.
        """
        return self._scene
    def add(self, node):
        """ Add a Node to the scene for this ViewBox.

        This is a convenience method equivalent to
        `node.parent = viewbox.scene`

        Parameters
        ----------
        node : instance of Node
            The node to add.
        """
        node.parent = self.scene
    def on_resize(self, event):
        """Resize event handler

        Parameters
        ----------
        event : instance of Event
            The event.
        """
        if self._scene is None:
            # happens during init
            return
        self._update_scene_clipper()
    def _update_scene_clipper(self, event=None):
        # Map this widget's inner rectangle to framebuffer coordinates and
        # use it as the clip bounds for the subscene.
        tr = self.get_transform('visual', 'framebuffer')
        self._scene._clipper.bounds = tr.map(self.inner_rect)
| bsd-3-clause |
djnugent/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/srvs.py | 216 | 3017 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
ROS Service Description Language Spec
Implements http://ros.org/wiki/srv
"""
import os
import sys
from . names import is_legal_resource_name, is_legal_resource_base_name, package_resource_name, resource_name
class SrvSpec(object):
    """Specification of a single ROS service: a request/response message
    pair plus the original .srv text and naming metadata.
    """

    def __init__(self, request, response, text, full_name='', short_name='', package=''):
        # Derive package and short name from the full resource name whenever
        # the caller did not supply them explicitly.
        alt_package, alt_short_name = package_resource_name(full_name)
        self.request = request
        self.response = response
        self.text = text
        self.full_name = full_name
        self.short_name = short_name or alt_short_name
        self.package = package or alt_package

    def _key(self):
        # Attribute tuple used for equality comparison.
        return (self.request, self.response, self.text,
                self.full_name, self.short_name, self.package)

    def __eq__(self, other):
        if not other or not isinstance(other, SrvSpec):
            return False
        return self._key() == other._key()

    def __ne__(self, other):
        # __eq__ already returns False for None / non-SrvSpec, so simply
        # negating it preserves the original truth table.
        return not self.__eq__(other)

    def __repr__(self):
        return "SrvSpec[{0!r}, {1!r}]".format(self.request, self.response)
| gpl-3.0 |
SujaySKumar/django | tests/i18n/patterns/tests.py | 256 | 14326 | from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import clear_url_caches, reverse, translate_url
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.test.utils import override_script_prefix
from django.utils import translation
from django.utils._os import upath
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
    # LocaleMiddleware variant that issues permanent (301) redirects instead
    # of temporary ones; exercised by URLRedirectTests.test_custom_redirect_class.
    response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=[
        os.path.join(os.path.dirname(upath(__file__)), 'locale'),
    ],
    LANGUAGE_CODE='en-us',
    LANGUAGES=[
        ('nl', 'Dutch'),
        ('en', 'English'),
        ('pt-br', 'Brazilian Portuguese'),
    ],
    MIDDLEWARE_CLASSES=[
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ],
    ROOT_URLCONF='i18n.patterns.urls.default',
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.i18n',
            ],
        },
    }],
)
class URLTestCaseBase(SimpleTestCase):
    """
    TestCase base-class for the URL tests.

    Configures a three-language (nl/en/pt-br) i18n setup with LocaleMiddleware
    enabled, shared by every test class in this module.
    """
    def setUp(self):
        # Make sure the cache is empty before we are doing our tests.
        clear_url_caches()
    def tearDown(self):
        # Make sure we will leave an empty cache for other testcases.
        clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
    """
    Tests if the `i18n_patterns` is adding the prefix correctly.
    """
    def test_not_prefixed(self):
        # URLs outside i18n_patterns must reverse identically in any language.
        with translation.override('en'):
            self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
            self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
        with translation.override('nl'):
            self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
            self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
    def test_prefixed(self):
        # URLs inside i18n_patterns get the active language code as prefix.
        with translation.override('en'):
            self.assertEqual(reverse('prefixed'), '/en/prefixed/')
        with translation.override('nl'):
            self.assertEqual(reverse('prefixed'), '/nl/prefixed/')
    @override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
    def test_invalid_prefix_use(self):
        # Misusing i18n_patterns (e.g. not at the URLconf top level) must raise.
        self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
@override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled')
class URLDisabledTests(URLTestCaseBase):
    @override_settings(USE_I18N=False)
    def test_prefixed_i18n_disabled(self):
        # With USE_I18N off, i18n_patterns must not add any language prefix.
        with translation.override('en'):
            self.assertEqual(reverse('prefixed'), '/prefixed/')
        with translation.override('nl'):
            self.assertEqual(reverse('prefixed'), '/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
class PathUnusedTests(URLTestCaseBase):
    """
    Check that if no i18n_patterns is used in root urlconfs, then no
    language activation happens based on url prefix.
    """
    def test_no_lang_activate(self):
        # A '/nl/' path prefix must NOT activate Dutch when the URLconf does
        # not use i18n_patterns; default 'en' stays active.
        response = self.client.get('/nl/foo/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'en')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
    """
    Tests if the pattern-strings are translated correctly (within the
    `i18n_patterns` and the normal `patterns` function).
    """
    def test_no_prefix_translated(self):
        # Pattern strings themselves are translated even without a prefix.
        with translation.override('en'):
            self.assertEqual(reverse('no-prefix-translated'), '/translated/')
            self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')
        with translation.override('nl'):
            self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
            self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')
        with translation.override('pt-br'):
            self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
            self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')
    def test_users_url(self):
        # Both the language prefix and the pattern string are localized.
        with translation.override('en'):
            self.assertEqual(reverse('users'), '/en/users/')
        with translation.override('nl'):
            self.assertEqual(reverse('users'), '/nl/gebruikers/')
            self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
        with translation.override('pt-br'):
            self.assertEqual(reverse('users'), '/pt-br/usuarios/')
    def test_translate_url_utility(self):
        # translate_url() converts a URL to another language without
        # changing the currently active language.
        with translation.override('en'):
            self.assertEqual(translate_url('/en/non-existent/', 'nl'), '/en/non-existent/')
            self.assertEqual(translate_url('/en/users/', 'nl'), '/nl/gebruikers/')
            # Namespaced URL
            self.assertEqual(translate_url('/en/account/register/', 'nl'), '/nl/profiel/registeren/')
            self.assertEqual(translation.get_language(), 'en')
        with translation.override('nl'):
            self.assertEqual(translate_url('/nl/gebruikers/', 'en'), '/en/users/')
            self.assertEqual(translation.get_language(), 'nl')
class URLNamespaceTests(URLTestCaseBase):
    """
    Tests if the translations are still working within namespaces.
    """
    def test_account_register(self):
        # Namespaced URL names ('account:register') must translate too.
        with translation.override('en'):
            self.assertEqual(reverse('account:register'), '/en/account/register/')
        with translation.override('nl'):
            self.assertEqual(reverse('account:register'), '/nl/profiel/registeren/')
class URLRedirectTests(URLTestCaseBase):
    """
    Tests if the user gets redirected to the right URL when there is no
    language-prefix in the request URL.
    """
    def test_no_prefix_response(self):
        # Non-i18n URLs are served directly; no redirect needed.
        response = self.client.get('/not-prefixed/')
        self.assertEqual(response.status_code, 200)
    def test_en_redirect(self):
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
    def test_en_redirect_wrong_url(self):
        # A Dutch pattern string is not reachable with English active.
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)
    def test_nl_redirect(self):
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertRedirects(response, '/nl/profiel/registeren/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
    def test_nl_redirect_wrong_url(self):
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertEqual(response.status_code, 404)
    def test_pt_br_redirect(self):
        response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
        self.assertRedirects(response, '/pt-br/conta/registre-se/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
    def test_pl_pl_redirect(self):
        # language from outside of the supported LANGUAGES list
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
        self.assertRedirects(response, '/en/account/register/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
    @override_settings(
        MIDDLEWARE_CLASSES=[
            'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
            'django.middleware.common.CommonMiddleware',
        ],
    )
    def test_custom_redirect_class(self):
        # The subclassed middleware above must yield a 301 instead of a 302.
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
    """
    Tests that 'Accept-Language' is not added to the Vary header when using
    prefixed URLs.
    """
    def test_no_prefix_response(self):
        # Unprefixed URLs depend on the request language, so Vary is set.
        response = self.client.get('/not-prefixed/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Vary'), 'Accept-Language')
    def test_en_redirect(self):
        # Prefixed URLs encode the language in the path, so neither the
        # redirect nor the final response should carry a Vary header.
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/')
        self.assertFalse(response.get('Vary'))
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=True`).
    """
    def test_not_prefixed_redirect(self):
        # CommonMiddleware appends the slash with a permanent redirect.
        response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/not-prefixed/', 301)
    def test_en_redirect(self):
        response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
        # We only want one redirect, bypassing CommonMiddleware
        self.assertListEqual(response.redirect_chain, [('/en/account/register/', 302)])
        self.assertRedirects(response, '/en/account/register/', 302)
        response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
        self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=False`).
    """
    @override_settings(APPEND_SLASH=False)
    def test_not_prefixed_redirect(self):
        # Without APPEND_SLASH there is nothing to redirect to: plain 404.
        response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)
    @override_settings(APPEND_SLASH=False)
    def test_en_redirect(self):
        # The locale redirect still happens, preserving the missing slash.
        response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register-without-slash', 302)
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
    """
    Tests if the response has the right language-code.
    """
    def test_not_prefixed_with_prefix(self):
        # A non-i18n URL must not resolve when given a language prefix.
        response = self.client.get('/en/not-prefixed/')
        self.assertEqual(response.status_code, 404)
    def test_en_url(self):
        response = self.client.get('/en/account/register/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'en')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
    def test_nl_url(self):
        response = self.client.get('/nl/profiel/registeren/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'nl')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
    def test_wrong_en_prefix(self):
        # Prefix and pattern language must agree; mixed combinations 404.
        response = self.client.get('/en/profiel/registeren/')
        self.assertEqual(response.status_code, 404)
    def test_wrong_nl_prefix(self):
        response = self.client.get('/nl/account/register/')
        self.assertEqual(response.status_code, 404)
    def test_pt_br_url(self):
        response = self.client.get('/pt-br/conta/registre-se/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'pt-br')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
    """
    #21579 - LocaleMiddleware should respect the script prefix.
    """
    def test_language_prefix_with_script_prefix(self):
        # The language prefix must be inserted *after* the script prefix.
        prefix = '/script_prefix'
        with override_script_prefix(prefix):
            response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=prefix)
            self.assertRedirects(response, '%s/en/prefixed/' % prefix, target_status_code=404)
class URLTagTests(URLTestCaseBase):
    """
    Test if the language tag works.
    """
    def test_strings_only(self):
        # {% language %} with literal language codes.
        t = Template("""{% load i18n %}
        {% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
        {% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(t.render(Context({})).strip().split(),
                         ['/vertaald/', '/traduzidos/'])
    def test_context(self):
        # {% language %} with language codes taken from the context.
        ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
        tpl = Template("""{% load i18n %}
        {% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
        {% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(ctx).strip().split(),
                         ['/vertaald/', '/traduzidos/'])
    def test_args(self):
        # {% url %} positional arguments inside a {% language %} block.
        tpl = Template("""{% load i18n %}
        {% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
        {% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         ['/vertaald/apo/', '/traduzidos/apo/'])
    def test_kwargs(self):
        # {% url %} keyword arguments inside a {% language %} block.
        tpl = Template("""{% load i18n %}
        {% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
        {% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         ['/vertaald/apo/', '/traduzidos/apo/'])
| bsd-3-clause |
keflavich/scikit-image | skimage/util/tests/test_apply_parallel.py | 15 | 1847 | from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from skimage.filters import threshold_adaptive, gaussian_filter
from skimage.util.apply_parallel import apply_parallel
def test_apply_parallel():
    """Chunked execution must agree with the plain (unchunked) filters."""
    image = np.arange(144).reshape(12, 12).astype(float)
    # threshold_adaptive, passing its extra args through apply_parallel.
    expected1 = threshold_adaptive(image, 3)
    result1 = apply_parallel(threshold_adaptive, image, chunks=(6, 6), depth=5,
                             extra_arguments=(3,),
                             extra_keywords={'mode': 'reflect'})
    assert_array_almost_equal(result1, expected1)
    # gaussian_filter wrapped in a closure instead of extra_arguments.
    def wrapped_gauss(arr):
        return gaussian_filter(arr, 1, mode='reflect')
    expected2 = gaussian_filter(image, 1, mode='reflect')
    assert_array_almost_equal(
        apply_parallel(wrapped_gauss, image, chunks=(6, 6), depth=5),
        expected2)
def test_no_chunks():
    """apply_parallel with no chunk specification must equal a direct call."""
    image = np.ones(1 * 4 * 8 * 9).reshape(1, 4, 8, 9)
    def add_42(arr):
        return arr + 42
    assert_array_almost_equal(apply_parallel(add_42, image), add_42(image))
def test_apply_parallel_wrap():
    """The 'wrap' boundary mode must be forwarded to the chunked filter."""
    image = np.arange(144).reshape(12, 12).astype(float)
    def wrap_gauss(arr):
        return gaussian_filter(arr, 1, mode='wrap')
    expected = gaussian_filter(image, 1, mode='wrap')
    observed = apply_parallel(wrap_gauss, image, chunks=(6, 6), depth=5,
                              mode='wrap')
    assert_array_almost_equal(observed, expected)
def test_apply_parallel_nearest():
    """'nearest' mode with a per-axis depth dict must match the plain filter."""
    image = np.arange(144).reshape(12, 12).astype(float)
    def nearest_gauss(arr):
        return gaussian_filter(arr, 1, mode='nearest')
    expected = gaussian_filter(image, 1, mode='nearest')
    observed = apply_parallel(nearest_gauss, image, chunks=(6, 6),
                              depth={0: 5, 1: 5}, mode='nearest')
    assert_array_almost_equal(observed, expected)
| bsd-3-clause |
hojel/calibre | src/calibre/gui2/preferences/search.py | 14 | 10956 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QApplication
from calibre.gui2.preferences import ConfigWidgetBase, test_widget, \
CommaSeparatedList
from calibre.gui2.preferences.search_ui import Ui_Form
from calibre.gui2 import config, error_dialog, gprefs
from calibre.utils.config import prefs
from calibre.utils.icu import sort_key
from calibre.library.caches import set_use_primary_find_in_search
class ConfigWidget(ConfigWidgetBase, Ui_Form):
    """Preferences panel for calibre's search behaviour: search-as-you-type,
    highlighting, column limits, grouped search terms (GST) and the
    'similar books' match keys."""
    def genesis(self, gui):
        # Wire up the widget: register preference controls, load the grouped
        # search terms from the database and connect all GUI signals.
        self.gui = gui
        db = gui.library_view.model().db
        self.db = db
        r = self.register
        r('search_as_you_type', config)
        r('highlight_search_matches', config)
        r('show_highlight_toggle_button', gprefs)
        r('limit_search_columns', prefs)
        r('use_primary_find_in_search', prefs)
        r('limit_search_columns_to', prefs, setting=CommaSeparatedList)
        fl = db.field_metadata.get_search_terms()
        self.opt_limit_search_columns_to.update_items_cache(fl)
        self.clear_history_button.clicked.connect(self.clear_histories)
        self.gst_explanation.setText('<p>' + _(
            "<b>Grouped search terms</b> are search names that permit a query to automatically "
            "search across more than one column. For example, if you create a grouped "
            "search term <code>allseries</code> with the value "
            "<code>series, #myseries, #myseries2</code>, then "
            "the query <code>allseries:adhoc</code> will find 'adhoc' in any of the "
            "columns <code>series</code>, <code>#myseries</code>, and "
            "<code>#myseries2</code>.<p> Enter the name of the "
            "grouped search term in the drop-down box, enter the list of columns "
            "to search in the value box, then push the Save button. "
            "<p>Note: Search terms are forced to lower case; <code>MySearch</code> "
            "and <code>mysearch</code> are the same term."
            "<p>You can have your grouped search term show up as user categories in "
            " the Tag Browser. Just add the grouped search term names to the Make user "
            "categories from box. You can add multiple terms separated by commas. "
            "The new user category will be automatically "
            "populated with all the items in the categories included in the grouped "
            "search term. <p>Automatic user categories permit you to see easily "
            "all the category items that "
            "are in the columns contained in the grouped search term. Using the above "
            "<code>allseries</code> example, the automatically-generated user category "
            "will contain all the series mentioned in <code>series</code>, "
            "<code>#myseries</code>, and <code>#myseries2</code>. This "
            "can be useful to check for duplicates, to find which column contains "
            "a particular item, or to have hierarchical categories (categories "
            "that contain categories)."))
        # Work on a copy of the stored GSTs; written back only in commit().
        self.gst = db.prefs.get('grouped_search_terms', {}).copy()
        self.orig_gst_keys = self.gst.keys()
        # Only categorical fields may take part in a grouped search term.
        fl = []
        for f in db.all_field_keys():
            fm = db.metadata_for_field(f)
            if not fm['search_terms']:
                continue
            if not fm['is_category']:
                continue
            fl.append(f)
        self.gst_value.update_items_cache(fl)
        self.fill_gst_box(select=None)
        self.category_fields = fl
        ml = [(_('Match any'), 'match_any'), (_('Match all'), 'match_all')]
        r('similar_authors_match_kind', db.prefs, choices=ml)
        r('similar_tags_match_kind', db.prefs, choices=ml)
        r('similar_series_match_kind', db.prefs, choices=ml)
        r('similar_publisher_match_kind', db.prefs, choices=ml)
        self.set_similar_fields(initial=True)
        self.similar_authors_search_key.currentIndexChanged[int].connect(self.something_changed)
        self.similar_tags_search_key.currentIndexChanged[int].connect(self.something_changed)
        self.similar_series_search_key.currentIndexChanged[int].connect(self.something_changed)
        self.similar_publisher_search_key.currentIndexChanged[int].connect(self.something_changed)
        self.gst_delete_button.setEnabled(False)
        self.gst_save_button.setEnabled(False)
        self.gst_names.currentIndexChanged[int].connect(self.gst_index_changed)
        self.gst_names.editTextChanged.connect(self.gst_text_changed)
        self.gst_value.textChanged.connect(self.gst_text_changed)
        self.gst_save_button.clicked.connect(self.gst_save_clicked)
        self.gst_delete_button.clicked.connect(self.gst_delete_clicked)
        self.gst_changed = False
        if db.prefs.get('grouped_search_make_user_categories', None) is None:
            db.new_api.set_pref('grouped_search_make_user_categories', [])
        r('grouped_search_make_user_categories', db.prefs, setting=CommaSeparatedList)
        self.muc_changed = False
        self.opt_grouped_search_make_user_categories.lineEdit().editingFinished.connect(
            self.muc_box_changed)
    def set_similar_fields(self, initial=False):
        # Refresh the choices of all four 'similar books' key combo boxes.
        self.set_similar('similar_authors_search_key', initial=initial)
        self.set_similar('similar_tags_search_key', initial=initial)
        self.set_similar('similar_series_search_key', initial=initial)
        self.set_similar('similar_publisher_search_key', initial=initial)
    def set_similar(self, name, initial=False):
        # Repopulate combo box `name` with category fields + GST names,
        # preserving the current selection when possible.
        field = getattr(self, name)
        if not initial:
            val = field.currentText()
        else:
            val = self.db.prefs[name]
        field.blockSignals(True)
        field.clear()
        choices = []
        choices.extend(self.category_fields)
        choices.extend(sorted(self.gst.keys(), key=sort_key))
        field.addItems(choices)
        dex = field.findText(val)
        if dex >= 0:
            field.setCurrentIndex(dex)
        else:
            field.setCurrentIndex(0)
        field.blockSignals(False)
    def something_changed(self, dex):
        # Any change in the similar-books keys marks the panel dirty.
        self.changed_signal.emit()
    def muc_box_changed(self):
        # Remember that 'make user categories' was edited so refresh_gui
        # knows to recount the Tag Browser.
        self.muc_changed = True
    def gst_save_clicked(self):
        # Validate and store the grouped search term currently being edited.
        idx = self.gst_names.currentIndex()
        name = icu_lower(unicode(self.gst_names.currentText()))
        if not name:
            return error_dialog(self.gui, _('Grouped Search Terms'),
                                _('The search term cannot be blank'),
                                show=True)
        if idx != 0:
            orig_name = unicode(self.gst_names.itemData(idx) or '')
        else:
            orig_name = ''
        if name != orig_name:
            # Renames must not collide with columns or user categories.
            if name in self.db.field_metadata.get_search_terms() and \
                    name not in self.orig_gst_keys:
                return error_dialog(self.gui, _('Grouped Search Terms'),
                                    _('That name is already used for a column or grouped search term'),
                                    show=True)
            if name in [icu_lower(p) for p in self.db.prefs.get('user_categories', {})]:
                return error_dialog(self.gui, _('Grouped Search Terms'),
                                    _('That name is already used for user category'),
                                    show=True)
        val = [v.strip() for v in unicode(self.gst_value.text()).split(',') if v.strip()]
        if not val:
            return error_dialog(self.gui, _('Grouped Search Terms'),
                                _('The value box cannot be empty'), show=True)
        if orig_name and name != orig_name:
            del self.gst[orig_name]
            self.gst_changed = True
        self.gst[name] = val
        self.fill_gst_box(select=name)
        self.set_similar_fields(initial=False)
        self.changed_signal.emit()
    def gst_delete_clicked(self):
        # Remove the currently selected grouped search term.
        if self.gst_names.currentIndex() == 0:
            return error_dialog(self.gui, _('Grouped Search Terms'),
                                _('The empty grouped search term cannot be deleted'), show=True)
        name = unicode(self.gst_names.currentText())
        if name in self.gst:
            del self.gst[name]
            self.fill_gst_box(select='')
            self.changed_signal.emit()
            self.gst_changed = True
        self.set_similar_fields(initial=False)
    def fill_gst_box(self, select=None):
        # Rebuild the GST name combo; select='' resets it, a name selects it.
        terms = sorted(self.gst.keys(), key=sort_key)
        self.opt_grouped_search_make_user_categories.update_items_cache(terms)
        self.gst_names.blockSignals(True)
        self.gst_names.clear()
        self.gst_names.addItem('', '')
        for t in terms:
            self.gst_names.addItem(t, t)
        self.gst_names.blockSignals(False)
        if select is not None:
            if select == '':
                self.gst_index_changed(0)
            elif select in terms:
                self.gst_names.setCurrentIndex(self.gst_names.findText(select))
    def gst_text_changed(self):
        # Editing either field enables Save and disables Delete.
        self.gst_delete_button.setEnabled(False)
        self.gst_save_button.setEnabled(True)
    def gst_index_changed(self, idx):
        # Show the value list of the newly selected GST (idx 0 = empty entry).
        self.gst_delete_button.setEnabled(idx != 0)
        self.gst_save_button.setEnabled(False)
        self.gst_value.blockSignals(True)
        if idx == 0:
            self.gst_value.setText('')
        else:
            name = unicode(self.gst_names.itemData(idx) or '')
            self.gst_value.setText(','.join(self.gst[name]))
        self.gst_value.blockSignals(False)
    def commit(self):
        # Persist GSTs (if edited) and the four similar-books search keys.
        if self.gst_changed:
            self.db.new_api.set_pref('grouped_search_terms', self.gst)
            self.db.field_metadata.add_grouped_search_terms(self.gst)
        self.db.new_api.set_pref('similar_authors_search_key',
                          unicode(self.similar_authors_search_key.currentText()))
        self.db.new_api.set_pref('similar_tags_search_key',
                          unicode(self.similar_tags_search_key.currentText()))
        self.db.new_api.set_pref('similar_series_search_key',
                          unicode(self.similar_series_search_key.currentText()))
        self.db.new_api.set_pref('similar_publisher_search_key',
                          unicode(self.similar_publisher_search_key.currentText()))
        return ConfigWidgetBase.commit(self)
    def refresh_gui(self, gui):
        # Apply the committed preferences to the running GUI.
        set_use_primary_find_in_search(prefs['use_primary_find_in_search'])
        gui.set_highlight_only_button_icon()
        if self.muc_changed:
            gui.tags_view.recount()
        gui.search.search_as_you_type(config['search_as_you_type'])
        gui.search.do_search()
    def clear_histories(self, *args):
        # Reset every list-valued '*_search_history' config entry.
        for key, val in config.defaults.iteritems():
            if key.endswith('_search_history') and isinstance(val, list):
                config[key] = []
        self.gui.search.clear_history()
if __name__ == '__main__':
    # Manual test harness: run this module directly to display the panel.
    app = QApplication([])
    test_widget('Interface', 'Search')
| gpl-3.0 |
melviso/beatle | beatle/model/py/Argument.py | 2 | 3649 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 22:31:28 2013
@author: mel
"""
from beatle.model import TComponent
from beatle import tran
class Argument(TComponent):
    """Implements argument representation"""
    # Arguments may themselves contain nested context elements.
    context_container = True

    # visual methods
    @tran.TransactionalMethod('move argument {0}')
    def drop(self, to):
        """Drop this element onto another place (transactional move)."""
        target = to.inner_argument_container
        if not target or to.project != self.project:
            return False  # avoid move arguments between projects
        index = 0  # trick for insert as first child
        tran.TransactionalMoveObject(
            object=self, origin=self.parent, target=target, index=index)
        return True

    def __init__(self, **kwargs):
        """Initialization"""
        # keyword arguments:
        #   default : the default value (declaration) or the call value
        #   context : the use case; one of:
        #     'declare'  : the argument represents an argument declaration
        #     'value'    : a positional value in a call (the name is irrelevant)
        #     'keyword'  : a keyword value in a call (default is mandatory)
        #     'starargs' : a *args expansion in a call
        #     'kwargs'   : a **kwargs expansion in a call
        #
        # The normal context is 'declare'. The other contexts reuse this class
        # for a few cases, like decorator instantiations.
        self._default = kwargs.get('default', '')
        self._context = kwargs.get('context', 'declare')
        super(Argument, self).__init__(**kwargs)
        # A keyword argument must always carry a value.
        assert(self._context != 'keyword' or self._default)
        # Invalidate the cached code-generation timestamps of the owning
        # class or module so its sources get regenerated.
        container = self.outer_class or self.outer_module
        container._lastSrcTime = None
        container._lastHdrTime = None
        # Re-export the generated python code files, if we live inside one.
        k = self.inner_module or self.inner_package
        if k:
            k.ExportPythonCodeFiles()

    def Delete(self):
        """Handle delete"""
        # Capture the owning module/package before removal so the generated
        # code can be re-exported afterwards.
        k = self.inner_module or self.inner_package
        super(Argument, self).Delete()
        if k:
            k.ExportPythonCodeFiles()

    def get_kwargs(self):
        """Returns the kwargs needed for this object"""
        # NOTE(review): 'context' is not serialized here -- restored objects
        # fall back to the 'declare' default; confirm this is intended.
        kwargs = {}
        kwargs['default'] = self._default
        kwargs.update(super(Argument, self).get_kwargs())
        return kwargs

    def OnUndoRedoChanged(self):
        """Update from app"""
        # Propagate the change to the owning method/function first.
        self.parent.OnUndoRedoChanged()
        super(Argument, self).OnUndoRedoChanged()

    def OnUndoRedoRemoving(self):
        """Do required actions for removing"""
        super(Argument, self).OnUndoRedoRemoving()
        self.parent.OnUndoRedoChanged()

    def OnUndoRedoAdd(self):
        """Restore object from undo"""
        self.parent.OnUndoRedoChanged()
        super(Argument, self).OnUndoRedoAdd()

    @property
    def label(self):
        """Get tree label"""
        if self._context == 'declare':
            if self._default:
                return '{self._name}={self._default}'.format(self=self)
            else:
                return self._name
        if self._context == 'value':
            return self._name
        if self._context == 'keyword':
            return '{self._name}={self._default}'.format(self=self)
        if self._context == 'starargs':
            return '*{self._default}'.format(self=self)
        if self._context == 'kwargs':
            return '**{self._default}'.format(self=self)
        # NOTE(review): unknown contexts fall through and return None -- the
        # __init__ contract restricts contexts to the five handled above.

    @property
    def bitmap_index(self):
        """Index of tree image"""
        from beatle.app import resources as rc
        return rc.GetBitmapIndex('py_argument')
| gpl-2.0 |
HalcyonChimera/osf.io | website/registries/utils.py | 3 | 1344 | REG_CAMPAIGNS = {
'prereg': 'Prereg Challenge',
'registered_report': 'Registered Report Protocol Preregistration',
}
def get_campaign_schema(campaign):
from osf.models import RegistrationSchema
if campaign not in REG_CAMPAIGNS:
raise ValueError('campaign must be one of: {}'.format(', '.join(REG_CAMPAIGNS.keys())))
schema_name = REG_CAMPAIGNS[campaign]
return RegistrationSchema.objects.get(name=schema_name, schema_version=2)
def drafts_for_user(user, campaign=None):
    """Return the draft registrations the user may edit.

    Drafts are unapproved (``approval=None``), not yet turned into a
    registration, not deleted, and branched from a non-deleted node on
    which ``user`` is an admin contributor.

    :param user: the OSFUser whose drafts to fetch
    :param campaign: optional campaign key; when given, restrict to drafts
        using that campaign's registration schema
    :return: a DraftRegistration queryset
    """
    from osf.models import DraftRegistration, Node
    # Nodes where the user holds admin rights; evaluated lazily as a subquery.
    admin_node_ids = Node.objects.filter(
        is_deleted=False,
        contributor__admin=True,
        contributor__user=user).values_list('id', flat=True)
    # Build the common filters once instead of duplicating the whole query
    # in each branch; only the schema restriction is conditional.
    filters = {
        'approval': None,
        'registered_node': None,
        'deleted__isnull': True,
        'branched_from__in': admin_node_ids,
    }
    if campaign:
        filters['registration_schema'] = get_campaign_schema(campaign)
    return DraftRegistration.objects.filter(**filters)
| apache-2.0 |
rahul67/hue | desktop/core/ext-py/Django-1.6.10/django/conf/locale/de_CH/formats.py | 118 | 1448 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
# Display formats (Django date-format syntax, see link above).
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
)

# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/internet/test/test_udp.py | 11 | 15641 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorUDP} and the UDP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import socket
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python import context
from twisted.python.log import ILogContext, err
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.interfaces import (
ILoggingContext, IListeningPort, IReactorUDP, IReactorSocket)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.test.connectionmixins import (LogObserverMixin,
findFreePort)
from twisted.internet import defer, error
from twisted.test.test_udp import Server, GoodClient
from twisted.trial.unittest import SkipTest
class DatagramTransportTestsMixin(LogObserverMixin):
    """
    Mixin defining tests which apply to any port/datagram based transport.
    """
    def test_startedListeningLogMessage(self):
        """
        When a port starts, a message including a description of the associated
        protocol is logged.
        """
        loggedMessages = self.observe()
        reactor = self.buildReactor()

        @implementer(ILoggingContext)
        class SomeProtocol(DatagramProtocol):
            def logPrefix(self):
                # The prefix should appear verbatim in the start-up log line.
                return "Crazy Protocol"
        protocol = SomeProtocol()

        p = self.getListeningPort(reactor, protocol)
        expectedMessage = "Crazy Protocol starting on %d" % (p.getHost().port,)
        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])

    def test_connectionLostLogMessage(self):
        """
        When a connection is lost a message is logged containing an
        address identifying the port and the fact that it was closed.
        """
        loggedMessages = self.observe()
        reactor = self.buildReactor()
        p = self.getListeningPort(reactor, DatagramProtocol())
        expectedMessage = "(UDP Port %s Closed)" % (p.getHost().port,)

        def stopReactor(ignored):
            reactor.stop()

        def doStopListening():
            # Discard start-up messages so only the close message is checked.
            del loggedMessages[:]
            maybeDeferred(p.stopListening).addCallback(stopReactor)

        reactor.callWhenRunning(doStopListening)
        self.runReactor(reactor)

        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])

    def test_stopProtocolScheduling(self):
        """
        L{DatagramProtocol.stopProtocol} is called asynchronously (ie, not
        re-entrantly) when C{stopListening} is used to stop the datagram
        transport.
        """
        class DisconnectingProtocol(DatagramProtocol):

            started = False
            stopped = False
            inStartProtocol = False
            stoppedInStart = False

            def startProtocol(self):
                self.started = True
                self.inStartProtocol = True
                # Stopping from inside startProtocol must not call
                # stopProtocol re-entrantly.
                self.transport.stopListening()
                self.inStartProtocol = False

            def stopProtocol(self):
                self.stopped = True
                self.stoppedInStart = self.inStartProtocol
                reactor.stop()

        reactor = self.buildReactor()
        protocol = DisconnectingProtocol()
        self.getListeningPort(reactor, protocol)
        self.runReactor(reactor)

        self.assertTrue(protocol.started)
        self.assertTrue(protocol.stopped)
        self.assertFalse(protocol.stoppedInStart)
class UDPPortTestsMixin(object):
    """
    Tests for L{IReactorUDP.listenUDP} and
    L{IReactorSocket.adoptDatagramPort}.
    """
    def test_interface(self):
        """
        L{IReactorUDP.listenUDP} returns an object providing L{IListeningPort}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertTrue(verifyObject(IListeningPort, port))

    def test_getHost(self):
        """
        L{IListeningPort.getHost} returns an L{IPv4Address} giving a
        dotted-quad of the IPv4 address the port is listening on as well as
        the port number.
        """
        host, portNumber = findFreePort(type=socket.SOCK_DGRAM)
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), port=portNumber, interface=host)
        self.assertEqual(
            port.getHost(), IPv4Address('UDP', host, portNumber))

    def test_getHostIPv6(self):
        """
        L{IListeningPort.getHost} returns an L{IPv6Address} when listening on
        an IPv6 interface.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), interface='::1')
        addr = port.getHost()
        self.assertEqual(addr.host, "::1")
        self.assertIsInstance(addr, IPv6Address)

    def test_invalidInterface(self):
        """
        An L{InvalidAddressError} is raised when trying to listen on an address
        that isn't a valid IPv4 or IPv6 address.
        """
        reactor = self.buildReactor()
        self.assertRaises(
            error.InvalidAddressError, reactor.listenUDP, DatagramProtocol(),
            0, interface='example.com')

    def test_logPrefix(self):
        """
        Datagram transports implement L{ILoggingContext.logPrefix} to return a
        message reflecting the protocol they are running.
        """
        class CustomLogPrefixDatagramProtocol(DatagramProtocol):
            def __init__(self, prefix):
                self._prefix = prefix
                self.system = Deferred()

            def logPrefix(self):
                return self._prefix

            def datagramReceived(self, bytes, addr):
                # Fire the Deferred with the active log system name, once.
                if self.system is not None:
                    system = self.system
                    self.system = None
                    system.callback(context.get(ILogContext)["system"])

        reactor = self.buildReactor()
        protocol = CustomLogPrefixDatagramProtocol("Custom Datagrams")
        d = protocol.system
        port = self.getListeningPort(reactor, protocol)
        address = port.getHost()

        def gotSystem(system):
            self.assertEqual("Custom Datagrams (UDP)", system)
        d.addCallback(gotSystem)
        d.addErrback(err)
        d.addCallback(lambda ignored: reactor.stop())

        # Send ourselves a datagram to trigger datagramReceived.
        port.write(b"some bytes", ('127.0.0.1', address.port))
        self.runReactor(reactor)

    def test_str(self):
        """
        C{str()} on the listening port object includes the port number.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertIn(str(port.getHost().port), str(port))

    def test_repr(self):
        """
        C{repr()} on the listening port object includes the port number.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertIn(repr(port.getHost().port), str(port))

    def test_writeToIPv6Interface(self):
        """
        Writing to an IPv6 UDP socket on the loopback interface succeeds.
        """
        reactor = self.buildReactor()
        server = Server()
        serverStarted = server.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, server, interface="::1")

        client = GoodClient()
        clientStarted = client.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, client, interface="::1")
        cAddr = client.transport.getHost()

        def cbClientStarted(ignored):
            """
            Send a datagram from the client once it's started.

            @param ignored: a list of C{[None, None]}, which is ignored
            @returns: a deferred which fires when the server has received a
                datagram.
            """
            client.transport.write(
                b"spam", ("::1", server.transport.getHost().port))
            serverReceived = server.packetReceived = defer.Deferred()
            return serverReceived

        def cbServerReceived(ignored):
            """
            Stop the reactor after a datagram is received.

            @param ignored: C{None}, which is ignored
            @returns: C{None}
            """
            reactor.stop()

        d = defer.gatherResults([serverStarted, clientStarted])
        d.addCallback(cbClientStarted)
        d.addCallback(cbServerReceived)
        d.addErrback(err)
        self.runReactor(reactor)

        packet = server.packets[0]
        self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))

    def test_connectedWriteToIPv6Interface(self):
        """
        An IPv6 address can be passed as the C{interface} argument to
        L{listenUDP}. The resulting Port accepts IPv6 datagrams.
        """
        reactor = self.buildReactor()
        server = Server()
        serverStarted = server.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, server, interface="::1")

        client = GoodClient()
        clientStarted = client.startedDeferred = defer.Deferred()
        self.getListeningPort(reactor, client, interface="::1")
        cAddr = client.transport.getHost()

        def cbClientStarted(ignored):
            """
            Send a datagram from the client once it's started.

            @param ignored: a list of C{[None, None]}, which is ignored
            @returns: a deferred which fires when the server has received a
                datagram.
            """
            # Unlike test_writeToIPv6Interface, connect first and use the
            # connected form of write().
            client.transport.connect("::1", server.transport.getHost().port)
            client.transport.write(b"spam")
            serverReceived = server.packetReceived = defer.Deferred()
            return serverReceived

        def cbServerReceived(ignored):
            """
            Stop the reactor after a datagram is received.

            @param ignored: C{None}, which is ignored
            @returns: C{None}
            """
            reactor.stop()

        d = defer.gatherResults([serverStarted, clientStarted])
        d.addCallback(cbClientStarted)
        d.addCallback(cbServerReceived)
        d.addErrback(err)
        self.runReactor(reactor)

        packet = server.packets[0]
        self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))

    def test_writingToHostnameRaisesInvalidAddressError(self):
        """
        Writing to a hostname instead of an IP address will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertRaises(
            error.InvalidAddressError,
            port.write, 'spam', ('example.invalid', 1))

    def test_writingToIPv6OnIPv4RaisesInvalidAddressError(self):
        """
        Writing to an IPv6 address on an IPv4 socket will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), interface="127.0.0.1")
        self.assertRaises(
            error.InvalidAddressError, port.write, 'spam', ('::1', 1))

    def test_writingToIPv4OnIPv6RaisesInvalidAddressError(self):
        """
        Writing to an IPv4 address on an IPv6 socket will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(
            reactor, DatagramProtocol(), interface="::1")
        self.assertRaises(
            error.InvalidAddressError, port.write, 'spam', ('127.0.0.1', 1))

    def test_connectingToHostnameRaisesInvalidAddressError(self):
        """
        Connecting to a hostname instead of an IP address will raise an
        L{InvalidAddressError}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        self.assertRaises(
            error.InvalidAddressError, port.connect, 'example.invalid', 1)

    def test_allowBroadcast(self):
        """
        L{IListeningPort.setBroadcastAllowed} sets broadcast to be allowed
        on the socket.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, DatagramProtocol())
        port.setBroadcastAllowed(True)
        self.assertTrue(port.getBroadcastAllowed())
class UDPServerTestsBuilder(ReactorBuilder,
                            UDPPortTestsMixin, DatagramTransportTestsMixin):
    """
    Run L{UDPPortTestsMixin} tests using newly created UDP
    sockets.
    """
    requiredInterfaces = (IReactorUDP,)

    def getListeningPort(self, reactor, protocol, port=0, interface='',
                         maxPacketSize=8192):
        """
        Get a UDP port from a reactor.

        @param reactor: A reactor used to build the returned
            L{IListeningPort} provider.
        @type reactor: L{twisted.internet.interfaces.IReactorUDP}

        @see: L{twisted.internet.IReactorUDP.listenUDP} for other
            argument and return types.
        """
        return reactor.listenUDP(port, protocol, interface=interface,
                                 maxPacketSize=maxPacketSize)
class UDPFDServerTestsBuilder(ReactorBuilder,
                              UDPPortTestsMixin, DatagramTransportTestsMixin):
    """
    Run L{UDPPortTestsMixin} tests using adopted UDP sockets.
    """
    requiredInterfaces = (IReactorSocket,)

    def getListeningPort(self, reactor, protocol, port=0, interface='',
                         maxPacketSize=8192):
        """
        Get a UDP port from a reactor, wrapping an already-initialized file
        descriptor.

        @param reactor: A reactor used to build the returned
            L{IListeningPort} provider.
        @type reactor: L{twisted.internet.interfaces.IReactorSocket}

        @param port: A port number to which the adopted socket will be
            bound.
        @type port: C{int}

        @param interface: The local IPv4 or IPv6 address to which the
            adopted socket will be bound.  defaults to '', ie all IPv4
            addresses.
        @type interface: C{str}

        @see: L{twisted.internet.IReactorSocket.adoptDatagramPort} for other
            argument and return types.
        """
        if IReactorSocket.providedBy(reactor):
            # Pick the socket family from the interface syntax: a colon
            # means an IPv6 literal.
            if ':' in interface:
                domain = socket.AF_INET6
                address = socket.getaddrinfo(interface, port)[0][4]
            else:
                domain = socket.AF_INET
                address = (interface, port)
            portSock = socket.socket(domain, socket.SOCK_DGRAM)
            portSock.bind(address)
            portSock.setblocking(False)
            try:
                return reactor.adoptDatagramPort(
                    portSock.fileno(), portSock.family, protocol,
                    maxPacketSize)
            finally:
                # The socket should still be open; fileno will raise if it is
                # not.
                portSock.fileno()
                # Now clean it up, because the rest of the test does not need
                # it.
                portSock.close()
        else:
            raise SkipTest("Reactor does not provide IReactorSocket")
# Generate per-reactor TestCase subclasses for each builder and install
# them at module scope so trial's test loader discovers them.
globals().update(UDPServerTestsBuilder.makeTestCaseClasses())
globals().update(UDPFDServerTestsBuilder.makeTestCaseClasses())
| mit |
glwu/python-for-android | python3-alpha/python3-src/Lib/test/test_contextlib.py | 55 | 10288 | """Unit tests for contextlib.py, and other context managers."""
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
    """Behavior of the @contextmanager decorator."""

    def test_contextmanager_plain(self):
        # Code after the yield runs on normal exit from the with-block.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            yield 42
            state.append(999)
        with woohoo() as x:
            self.assertEqual(state, [1])
            self.assertEqual(x, 42)
            state.append(x)
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_finally(self):
        # The finally clause runs even when the with-body raises.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            try:
                yield 42
            finally:
                state.append(999)
        with self.assertRaises(ZeroDivisionError):
            with woohoo() as x:
                self.assertEqual(state, [1])
                self.assertEqual(x, 42)
                state.append(x)
                raise ZeroDivisionError()
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_no_reraise(self):
        # A generator that simply finishes swallows nothing: __exit__
        # returns a falsey value.
        @contextmanager
        def whee():
            yield
        ctx = whee()
        ctx.__enter__()
        # Calling __exit__ should not result in an exception
        self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))

    def test_contextmanager_trap_yield_after_throw(self):
        # A generator that yields again after a throw() is an error.
        @contextmanager
        def whoo():
            try:
                yield
            except:
                yield
        ctx = whoo()
        ctx.__enter__()
        self.assertRaises(
            RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
        )

    def test_contextmanager_except(self):
        # The generator may catch the exception raised inside the body.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            try:
                yield 42
            except ZeroDivisionError as e:
                state.append(e.args[0])
                self.assertEqual(state, [1, 42, 999])
        with woohoo() as x:
            self.assertEqual(state, [1])
            self.assertEqual(x, 42)
            state.append(x)
            raise ZeroDivisionError(999)
        self.assertEqual(state, [1, 42, 999])

    def _create_contextmanager_attribs(self):
        # Helper: build a decorated context manager function that carries
        # extra attributes and a docstring, for the wrapping tests below.
        def attribs(**kw):
            def decorate(func):
                for k, v in kw.items():
                    setattr(func, k, v)
                return func
            return decorate
        @contextmanager
        @attribs(foo='bar')
        def baz(spam):
            """Whee!"""
        return baz

    def test_contextmanager_attribs(self):
        # @contextmanager must preserve __name__ and function attributes.
        baz = self._create_contextmanager_attribs()
        self.assertEqual(baz.__name__, 'baz')
        self.assertEqual(baz.foo, 'bar')

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_contextmanager_doc_attrib(self):
        # @contextmanager must preserve the docstring as well.
        baz = self._create_contextmanager_attribs()
        self.assertEqual(baz.__doc__, "Whee!")
class ClosingTestCase(unittest.TestCase):
    """Behavior of contextlib.closing()."""

    # XXX This needs more work

    def test_closing(self):
        # close() is invoked exactly once on normal exit.
        state = []
        class C:
            def close(self):
                state.append(1)
        x = C()
        self.assertEqual(state, [])
        with closing(x) as y:
            self.assertEqual(x, y)
        self.assertEqual(state, [1])

    def test_closing_error(self):
        # close() is invoked even when the with-body raises.
        state = []
        class C:
            def close(self):
                state.append(1)
        x = C()
        self.assertEqual(state, [])
        with self.assertRaises(ZeroDivisionError):
            with closing(x) as y:
                self.assertEqual(x, y)
                1 / 0
        self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
    """Files used via the ``with`` statement are closed on exit, including
    when the body raises."""

    def testWithOpen(self):
        # NOTE: tempfile.mktemp is race-prone in general code; acceptable
        # here since the test only needs a unique path name.
        tfn = tempfile.mktemp()
        try:
            f = None
            with open(tfn, "w") as f:
                self.assertFalse(f.closed)
                f.write("Booh\n")
            self.assertTrue(f.closed)
            f = None
            with self.assertRaises(ZeroDivisionError):
                with open(tfn, "r") as f:
                    self.assertFalse(f.closed)
                    self.assertEqual(f.read(), "Booh\n")
                    1 / 0
            self.assertTrue(f.closed)
        finally:
            support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
    """All threading synchronization primitives work as context managers."""

    def boilerPlate(self, lock, locked):
        """Assert ``with lock:`` acquires and releases, including when the
        body raises.  ``locked`` is a zero-argument callable reporting
        whether the primitive is currently held."""
        self.assertFalse(locked())
        with lock:
            self.assertTrue(locked())
        self.assertFalse(locked())
        with self.assertRaises(ZeroDivisionError):
            with lock:
                self.assertTrue(locked())
                1 / 0
        self.assertFalse(locked())

    def testWithLock(self):
        lock = threading.Lock()
        self.boilerPlate(lock, lock.locked)

    def testWithRLock(self):
        lock = threading.RLock()
        self.boilerPlate(lock, lock._is_owned)

    def testWithCondition(self):
        lock = threading.Condition()
        def locked():
            return lock._is_owned()
        self.boilerPlate(lock, locked)

    def testWithSemaphore(self):
        lock = threading.Semaphore()
        def locked():
            # A semaphore has no "held" query: probe by trying to acquire.
            if lock.acquire(False):
                lock.release()
                return False
            else:
                return True
        self.boilerPlate(lock, locked)

    def testWithBoundedSemaphore(self):
        lock = threading.BoundedSemaphore()
        def locked():
            # Same probe technique as testWithSemaphore.
            if lock.acquire(False):
                lock.release()
                return False
            else:
                return True
        self.boilerPlate(lock, locked)
class mycontext(ContextDecorator):
    """Context-manager test double: records that it was entered and the
    exception info it was exited with.  Set ``catch`` to True to make
    __exit__ swallow the exception."""

    started = False
    exc = None
    catch = False

    def __enter__(self):
        # Remember that the context was actually entered.
        self.started = True
        return self

    def __exit__(self, *exc_info):
        # Stash the (type, value, traceback) triple for later inspection.
        self.exc = exc_info
        return self.catch
class TestContextDecorator(unittest.TestCase):
    """Behavior of contextlib.ContextDecorator, used both as a context
    manager and as a function decorator."""

    def test_contextdecorator(self):
        # Plain with-statement use: enter returns self, exit sees no error.
        context = mycontext()
        with context as result:
            self.assertIs(result, context)
            self.assertTrue(context.started)

        self.assertEqual(context.exc, (None, None, None))

    def test_contextdecorator_with_exception(self):
        # With catch=False the exception propagates; with catch=True it is
        # swallowed.  Either way the exc info is recorded.
        context = mycontext()

        with self.assertRaisesRegex(NameError, 'foo'):
            with context:
                raise NameError('foo')
        self.assertIsNotNone(context.exc)
        self.assertIs(context.exc[0], NameError)

        context = mycontext()
        context.catch = True
        with context:
            raise NameError('foo')
        self.assertIsNotNone(context.exc)
        self.assertIs(context.exc[0], NameError)

    def test_decorator(self):
        # Decorator use: the context wraps each call of the function.
        context = mycontext()

        @context
        def test():
            self.assertIsNone(context.exc)
            self.assertTrue(context.started)
        test()
        self.assertEqual(context.exc, (None, None, None))

    def test_decorator_with_exception(self):
        context = mycontext()

        @context
        def test():
            self.assertIsNone(context.exc)
            self.assertTrue(context.started)
            raise NameError('foo')

        with self.assertRaisesRegex(NameError, 'foo'):
            test()
        self.assertIsNotNone(context.exc)
        self.assertIs(context.exc[0], NameError)

    def test_decorating_method(self):
        context = mycontext()

        class Test(object):

            @context
            def method(self, a, b, c=None):
                self.a = a
                self.b = b
                self.c = c

        # these tests are for argument passing when used as a decorator
        test = Test()
        test.method(1, 2)
        self.assertEqual(test.a, 1)
        self.assertEqual(test.b, 2)
        self.assertEqual(test.c, None)

        test = Test()
        test.method('a', 'b', 'c')
        self.assertEqual(test.a, 'a')
        self.assertEqual(test.b, 'b')
        self.assertEqual(test.c, 'c')

        test = Test()
        test.method(a=1, b=2)
        self.assertEqual(test.a, 1)
        self.assertEqual(test.b, 2)

    def test_typo_enter(self):
        # A missing __enter__ surfaces as AttributeError at with-time.
        class mycontext(ContextDecorator):
            def __unter__(self):
                pass
            def __exit__(self, *exc):
                pass

        with self.assertRaises(AttributeError):
            with mycontext():
                pass

    def test_typo_exit(self):
        # A missing __exit__ surfaces as AttributeError at with-time.
        class mycontext(ContextDecorator):
            def __enter__(self):
                pass
            def __uxit__(self, *exc):
                pass

        with self.assertRaises(AttributeError):
            with mycontext():
                pass

    def test_contextdecorator_as_mixin(self):
        # ContextDecorator can add decorator support to an existing
        # context-manager class via multiple inheritance.
        class somecontext(object):
            started = False
            exc = None

            def __enter__(self):
                self.started = True
                return self

            def __exit__(self, *exc):
                self.exc = exc

        class mycontext(somecontext, ContextDecorator):
            pass

        context = mycontext()
        @context
        def test():
            self.assertIsNone(context.exc)
            self.assertTrue(context.started)
        test()
        self.assertEqual(context.exc, (None, None, None))

    def test_contextmanager_as_decorator(self):
        # @contextmanager-produced managers are also usable as decorators.
        @contextmanager
        def woohoo(y):
            state.append(y)
            yield
            state.append(999)

        state = []
        @woohoo(1)
        def test(x):
            self.assertEqual(state, [1])
            state.append(x)
        test('something')
        self.assertEqual(state, [1, 'something', 999])

        # Issue #11647: Ensure the decorated function is 'reusable'
        state = []
        test('something else')
        self.assertEqual(state, [1, 'something else', 999])
# This is needed to make the test actually run under regrtest.py!
def test_main():
    """regrtest entry point: run every TestCase in this module."""
    support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
vongazman/libcloud | libcloud/test/compute/test_deployment.py | 3 | 20466 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import time
import unittest
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import u
from libcloud.utils.py3 import PY3
from libcloud.compute.deployment import MultiStepDeployment, Deployment
from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment
from libcloud.compute.deployment import ScriptFileDeployment, FileDeployment
from libcloud.compute.base import Node
from libcloud.compute.types import NodeState, DeploymentError, LibcloudError
from libcloud.compute.ssh import BaseSSHClient
from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver as Rackspace
from libcloud.test import MockHttp, XML_HEADERS
from libcloud.test.file_fixtures import ComputeFileFixtures
from mock import Mock, patch
from libcloud.test.secrets import RACKSPACE_PARAMS
class MockDeployment(Deployment):
    """No-op deployment step used as a stand-in in MultiStepDeployment tests."""

    def run(self, node, client):
        # Pretend the step succeeded by handing the node back untouched.
        return node
class MockClient(BaseSSHClient):
    """Fake SSH client: performs no network I/O and reports success for
    every operation."""

    def __init__(self, *args, **kwargs):
        # Deliberately skip BaseSSHClient.__init__ - no real connection
        # is set up; only the canned result attributes are needed.
        self.stdout = ''
        self.stderr = ''
        self.exit_status = 0

    def put(self, path, contents, chmod=755, mode='w'):
        # Pretend the upload succeeded and echo the payload back.
        return contents

    def run(self, name):
        # Every command "succeeds" with the canned streams and status.
        return (self.stdout, self.stderr, self.exit_status)

    def delete(self, name):
        # Deletion always "succeeds".
        return True
class DeploymentTests(unittest.TestCase):
    """Tests for the deployment helpers, driven against a mocked Rackspace
    driver and fake SSH clients."""

    def setUp(self):
        # Route all HTTP through the canned Rackspace mock responses.
        Rackspace.connectionCls.conn_class = RackspaceMockHttp
        RackspaceMockHttp.type = None
        self.driver = Rackspace(*RACKSPACE_PARAMS)
        # normally authentication happens lazily, but we force it here
        self.driver.connection._populate_hosts_and_request_paths()
        self.driver.features = {'create_node': ['generates_password']}
        self.node = Node(id=12345, name='test', state=NodeState.RUNNING,
                         public_ips=['1.2.3.4'], private_ips=['1.2.3.5'],
                         driver=Rackspace)
        self.node2 = Node(id=123456, name='test', state=NodeState.RUNNING,
                          public_ips=['1.2.3.4'], private_ips=['1.2.3.5'],
                          driver=Rackspace)

    def test_multi_step_deployment(self):
        # Steps added to a MultiStepDeployment are tracked and executed.
        msd = MultiStepDeployment()
        self.assertEqual(len(msd.steps), 0)

        msd.add(MockDeployment())
        self.assertEqual(len(msd.steps), 1)

        self.assertEqual(self.node, msd.run(node=self.node, client=None))

    def test_ssh_key_deployment(self):
        # SSHKeyDeployment.run returns the node on success.
        sshd = SSHKeyDeployment(key='1234')

        self.assertEqual(self.node, sshd.run(node=self.node,
                                             client=MockClient(hostname='localhost')))

    def test_file_deployment(self):
        # use this file (__file__) for obtaining permissions
        target = os.path.join('/tmp', os.path.basename(__file__))
        fd = FileDeployment(__file__, target)
        self.assertEqual(target, fd.target)
        self.assertEqual(__file__, fd.source)
        self.assertEqual(self.node, fd.run(
            node=self.node, client=MockClient(hostname='localhost')))
def test_script_deployment(self):
sd1 = ScriptDeployment(script='foobar', delete=True)
sd2 = ScriptDeployment(script='foobar', delete=False)
sd3 = ScriptDeployment(
script='foobar', delete=False, name='foobarname')
self.assertTrue(sd1.name.find('deployment') != '1')
self.assertEqual(sd3.name, 'foobarname')
self.assertEqual(self.node, sd1.run(node=self.node,
client=MockClient(hostname='localhost')))
self.assertEqual(self.node, sd2.run(node=self.node,
client=MockClient(hostname='localhost')))
    def test_script_file_deployment(self):
        # ScriptFileDeployment reads the script body from disk.
        file_path = os.path.abspath(__file__)
        with open(file_path, 'rb') as fp:
            content = fp.read()

        if PY3:
            content = content.decode('utf-8')

        sfd1 = ScriptFileDeployment(script_file=file_path)
        self.assertEqual(sfd1.script, content)

    def test_script_deployment_relative_path(self):
        # A relative script name is executed from the path reported by the
        # SSH client's put().
        client = Mock()
        client.put.return_value = '/home/ubuntu/relative.sh'
        client.run.return_value = ('', '', 0)

        sd = ScriptDeployment(script='echo "foo"', name='relative.sh')
        sd.run(self.node, client)

        client.run.assert_called_once_with('/home/ubuntu/relative.sh')

    def test_script_deployment_absolute_path(self):
        # An absolute script name is executed as-is, ignoring put()'s path.
        client = Mock()
        client.put.return_value = '/home/ubuntu/relative.sh'
        client.run.return_value = ('', '', 0)

        sd = ScriptDeployment(script='echo "foo"', name='/root/relative.sh')
        sd.run(self.node, client)

        client.run.assert_called_once_with('/root/relative.sh')
    def test_script_deployment_with_arguments(self):
        # Arguments are appended to the command line; an empty args list
        # adds nothing.
        client = Mock()
        client.put.return_value = '/home/ubuntu/relative.sh'
        client.run.return_value = ('', '', 0)

        args = ['arg1', 'arg2', '--option1=test']
        sd = ScriptDeployment(script='echo "foo"', args=args,
                              name='/root/relative.sh')
        sd.run(self.node, client)

        expected = '/root/relative.sh arg1 arg2 --option1=test'
        client.run.assert_called_once_with(expected)

        client.reset_mock()

        args = []
        sd = ScriptDeployment(script='echo "foo"', args=args,
                              name='/root/relative.sh')
        sd.run(self.node, client)

        expected = '/root/relative.sh'
        client.run.assert_called_once_with(expected)

    def test_script_file_deployment_with_arguments(self):
        # ScriptFileDeployment passes its args through to the command line.
        file_path = os.path.abspath(__file__)
        client = Mock()
        client.put.return_value = '/home/ubuntu/relative.sh'
        client.run.return_value = ('', '', 0)

        args = ['arg1', 'arg2', '--option1=test', 'option2']
        sfd = ScriptFileDeployment(script_file=file_path, args=args,
                                   name='/root/relative.sh')
        sfd.run(self.node, client)

        expected = '/root/relative.sh arg1 arg2 --option1=test option2'
        client.run.assert_called_once_with(expected)

    def test_script_deployment_and_sshkey_deployment_argument_types(self):
        # Script/key arguments may be str, unicode, or a file-like object;
        # anything else raises TypeError.
        class FileObject(object):
            def __init__(self, name):
                self.name = name

            def read(self):
                return 'bar'

        ScriptDeployment(script='foobar')
        ScriptDeployment(script=u('foobar'))
        ScriptDeployment(script=FileObject('test'))

        SSHKeyDeployment(key='foobar')
        SSHKeyDeployment(key=u('foobar'))
        SSHKeyDeployment(key=FileObject('test'))

        try:
            ScriptDeployment(script=[])
        except TypeError:
            pass
        else:
            self.fail('TypeError was not thrown')

        try:
            SSHKeyDeployment(key={})
        except TypeError:
            pass
        else:
            self.fail('TypeError was not thrown')
def test_wait_until_running_running_instantly(self):
node2, ips = self.driver.wait_until_running(
nodes=[self.node], wait_period=1,
timeout=0.5)[0]
self.assertEqual(self.node.uuid, node2.uuid)
self.assertEqual(['67.23.21.33'], ips)
def test_wait_until_running_without_ip(self):
RackspaceMockHttp.type = 'NO_IP'
try:
node2, ips = self.driver.wait_until_running(
nodes=[self.node], wait_period=1,
timeout=0.5)[0]
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(e.value.find('Timed out after 0.5 second') != -1)
else:
self.fail('Exception was not thrown')
def test_wait_until_running_with_only_ipv6(self):
RackspaceMockHttp.type = 'IPV6'
try:
node2, ips = self.driver.wait_until_running(
nodes=[self.node], wait_period=1,
timeout=0.5)[0]
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(e.value.find('Timed out after 0.5 second') != -1)
else:
self.fail('Exception was not thrown')
def test_wait_until_running_with_ipv6_ok(self):
RackspaceMockHttp.type = 'IPV6'
node2, ips = self.driver.wait_until_running(
nodes=[self.node], wait_period=1, force_ipv4=False,
timeout=0.5)[0]
self.assertEqual(self.node.uuid, node2.uuid)
self.assertEqual(['2001:DB8::1'], ips)
def test_wait_until_running_running_after_1_second(self):
RackspaceMockHttp.type = '05_SECOND_DELAY'
node2, ips = self.driver.wait_until_running(
nodes=[self.node], wait_period=1,
timeout=0.5)[0]
self.assertEqual(self.node.uuid, node2.uuid)
self.assertEqual(['67.23.21.33'], ips)
def test_wait_until_running_running_after_1_second_private_ips(self):
RackspaceMockHttp.type = '05_SECOND_DELAY'
node2, ips = self.driver.wait_until_running(
nodes=[self.node], wait_period=1,
timeout=0.5, ssh_interface='private_ips')[0]
self.assertEqual(self.node.uuid, node2.uuid)
self.assertEqual(['10.176.168.218'], ips)
def test_wait_until_running_invalid_ssh_interface_argument(self):
try:
self.driver.wait_until_running(nodes=[self.node], wait_period=1,
ssh_interface='invalid')
except ValueError:
pass
else:
self.fail('Exception was not thrown')
def test_wait_until_running_timeout(self):
RackspaceMockHttp.type = 'TIMEOUT'
try:
self.driver.wait_until_running(nodes=[self.node], wait_period=0.1,
timeout=0.5)
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(e.value.find('Timed out') != -1)
else:
self.fail('Exception was not thrown')
def test_wait_until_running_running_node_missing_from_list_nodes(self):
RackspaceMockHttp.type = 'MISSING'
try:
self.driver.wait_until_running(nodes=[self.node], wait_period=0.1,
timeout=0.5)
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(e.value.find('Timed out after 0.5 second') != -1)
else:
self.fail('Exception was not thrown')
def test_wait_until_running_running_multiple_nodes_have_same_uuid(self):
RackspaceMockHttp.type = 'SAME_UUID'
try:
self.driver.wait_until_running(nodes=[self.node], wait_period=0.1,
timeout=0.5)
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(
e.value.find('Unable to match specified uuids') != -1)
else:
self.fail('Exception was not thrown')
def test_wait_until_running_running_wait_for_multiple_nodes(self):
RackspaceMockHttp.type = 'MULTIPLE_NODES'
nodes = self.driver.wait_until_running(
nodes=[self.node, self.node2], wait_period=0.1,
timeout=0.5)
self.assertEqual(self.node.uuid, nodes[0][0].uuid)
self.assertEqual(self.node2.uuid, nodes[1][0].uuid)
self.assertEqual(['67.23.21.33'], nodes[0][1])
self.assertEqual(['67.23.21.34'], nodes[1][1])
def test_ssh_client_connect_success(self):
mock_ssh_client = Mock()
mock_ssh_client.return_value = None
ssh_client = self.driver._ssh_client_connect(
ssh_client=mock_ssh_client,
timeout=0.5)
self.assertEqual(mock_ssh_client, ssh_client)
def test_ssh_client_connect_timeout(self):
mock_ssh_client = Mock()
mock_ssh_client.connect = Mock()
mock_ssh_client.connect.side_effect = IOError('bam')
try:
self.driver._ssh_client_connect(ssh_client=mock_ssh_client,
timeout=0.5)
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(e.value.find('Giving up') != -1)
else:
self.fail('Exception was not thrown')
def test_run_deployment_script_success(self):
task = Mock()
ssh_client = Mock()
ssh_client2 = self.driver._run_deployment_script(task=task,
node=self.node,
ssh_client=ssh_client,
max_tries=2)
self.assertTrue(isinstance(ssh_client2, Mock))
def test_run_deployment_script_exception(self):
task = Mock()
task.run = Mock()
task.run.side_effect = Exception('bar')
ssh_client = Mock()
try:
self.driver._run_deployment_script(task=task,
node=self.node,
ssh_client=ssh_client,
max_tries=2)
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(e.value.find('Failed after 2 tries') != -1)
else:
self.fail('Exception was not thrown')
@patch('libcloud.compute.base.SSHClient')
@patch('libcloud.compute.ssh')
def test_deploy_node_success(self, mock_ssh_module, _):
self.driver.create_node = Mock()
self.driver.create_node.return_value = self.node
mock_ssh_module.have_paramiko = True
deploy = Mock()
node = self.driver.deploy_node(deploy=deploy)
self.assertEqual(self.node.id, node.id)
@patch('libcloud.compute.base.SSHClient')
@patch('libcloud.compute.ssh')
def test_deploy_node_exception_run_deployment_script(self, mock_ssh_module,
_):
self.driver.create_node = Mock()
self.driver.create_node.return_value = self.node
mock_ssh_module.have_paramiko = True
deploy = Mock()
deploy.run = Mock()
deploy.run.side_effect = Exception('foo')
try:
self.driver.deploy_node(deploy=deploy)
except DeploymentError:
e = sys.exc_info()[1]
self.assertTrue(e.node.id, self.node.id)
else:
self.fail('Exception was not thrown')
@patch('libcloud.compute.base.SSHClient')
@patch('libcloud.compute.ssh')
def test_deploy_node_exception_ssh_client_connect(self, mock_ssh_module,
ssh_client):
self.driver.create_node = Mock()
self.driver.create_node.return_value = self.node
mock_ssh_module.have_paramiko = True
deploy = Mock()
ssh_client.side_effect = IOError('bar')
try:
self.driver.deploy_node(deploy=deploy)
except DeploymentError:
e = sys.exc_info()[1]
self.assertTrue(e.node.id, self.node.id)
else:
self.fail('Exception was not thrown')
@patch('libcloud.compute.ssh')
def test_deploy_node_depoy_node_not_implemented(self, mock_ssh_module):
self.driver.features = {'create_node': []}
mock_ssh_module.have_paramiko = True
try:
self.driver.deploy_node(deploy=Mock())
except NotImplementedError:
pass
else:
self.fail('Exception was not thrown')
self.driver.features = {}
try:
self.driver.deploy_node(deploy=Mock())
except NotImplementedError:
pass
else:
self.fail('Exception was not thrown')
@patch('libcloud.compute.base.SSHClient')
@patch('libcloud.compute.ssh')
def test_deploy_node_password_auth(self, mock_ssh_module, _):
self.driver.features = {'create_node': ['password']}
mock_ssh_module.have_paramiko = True
self.driver.create_node = Mock()
self.driver.create_node.return_value = self.node
node = self.driver.deploy_node(deploy=Mock())
self.assertEqual(self.node.id, node.id)
@patch('libcloud.compute.base.SSHClient')
@patch('libcloud.compute.ssh')
def test_exception_is_thrown_is_paramiko_is_not_available(self,
mock_ssh_module,
_):
self.driver.features = {'create_node': ['password']}
self.driver.create_node = Mock()
self.driver.create_node.return_value = self.node
mock_ssh_module.have_paramiko = False
try:
self.driver.deploy_node(deploy=Mock())
except RuntimeError:
e = sys.exc_info()[1]
self.assertTrue(str(e).find('paramiko is not installed') != -1)
else:
self.fail('Exception was not thrown')
mock_ssh_module.have_paramiko = True
node = self.driver.deploy_node(deploy=Mock())
self.assertEqual(self.node.id, node.id)
class RackspaceMockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack')
def _v2_0_tokens(self, method, url, body, headers):
body = self.fixtures.load('_v2_0__auth_deployment.json')
headers = {
'content-type': 'application/json'
}
return (httplib.OK, body, headers,
httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_success.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_05_SECOND_DELAY(self, method, url, body, headers):
time.sleep(0.5)
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_success.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_TIMEOUT(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_pending.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_MISSING(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_missing.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_SAME_UUID(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_same_uuid.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_MULTIPLE_NODES(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_multiple_nodes.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_IPV6(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_ipv6.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_NO_IP(self, method, url, body, headers):
body = self.fixtures.load(
'v1_slug_servers_detail_deployment_no_ip.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
anaruse/chainer | chainer/links/connection/deformable_convolution_2d.py | 1 | 5529 | from chainer.functions import deformable_convolution_2d_sampler
from chainer import initializers
from chainer.initializers import constant
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer import variable
class DeformableConvolution2D(link.Chain):
"""Two-dimensional deformable convolutional layer.
This link wraps the
convolution layer for offset prediction and
the :func:`~chainer.functions.deformable_convolution_2d_sampler`
function.
This also holds the filter weights and bias vectors of two
convolution layers as parameters.
Args:
in_channels (int): Number of channels of input arrays. If ``None``,
parameter initialization will be deferred until the first forward
data pass at which time the size will be determined.
channel_multiplier (int): Channel multiplier number. Number of output
arrays equal ``in_channels * channel_multiplier``.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
offset_nobias (bool): If ``True``, then this link does not use the
bias term for the first convolution layer.
offset_initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight of the first convolution layer.
When it is :class:`numpy.ndarray`, its ``ndim`` should be 4.
offset_initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias of the first convolution layer.
If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
deform_nobias (bool): If ``True``, then this link does not use the
bias term for the second convolution layer.
deform_initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight for the second convolution layer.
When it is :class:`numpy.ndarray`,
its ``ndim`` should be 4.
deform_initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias for the second convolution layer.
If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
.. seealso::
See :func:`chainer.functions.deformable_convolution_2d_sampler`.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
offset_nobias=False, offset_initialW=None,
offset_initial_bias=None,
deform_nobias=False,
deform_initialW=None, deform_initial_bias=None):
super(DeformableConvolution2D, self).__init__()
kh, kw = _pair(ksize)
with self.init_scope():
self.offset_conv = Convolution2D(
in_channels, 2 * kh * kw, ksize, stride, pad,
offset_nobias, offset_initialW, offset_initial_bias)
self.deform_conv = DeformableConvolution2DSampler(
in_channels, out_channels, ksize, stride, pad,
deform_nobias, deform_initialW, deform_initial_bias)
def __call__(self, x):
"""Applies the deformable convolution.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the deformable convolution.
"""
offset = self.offset_conv(x)
return self.deform_conv(x, offset)
class DeformableConvolution2DSampler(link.Link):
"""Apply a two-dimensional deformable convolution layer using offsets"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
nobias=False, initialW=None, initial_bias=None):
super(DeformableConvolution2DSampler, self).__init__()
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
self.out_channels = out_channels
self.initialW = initialW
if initialW is None:
initialW = constant.Zero()
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = initializers.Constant(0)
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
def _initialize_params(self, in_channels):
kh, kw = _pair(self.ksize)
W_shape = (self.out_channels, in_channels, kh, kw)
self.W.initialize(W_shape)
if self.b is not None:
self.b.initialize(self.out_channels)
def __call__(self, x, offset):
if self.W.data is None:
self._initialize_params(x.shape[1])
return deformable_convolution_2d_sampler(
x, offset, self.W, self.b, self.stride, self.pad)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/zope.interface-4.1.2/src/zope/interface/interfaces.py | 49 | 43112 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interface Package Interfaces
"""
__docformat__ = 'restructuredtext'
from zope.interface.interface import Attribute
from zope.interface.interface import Interface
from zope.interface.declarations import implementer
from zope.interface._compat import _u
_BLANK = _u('')
class IElement(Interface):
"""Objects that have basic documentation and tagged values.
"""
__name__ = Attribute('__name__', 'The object name')
__doc__ = Attribute('__doc__', 'The object doc string')
def getTaggedValue(tag):
"""Returns the value associated with `tag`.
Raise a `KeyError` of the tag isn't set.
"""
def queryTaggedValue(tag, default=None):
"""Returns the value associated with `tag`.
Return the default value of the tag isn't set.
"""
def getTaggedValueTags():
"""Returns a list of all tags."""
def setTaggedValue(tag, value):
"""Associates `value` with `key`."""
class IAttribute(IElement):
"""Attribute descriptors"""
interface = Attribute('interface',
'Stores the interface instance in which the '
'attribute is located.')
class IMethod(IAttribute):
"""Method attributes"""
def getSignatureInfo():
"""Returns the signature information.
This method returns a dictionary with the following keys:
o `positional` - All positional arguments.
o `required` - A list of all required arguments.
o `optional` - A list of all optional arguments.
o `varargs` - The name of the varargs argument.
o `kwargs` - The name of the kwargs argument.
"""
def getSignatureString():
"""Return a signature string suitable for inclusion in documentation.
This method returns the function signature string. For example, if you
have `func(a, b, c=1, d='f')`, then the signature string is `(a, b,
c=1, d='f')`.
"""
class ISpecification(Interface):
"""Object Behavioral specifications"""
def providedBy(object):
"""Test whether the interface is implemented by the object
Return true of the object asserts that it implements the
interface, including asserting that it implements an extended
interface.
"""
def implementedBy(class_):
"""Test whether the interface is implemented by instances of the class
Return true of the class asserts that its instances implement the
interface, including asserting that they implement an extended
interface.
"""
def isOrExtends(other):
"""Test whether the specification is or extends another
"""
def extends(other, strict=True):
"""Test whether a specification extends another
The specification extends other if it has other as a base
interface or if one of it's bases extends other.
If strict is false, then the specification extends itself.
"""
def weakref(callback=None):
"""Return a weakref to the specification
This method is, regrettably, needed to allow weakrefs to be
computed to security-proxied specifications. While the
zope.interface package does not require zope.security or
zope.proxy, it has to be able to coexist with it.
"""
__bases__ = Attribute("""Base specifications
A tuple if specifications from which this specification is
directly derived.
""")
__sro__ = Attribute("""Specification-resolution order
A tuple of the specification and all of it's ancestor
specifications from most specific to least specific.
(This is similar to the method-resolution order for new-style classes.)
""")
__iro__ = Attribute("""Interface-resolution order
A tuple of the of the specification's ancestor interfaces from
most specific to least specific. The specification itself is
included if it is an interface.
(This is similar to the method-resolution order for new-style classes.)
""")
def get(name, default=None):
"""Look up the description for a name
If the named attribute is not defined, the default is
returned.
"""
class IInterface(ISpecification, IElement):
"""Interface objects
Interface objects describe the behavior of an object by containing
useful information about the object. This information includes:
o Prose documentation about the object. In Python terms, this
is called the "doc string" of the interface. In this element,
you describe how the object works in prose language and any
other useful information about the object.
o Descriptions of attributes. Attribute descriptions include
the name of the attribute and prose documentation describing
the attributes usage.
o Descriptions of methods. Method descriptions can include:
- Prose "doc string" documentation about the method and its
usage.
- A description of the methods arguments; how many arguments
are expected, optional arguments and their default values,
the position or arguments in the signature, whether the
method accepts arbitrary arguments and whether the method
accepts arbitrary keyword arguments.
o Optional tagged data. Interface objects (and their attributes and
methods) can have optional, application specific tagged data
associated with them. Examples uses for this are examples,
security assertions, pre/post conditions, and other possible
information you may want to associate with an Interface or its
attributes.
Not all of this information is mandatory. For example, you may
only want the methods of your interface to have prose
documentation and not describe the arguments of the method in
exact detail. Interface objects are flexible and let you give or
take any of these components.
Interfaces are created with the Python class statement using
either Interface.Interface or another interface, as in::
from zope.interface import Interface
class IMyInterface(Interface):
'''Interface documentation'''
def meth(arg1, arg2):
'''Documentation for meth'''
# Note that there is no self argument
class IMySubInterface(IMyInterface):
'''Interface documentation'''
def meth2():
'''Documentation for meth2'''
You use interfaces in two ways:
o You assert that your object implement the interfaces.
There are several ways that you can assert that an object
implements an interface:
1. Call zope.interface.implements in your class definition.
2. Call zope.interfaces.directlyProvides on your object.
3. Call 'zope.interface.classImplements' to assert that instances
of a class implement an interface.
For example::
from zope.interface import classImplements
classImplements(some_class, some_interface)
This approach is useful when it is not an option to modify
the class source. Note that this doesn't affect what the
class itself implements, but only what its instances
implement.
o You query interface meta-data. See the IInterface methods and
attributes for details.
"""
def names(all=False):
"""Get the interface attribute names
Return a sequence of the names of the attributes, including
methods, included in the interface definition.
Normally, only directly defined attributes are included. If
a true positional or keyword argument is given, then
attributes defined by base classes will be included.
"""
def namesAndDescriptions(all=False):
"""Get the interface attribute names and descriptions
Return a sequence of the names and descriptions of the
attributes, including methods, as name-value pairs, included
in the interface definition.
Normally, only directly defined attributes are included. If
a true positional or keyword argument is given, then
attributes defined by base classes will be included.
"""
def __getitem__(name):
"""Get the description for a name
If the named attribute is not defined, a KeyError is raised.
"""
def direct(name):
"""Get the description for the name if it was defined by the interface
If the interface doesn't define the name, returns None.
"""
def validateInvariants(obj, errors=None):
"""Validate invariants
Validate object to defined invariants. If errors is None,
raises first Invalid error; if errors is a list, appends all errors
to list, then raises Invalid with the errors as the first element
of the "args" tuple."""
def __contains__(name):
"""Test whether the name is defined by the interface"""
def __iter__():
"""Return an iterator over the names defined by the interface
The names iterated include all of the names defined by the
interface directly and indirectly by base interfaces.
"""
__module__ = Attribute("""The name of the module defining the interface""")
class IDeclaration(ISpecification):
"""Interface declaration
Declarations are used to express the interfaces implemented by
classes or provided by objects.
"""
def __contains__(interface):
"""Test whether an interface is in the specification
Return true if the given interface is one of the interfaces in
the specification and false otherwise.
"""
def __iter__():
"""Return an iterator for the interfaces in the specification
"""
def flattened():
"""Return an iterator of all included and extended interfaces
An iterator is returned for all interfaces either included in
or extended by interfaces included in the specifications
without duplicates. The interfaces are in "interface
resolution order". The interface resolution order is such that
base interfaces are listed after interfaces that extend them
and, otherwise, interfaces are included in the order that they
were defined in the specification.
"""
def __sub__(interfaces):
"""Create an interface specification with some interfaces excluded
The argument can be an interface or an interface
specifications. The interface or interfaces given in a
specification are subtracted from the interface specification.
Removing an interface that is not in the specification does
not raise an error. Doing so has no effect.
Removing an interface also removes sub-interfaces of the interface.
"""
def __add__(interfaces):
"""Create an interface specification with some interfaces added
The argument can be an interface or an interface
specifications. The interface or interfaces given in a
specification are added to the interface specification.
Adding an interface that is already in the specification does
not raise an error. Doing so has no effect.
"""
def __nonzero__():
"""Return a true value of the interface specification is non-empty
"""
class IInterfaceDeclaration(Interface):
"""Declare and check the interfaces of objects
The functions defined in this interface are used to declare the
interfaces that objects provide and to query the interfaces that have
been declared.
Interfaces can be declared for objects in two ways:
- Interfaces are declared for instances of the object's class
- Interfaces are declared for the object directly.
The interfaces declared for an object are, therefore, the union of
interfaces declared for the object directly and the interfaces
declared for instances of the object's class.
Note that we say that a class implements the interfaces provided
by it's instances. An instance can also provide interfaces
directly. The interfaces provided by an object are the union of
the interfaces provided directly and the interfaces implemented by
the class.
"""
def providedBy(ob):
"""Return the interfaces provided by an object
This is the union of the interfaces directly provided by an
object and interfaces implemented by it's class.
The value returned is an IDeclaration.
"""
def implementedBy(class_):
"""Return the interfaces implemented for a class' instances
The value returned is an IDeclaration.
"""
def classImplements(class_, *interfaces):
"""Declare additional interfaces implemented for instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Consider the following example::
class C(A, B):
...
classImplements(C, I1, I2)
Instances of ``C`` provide ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` provide.
"""
def implementer(*interfaces):
"""Create a decorator for declaring interfaces implemented by a facory
A callable is returned that makes an implements declaration on
objects passed to it.
"""
def classImplementsOnly(class_, *interfaces):
"""Declare the only interfaces implemented by instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace any previous declarations.
Consider the following example::
class C(A, B):
...
classImplements(C, IA, IB. IC)
classImplementsOnly(C. I1, I2)
Instances of ``C`` provide only ``I1``, ``I2``, and regardless of
whatever interfaces instances of ``A`` and ``B`` implement.
"""
def implementer_only(*interfaces):
"""Create a decorator for declaring the only interfaces implemented
A callable is returned that makes an implements declaration on
objects passed to it.
"""
def directlyProvidedBy(object):
"""Return the interfaces directly provided by the given object
The value returned is an IDeclaration.
"""
def directlyProvides(object, *interfaces):
"""Declare interfaces declared directly for an object
The arguments after the object are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace interfaces previously
declared for the object.
Consider the following example::
class C(A, B):
...
ob = C()
directlyProvides(ob, I1, I2)
The object, ``ob`` provides ``I1``, ``I2``, and whatever interfaces
instances have been declared for instances of ``C``.
To remove directly provided interfaces, use ``directlyProvidedBy`` and
subtract the unwanted interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob)-I2)
removes I2 from the interfaces directly provided by
``ob``. The object, ``ob`` no longer directly provides ``I2``,
although it might still provide ``I2`` if it's class
implements ``I2``.
To add directly provided interfaces, use ``directlyProvidedBy`` and
include additional interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob), I2)
adds I2 to the interfaces directly provided by ob.
"""
def alsoProvides(object, *interfaces):
"""Declare additional interfaces directly for an object::
alsoProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob), I1)
"""
def noLongerProvides(object, interface):
"""Remove an interface from the list of an object's directly
provided interfaces::
noLongerProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob)-I1)
with the exception that if ``I1`` is an interface that is
provided by ``ob`` through the class's implementation,
ValueError is raised.
"""
def implements(*interfaces):
"""Declare interfaces implemented by instances of a class
This function is called in a class definition (Python 2.x only).
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Previous declarations include declarations for base classes
unless implementsOnly was used.
This function is provided for convenience. It provides a more
convenient way to call classImplements. For example::
implements(I1)
is equivalent to calling::
classImplements(C, I1)
after the class has been created.
Consider the following example (Python 2.x only)::
class C(A, B):
implements(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` implement.
"""
def implementsOnly(*interfaces):
"""Declare the only interfaces implemented by instances of a class
This function is called in a class definition (Python 2.x only).
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
Previous declarations including declarations for base classes
are overridden.
This function is provided for convenience. It provides a more
convenient way to call classImplementsOnly. For example::
implementsOnly(I1)
is equivalent to calling::
classImplementsOnly(I1)
after the class has been created.
Consider the following example (Python 2.x only)::
class C(A, B):
implementsOnly(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, regardless of what
instances of ``A`` and ``B`` implement.
"""
def classProvides(*interfaces):
"""Declare interfaces provided directly by a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The given interfaces (including the interfaces in the
specifications) are used to create the class's direct-object
interface specification. An error will be raised if the module
class has an direct interface specification. In other words, it is
an error to call this function more than once in a class
definition.
Note that the given interfaces have nothing to do with the
interfaces implemented by instances of the class.
This function is provided for convenience. It provides a more
convenient way to call directlyProvides for a class. For example::
classProvides(I1)
is equivalent to calling::
directlyProvides(theclass, I1)
after the class has been created.
"""
def provider(*interfaces):
"""A class decorator version of classProvides"""
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module

    This function is used in a module definition.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    The given interfaces (including the interfaces in the
    specifications) are used to create the module's direct-object
    interface specification.  An error will be raised if the module
    already has an interface specification.  In other words, it is
    an error to call this function more than once in a module
    definition.

    This function is provided for convenience. It provides a more
    convenient way to call directlyProvides for a module. For example::

      moduleProvides(I1)

    is equivalent to::

      directlyProvides(sys.modules[__name__], I1)
    """
def Declaration(*interfaces):
    """Create an interface specification

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    A new interface specification (IDeclaration) with
    the given interfaces is returned.
    """
class IAdapterRegistry(Interface):
    """Provide an interface-based registry for adapters

    This registry registers objects that are in some sense "from" a
    sequence of specification to an interface and a name.

    No specific semantics are assumed for the registered objects,
    however, the most common application will be to register factories
    that adapt objects providing required specifications to a provided
    interface.
    """

    # NOTE: methods are declared without `self`, per zope.interface
    # convention for Interface bodies.

    def register(required, provided, name, value):
        """Register a value

        A value is registered for a *sequence* of required specifications, a
        provided interface, and a name.
        """

    def registered(required, provided, name=_BLANK):
        """Return the component registered for the given interfaces and name

        Unlike the lookup method, this method won't retrieve
        components registered for more specific required interfaces or
        less specific provided interfaces.

        If no component was registered exactly for the given
        interfaces and name, then None is returned.
        """

    def lookup(required, provided, name='', default=None):
        """Lookup a value

        A value is looked up based on a *sequence* of required
        specifications, a provided interface, and a name.
        """

    def queryMultiAdapter(objects, provided, name=_BLANK, default=None):
        """Adapt a sequence of objects to a named, provided, interface
        """

    def lookup1(required, provided, name=_BLANK, default=None):
        """Lookup a value using a single required interface

        A value is looked up based on a single required
        specification, a provided interface, and a name.
        """

    def queryAdapter(object, provided, name=_BLANK, default=None):
        """Adapt an object using a registered adapter factory.
        """

    def adapter_hook(provided, object, name=_BLANK, default=None):
        """Adapt an object using a registered adapter factory.
        """

    def lookupAll(required, provided):
        """Find all adapters from the required to the provided interfaces

        An iterable object is returned that provides name-value two-tuples.
        """

    def names(required, provided):
        """Return the names for which there are registered objects
        """

    def subscribe(required, provided, subscriber, name=_BLANK):
        """Register a subscriber

        A subscriber is registered for a *sequence* of required
        specifications, a provided interface, and a name.

        Multiple subscribers may be registered for the same (or
        equivalent) interfaces.
        """

    def subscriptions(required, provided, name=_BLANK):
        """Get a sequence of subscribers

        Subscribers for a *sequence* of required interfaces, and a provided
        interface are returned.
        """

    def subscribers(objects, provided, name=_BLANK):
        """Get a sequence of subscription adapters
        """
# begin formerly in zope.component
# Raised by the get* lookup methods (e.g. getAdapter, getMultiAdapter)
# when no matching component is registered.
class ComponentLookupError(LookupError):
    """A component could not be found."""
class Invalid(Exception):
    """A component doesn't satisfy a promise."""
class IObjectEvent(Interface):
    """An event related to an object.

    The object that generated this event is not necessarily the object
    referred to by location.
    """

    object = Attribute("The subject of the event.")
@implementer(IObjectEvent)
class ObjectEvent(object):
    """Default IObjectEvent implementation: wraps the event's subject."""

    def __init__(self, object):
        # `object` is the subject of the event (see IObjectEvent.object).
        self.object = object
class IComponentLookup(Interface):
    """Component Manager for a Site

    This object manages the components registered at a particular site. The
    definition of a site is intentionally vague.
    """

    adapters = Attribute(
        "Adapter Registry to manage all registered adapters.")

    utilities = Attribute(
        "Adapter Registry to manage all registered utilities.")

    def queryAdapter(object, interface, name=_BLANK, default=None):
        """Look for a named adapter to an interface for an object

        If a matching adapter cannot be found, returns the default.
        """

    def getAdapter(object, interface, name=_BLANK):
        """Look for a named adapter to an interface for an object

        If a matching adapter cannot be found, a ComponentLookupError
        is raised.
        """

    def queryMultiAdapter(objects, interface, name=_BLANK, default=None):
        """Look for a multi-adapter to an interface for multiple objects

        If a matching adapter cannot be found, returns the default.
        """

    def getMultiAdapter(objects, interface, name=_BLANK):
        """Look for a multi-adapter to an interface for multiple objects

        If a matching adapter cannot be found, a ComponentLookupError
        is raised.
        """

    def getAdapters(objects, provided):
        """Look for all matching adapters to a provided interface for objects

        Return an iterable of name-adapter pairs for adapters that
        provide the given interface.
        """

    def subscribers(objects, provided):
        """Get subscribers

        Subscribers are returned that provide the provided interface
        and that depend on and are computed from the sequence of
        required objects.
        """

    def handle(*objects):
        """Call handlers for the given objects

        Handlers registered for the given objects are called.
        """

    def queryUtility(interface, name='', default=None):
        """Look up a utility that provides an interface.

        If one is not found, returns default.
        """

    def getUtilitiesFor(interface):
        """Look up the registered utilities that provide an interface.

        Returns an iterable of name-utility pairs.
        """

    def getAllUtilitiesRegisteredFor(interface):
        """Return all registered utilities for an interface

        This includes overridden utilities.

        An iterable of utility instances is returned.  No names are
        returned.
        """
class IRegistration(Interface):
    """A registration-information object
    """

    registry = Attribute("The registry having the registration")

    name = Attribute("The registration name")

    info = Attribute("""Information about the registration

    This is information deemed useful to people browsing the
    configuration of a system. It could, for example, include
    commentary or information about the source of the configuration.
    """)
class IUtilityRegistration(IRegistration):
    """Information about the registration of a utility
    """

    factory = Attribute("The factory used to create the utility. Optional.")

    component = Attribute("The object registered")

    provided = Attribute("The interface provided by the component")
class _IBaseAdapterRegistration(IRegistration):
    """Information about the registration of an adapter

    Shared base for IAdapterRegistration and
    ISubscriptionAdapterRegistration.
    """

    factory = Attribute("The factory used to create adapters")

    # Fixed typos in the attribute documentation strings:
    # "adapters by" -> "adapted by", "caled" -> "called".
    required = Attribute("""The adapted interfaces

    This is a sequence of interfaces adapted by the registered
    factory.  The factory will be called with a sequence of objects, as
    positional arguments, that provide these interfaces.
    """)

    provided = Attribute("""The interface provided by the adapters.

    This interface is implemented by the factory
    """)
class IAdapterRegistration(_IBaseAdapterRegistration):
    """Information about the registration of an adapter
    """
class ISubscriptionAdapterRegistration(_IBaseAdapterRegistration):
    """Information about the registration of a subscription adapter
    """
class IHandlerRegistration(IRegistration):
    """Information about the registration of a handler."""

    # Fixed garbled attribute documentation strings:
    # "called used to handle" -> "called to handle", "caled" -> "called".
    handler = Attribute("An object called to handle an event")

    required = Attribute("""The handled interfaces

    This is a sequence of interfaces handled by the registered
    handler.  The handler will be called with a sequence of objects, as
    positional arguments, that provide these interfaces.
    """)
# Base interface for registration/unregistration notifications.
class IRegistrationEvent(IObjectEvent):
    """An event that involves a registration"""
@implementer(IRegistrationEvent)
class RegistrationEvent(ObjectEvent):
    """There has been a change in a registration
    """
    def __repr__(self):
        # e.g. "Registered event:\n<registration object repr>"
        return "%s event:\n%r" % (self.__class__.__name__, self.object)
class IRegistered(IRegistrationEvent):
    """A component or factory was registered
    """
@implementer(IRegistered)
class Registered(RegistrationEvent):
    """A component or factory was registered."""
    pass
class IUnregistered(IRegistrationEvent):
    """A component or factory was unregistered
    """
@implementer(IUnregistered)
class Unregistered(RegistrationEvent):
    """A component or factory was unregistered
    """
    # Removed a redundant `pass`: the docstring already forms the class body.
class IComponentRegistry(Interface):
    """Register components
    """

    def registerUtility(component=None, provided=None, name=_BLANK,
                        info=_BLANK, factory=None):
        """Register a utility

        factory
            Factory for the component to be registered.

        component
            The registered component

        provided
            This is the interface provided by the utility.  If the
            component provides a single interface, then this
            argument is optional and the component-implemented
            interface will be used.

        name
            The utility name.

        info
            An object that can be converted to a string to provide
            information about the registration.

        Only one of component and factory can be used.

        A Registered event is generated with an IUtilityRegistration.
        """

    def unregisterUtility(component=None, provided=None, name=_BLANK,
                          factory=None):
        """Unregister a utility

        A boolean is returned indicating whether the registry was
        changed.  If the given component is None and there is no
        component registered, or if the given component is not
        None and is not registered, then the function returns
        False, otherwise it returns True.

        factory
            Factory for the component to be unregistered.

        component
            The registered component.  The given component can be
            None, in which case any component registered to provide
            the given provided interface with the given name is
            unregistered.

        provided
            This is the interface provided by the utility.  If the
            component is not None and provides a single interface,
            then this argument is optional and the
            component-implemented interface will be used.

        name
            The utility name.

        Only one of component and factory can be used.

        An Unregistered event is generated with an IUtilityRegistration.
        """

    def registeredUtilities():
        """Return an iterable of IUtilityRegistration instances.

        These registrations describe the current utility registrations
        in the object.
        """

    def registerAdapter(factory, required=None, provided=None, name=_BLANK,
                        info=_BLANK):
        """Register an adapter factory

        Parameters:

        factory
            The object used to compute the adapter

        required
            This is a sequence of specifications for objects to be
            adapted.  If omitted, then the value of the factory's
            __component_adapts__ attribute will be used.  The
            __component_adapts__ attribute is normally set in class
            definitions using the adapts function, or for callables
            using the adapter decorator.  If the factory doesn't have
            a __component_adapts__ attribute, then this argument is
            required.

        provided
            This is the interface provided by the adapter and
            implemented by the factory.  If the factory
            implements a single interface, then this argument is
            optional and the factory-implemented interface will be
            used.

        name
            The adapter name.

        info
            An object that can be converted to a string to provide
            information about the registration.

        A Registered event is generated with an IAdapterRegistration.
        """

    def unregisterAdapter(factory=None, required=None,
                          provided=None, name=_BLANK):
        """Unregister an adapter factory

        A boolean is returned indicating whether the registry was
        changed.  If the given component is None and there is no
        component registered, or if the given component is not
        None and is not registered, then the function returns
        False, otherwise it returns True.

        Parameters:

        factory
            This is the object used to compute the adapter.  The
            factory can be None, in which case any factory
            registered to implement the given provided interface
            for the given required specifications with the given
            name is unregistered.

        required
            This is a sequence of specifications for objects to be
            adapted.  If the factory is not None and the required
            argument is omitted, then the value of the factory's
            __component_adapts__ attribute will be used.  The
            __component_adapts__ attribute is normally set in class
            definitions using the adapts function, or for callables
            using the adapter decorator.  If the factory is None or
            doesn't have a __component_adapts__ attribute, then this
            argument is required.

        provided
            This is the interface provided by the adapter and
            implemented by the factory.  If the factory is not
            None and implements a single interface, then this
            argument is optional and the factory-implemented
            interface will be used.

        name
            The adapter name.

        An Unregistered event is generated with an IAdapterRegistration.
        """

    def registeredAdapters():
        """Return an iterable of IAdapterRegistration instances.

        These registrations describe the current adapter registrations
        in the object.
        """

    def registerSubscriptionAdapter(factory, required=None, provides=None,
                                    name=_BLANK, info=''):
        """Register a subscriber factory

        Parameters:

        factory
            The object used to compute the adapter

        required
            This is a sequence of specifications for objects to be
            adapted.  If omitted, then the value of the factory's
            __component_adapts__ attribute will be used.  The
            __component_adapts__ attribute is normally set in class
            definitions using the adapts function, or for callables
            using the adapter decorator.  If the factory doesn't have
            a __component_adapts__ attribute, then this argument is
            required.

        provided
            This is the interface provided by the adapter and
            implemented by the factory.  If the factory implements
            a single interface, then this argument is optional and
            the factory-implemented interface will be used.

        name
            The adapter name.

            Currently, only the empty string is accepted.  Other
            strings will be accepted in the future when support for
            named subscribers is added.

        info
            An object that can be converted to a string to provide
            information about the registration.

        A Registered event is generated with an
        ISubscriptionAdapterRegistration.
        """

    def unregisterSubscriptionAdapter(factory=None, required=None,
                                      provides=None, name=_BLANK):
        """Unregister a subscriber factory.

        A boolean is returned indicating whether the registry was
        changed.  If the given component is None and there is no
        component registered, or if the given component is not
        None and is not registered, then the function returns
        False, otherwise it returns True.

        Parameters:

        factory
            This is the object used to compute the adapter.  The
            factory can be None, in which case any factories
            registered to implement the given provided interface
            for the given required specifications with the given
            name are unregistered.

        required
            This is a sequence of specifications for objects to be
            adapted.  If the factory is not None and the required
            argument is omitted, then the value of the factory's
            __component_adapts__ attribute will be used.  The
            __component_adapts__ attribute is normally set in class
            definitions using the adapts function, or for callables
            using the adapter decorator.  If the factory is None or
            doesn't have a __component_adapts__ attribute, then this
            argument is required.

        provided
            This is the interface provided by the adapter and
            implemented by the factory.  If the factory is not
            None and implements a single interface, then this argument
            is optional and the factory-implemented interface will
            be used.

        name
            The adapter name.

            Currently, only the empty string is accepted.  Other
            strings will be accepted in the future when support for
            named subscribers is added.

        An Unregistered event is generated with an
        ISubscriptionAdapterRegistration.
        """

    def registeredSubscriptionAdapters():
        """Return an iterable of ISubscriptionAdapterRegistration instances.

        These registrations describe the current subscription adapter
        registrations in the object.
        """

    def registerHandler(handler, required=None, name=_BLANK, info=''):
        """Register a handler.

        A handler is a subscriber that doesn't compute an adapter
        but performs some function when called.

        Parameters:

        handler
            The object used to handle some event represented by
            the objects passed to it.

        required
            This is a sequence of specifications for objects to be
            adapted.  If omitted, then the value of the factory's
            __component_adapts__ attribute will be used.  The
            __component_adapts__ attribute is normally set in class
            definitions using the adapts function, or for callables
            using the adapter decorator.  If the factory doesn't have
            a __component_adapts__ attribute, then this argument is
            required.

        name
            The handler name.

            Currently, only the empty string is accepted.  Other
            strings will be accepted in the future when support for
            named handlers is added.

        info
            An object that can be converted to a string to provide
            information about the registration.

        A Registered event is generated with an IHandlerRegistration.
        """

    def unregisterHandler(handler=None, required=None, name=_BLANK):
        """Unregister a handler.

        A handler is a subscriber that doesn't compute an adapter
        but performs some function when called.

        A boolean is returned indicating whether the registry was
        changed.

        Parameters:

        handler
            This is the object used to handle some event
            represented by the objects passed to it.  The handler
            can be None, in which case any handlers registered for
            the given required specifications with the given name are
            unregistered.

        required
            This is a sequence of specifications for objects to be
            adapted.  If omitted, then the value of the factory's
            __component_adapts__ attribute will be used.  The
            __component_adapts__ attribute is normally set in class
            definitions using the adapts function, or for callables
            using the adapter decorator.  If the factory doesn't have
            a __component_adapts__ attribute, then this argument is
            required.

        name
            The handler name.

            Currently, only the empty string is accepted.  Other
            strings will be accepted in the future when support for
            named handlers is added.

        An Unregistered event is generated with an IHandlerRegistration.
        """

    def registeredHandlers():
        """Return an iterable of IHandlerRegistration instances.

        These registrations describe the current handler registrations
        in the object.
        """
class IComponents(IComponentLookup, IComponentRegistry):
    """Component registration and access
    """
# end formerly in zope.component
| mit |
rue89-tech/edx-platform | cms/djangoapps/contentstore/tests/test_orphan.py | 77 | 4598 | """
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from contentstore.utils import reverse_course_url
class TestOrphanBase(CourseTestCase):
    """
    Base class for Studio tests that require orphaned modules.

    setUp builds a course containing both attached blocks and orphans
    (items created in the course but never added to the course tree).
    """
    def setUp(self):
        super(TestOrphanBase, self).setUp()

        # create chapters and add them to course tree
        chapter1 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter1")
        self.store.publish(chapter1.location, self.user.id)

        chapter2 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter2")
        self.store.publish(chapter2.location, self.user.id)

        # orphan chapter: created with create_item, so it has no parent
        orphan_chapter = self.store.create_item(self.user.id, self.course.id, 'chapter', "OrphanChapter")
        self.store.publish(orphan_chapter.location, self.user.id)

        # create vertical and add it as child to chapter1
        vertical1 = self.store.create_child(self.user.id, chapter1.location, 'vertical', "Vertical1")
        self.store.publish(vertical1.location, self.user.id)

        # create orphan vertical
        orphan_vertical = self.store.create_item(self.user.id, self.course.id, 'vertical', "OrphanVert")
        self.store.publish(orphan_vertical.location, self.user.id)

        # create component and add it to vertical1
        html1 = self.store.create_child(self.user.id, vertical1.location, 'html', "Html1")
        self.store.publish(html1.location, self.user.id)

        # create component and add it as a child to vertical1 and orphan_vertical;
        # having one orphan parent and one non-orphan parent, it must survive
        # orphan deletion (see TestOrphan.test_mongo_orphan_delete).
        multi_parent_html = self.store.create_child(self.user.id, vertical1.location, 'html', "multi_parent_html")
        self.store.publish(multi_parent_html.location, self.user.id)
        orphan_vertical.children.append(multi_parent_html.location)
        self.store.update_item(orphan_vertical, self.user.id)

        # create an orphaned html module
        orphan_html = self.store.create_item(self.user.id, self.course.id, 'html', "OrphanHtml")
        self.store.publish(orphan_html.location, self.user.id)

        # additional block types attached directly to the course
        self.store.create_child(self.user.id, self.course.location, 'static_tab', "staticuno")
        self.store.create_child(self.user.id, self.course.location, 'about', "overview")
        self.store.create_child(self.user.id, self.course.location, 'course_info', "updates")
class TestOrphan(TestOrphanBase):
    """
    Test finding orphans via view and django config
    """
    def setUp(self):
        super(TestOrphan, self).setUp()
        # REST endpoint under test (GET lists orphans, DELETE removes them).
        self.orphan_url = reverse_course_url('orphan_handler', self.course.id)

    def test_mongo_orphan(self):
        """
        Test that old mongo finds the orphans
        """
        orphans = json.loads(
            self.client.get(
                self.orphan_url,
                HTTP_ACCEPT='application/json'
            ).content
        )
        # The three orphans created in TestOrphanBase.setUp, and nothing else.
        self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
        location = self.course.location.replace(category='chapter', name='OrphanChapter')
        self.assertIn(location.to_deprecated_string(), orphans)
        location = self.course.location.replace(category='vertical', name='OrphanVert')
        self.assertIn(location.to_deprecated_string(), orphans)
        location = self.course.location.replace(category='html', name='OrphanHtml')
        self.assertIn(location.to_deprecated_string(), orphans)

    def test_mongo_orphan_delete(self):
        """
        Test that old mongo deletes the orphans
        """
        self.client.delete(self.orphan_url)
        orphans = json.loads(
            self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
        )
        self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))

        # make sure that any children with one orphan parent and one non-orphan
        # parent are not deleted
        self.assertTrue(self.store.has_item(self.course.id.make_usage_key('html', "multi_parent_html")))

    def test_not_permitted(self):
        """
        Test that auth restricts get and delete appropriately
        """
        # An enrolled but non-staff user must be denied both GET and DELETE.
        test_user_client, test_user = self.create_non_staff_authed_user_client()
        CourseEnrollment.enroll(test_user, self.course.id)
        response = test_user_client.get(self.orphan_url)
        self.assertEqual(response.status_code, 403)
        response = test_user_client.delete(self.orphan_url)
        self.assertEqual(response.status_code, 403)
| agpl-3.0 |
agermanidis/Pattern | graph/__init__.py | 1 | 46675 | #### PATTERN | GRAPH #################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
######################################################################################################
# This module can benefit greatly from loading psyco.
from math import sqrt, pow
from math import sin, cos, atan2, degrees, radians, pi
from random import random
from heapq import heappush, heappop
from warnings import warn
from codecs import open
# float("inf") doesn't work on windows.
INFINITE = 1e20
# This module is standalone, line(), ellipse() and Text.draw()
# must be either implemented or patched:
def line(x1, y1, x2, y2, stroke=(0,0,0,1), strokewidth=1):
    # No-op placeholder: the host application must implement or patch this
    # to draw a line from (x1,y1) to (x2,y2).
    pass
def ellipse(x, y, width, height, fill=(0,0,0,1), stroke=None, strokewidth=1):
    # No-op placeholder: the host application must implement or patch this
    # to draw an ellipse centered at (x,y).
    pass
class Text:
    """ Placeholder text label; Text.draw() must be implemented or patched
        by the host application.
    """
    def __init__(self, string, **kwargs):
        # Store the label string, plus any styling options (fill, fontsize, ...)
        # directly as attributes.
        self.string = string
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def copy(self):
        # A new Text with the same string and the same styling attributes.
        attrs = dict(self.__dict__)
        del attrs["string"]
        return Text(self.string, **attrs)

    def draw(self):
        pass
class Vector(object):
    """ A simple 2D vector with x and y components (used e.g. for node forces). """
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y
class Base(object):
    # NOTE(review): empty placeholder class; its role is defined by code
    # outside this chunk.
    pass
#--- NODE --------------------------------------------------------------------------------------------
def _copy(x):
# A color can be represented as a tuple or as a nodebox.graphics.Color object,
# in which case it needs to be copied by invoking Color.copy().
return hasattr(x, "copy") and x.copy() or x
class Node(object):

    def __init__(self, id="", radius=5, **kwargs):
        """ A node with a unique id in the graph.
            Node.id is drawn as a text label, unless optional parameter text=False.
            Optional parameters include: fill, stroke, strokewidth, text, font, fontsize, fontweight.
        """
        self.graph = None
        self.links = Links()
        self.id = id
        self._x = 0 # Calculated by Graph.layout.update().
        self._y = 0 # Calculated by Graph.layout.update().
        self.force = Vector(0, 0)
        self.radius = radius
        self.fill = kwargs.get("fill", None)
        self.stroke = kwargs.get("stroke", (0,0,0,1))
        self.strokewidth = kwargs.get("strokewidth", 1)
        # The "text" parameter doubles as a flag (text=False disables the
        # label) and as the label color passed on to Text.fill.
        self.text = kwargs.get("text", True) and \
            Text(unicode(id),
                   width = 85,
                    fill = kwargs.pop("text", (0,0,0,1)),
                fontsize = kwargs.pop("fontsize", 11), **kwargs) or None
        self._weight = None # Calculated by Graph.eigenvector_centrality().
        self._centrality = None # Calculated by Graph.betweenness_centrality().

    @property
    def _distance(self):
        # Graph.distance controls the (x,y) spacing between nodes.
        return self.graph and float(self.graph.distance) or 1.0

    # Node.x and Node.y are the raw layout coordinates (_x, _y) scaled by
    # the graph's node spacing.
    def _get_x(self):
        return self._x * self._distance
    def _get_y(self):
        return self._y * self._distance
    def _set_x(self, v):
        self._x = v / self._distance
    def _set_y(self, v):
        self._y = v / self._distance

    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)

    @property
    def edges(self):
        """ Yields a list of edges from/to the node. """
        # Fix: compare ids, not Node objects. The original tested
        # `self.id in (e.node1, e.node2)`; since Node.__eq__ only matches
        # other Node instances, that membership test was always False and
        # the property always returned [].
        return self.graph is not None \
           and [e for e in self.graph.edges if self.id in (e.node1.id, e.node2.id)] \
            or []

    @property
    def weight(self):
        """ Yields eigenvector centrality as a number between 0.0-1.0
            (computed lazily for the whole graph).
        """
        if self.graph and self._weight is None:
            self.graph.eigenvector_centrality()
        return self._weight

    @property
    def centrality(self):
        """ Yields betweenness centrality as a number between 0.0-1.0
            (computed lazily for the whole graph).
        """
        if self.graph and self._centrality is None:
            self.graph.betweenness_centrality()
        return self._centrality

    def flatten(self, depth=1, _visited=None):
        """ Recursively lists the node and nodes linked to it.
            Depth 0 returns a list with the node.
            Depth 1 returns a list with the node and all the directly linked nodes.
            Depth 2 includes the linked nodes' links, and so on.
        """
        _visited = _visited or {}
        _visited[self.id] = (self, depth)
        if depth >= 1:
            for n in self.links:
                # Revisit a node when it is reachable with more remaining depth.
                if n.id not in _visited or _visited[n.id][1] < depth-1:
                    n.flatten(depth-1, _visited)
        return [n for n,d in _visited.values()] # Fast, but not order-preserving.

    def draw(self, weighted=False):
        """ Draws the node as a circle with the given radius, fill, stroke and strokewidth.
            Draws the node centrality as a shadow effect when weighted=True.
            Draws the node text label.
            Override this method in a subclass for custom drawing.
        """
        # Draw the node weight as a shadow (based on node betweenness centrality).
        # weighted may also be a number: the centrality threshold above which
        # the shadow is drawn (weighted=True means "always").
        if weighted is not False and self.centrality > (weighted==True and -1 or weighted):
            w = self.centrality * 35
            ellipse(
                self.x,
                self.y,
                self.radius*2 + w,
                self.radius*2 + w, fill=(0,0,0,0.2), stroke=None)
        # Draw the node.
        ellipse(
            self.x,
            self.y,
            self.radius*2,
            self.radius*2, fill=self.fill, stroke=self.stroke, strokewidth=self.strokewidth)
        # Draw the node text label.
        if self.text:
            self.text.draw(
                self.x + self.radius,
                self.y + self.radius)

    def contains(self, x, y):
        # True when (x,y) falls inside the node's bounding box.
        return abs(self.x - x) < self.radius*2 and \
               abs(self.y - y) < self.radius*2

    def __repr__(self):
        return "%s(id=%s)" % (self.__class__.__name__, repr(self.id))

    def __eq__(self, node):
        return isinstance(node, Node) and self.id == node.id
    def __ne__(self, node):
        return not self.__eq__(node)

    def copy(self):
        """ Returns a shallow copy of the node (i.e. linked nodes are not copied).
        """
        n = Node(self.id, self.radius,
            text = None,
            fill = _copy(self.fill),
            stroke = _copy(self.stroke),
            strokewidth = self.strokewidth)
        if self.text:
            n.text = self.text.copy()
        n.__class__ = self.__class__
        return n
class Links(list):

    def __init__(self):
        """ An ordered collection of linked nodes in which each node has an
            associated edge. The edge() method returns the edge for a given
            node id.
        """
        self.edges = {}

    def append(self, node, edge=None):
        # Ignore nodes that are already linked; the first edge wins.
        if node.id in self.edges:
            return
        list.append(self, node)
        self.edges[node.id] = edge

    def remove(self, node):
        list.remove(self, node)
        self.edges.pop(node.id, None)

    def edge(self, node):
        # Accepts either a Node or a plain id.
        key = isinstance(node, Node) and node.id or node
        return self.edges.get(key)
#--- EDGE --------------------------------------------------------------------------------------------
def coordinates(x, y, d, a):
    """ Returns the point at distance d from (x,y), under angle a in degrees. """
    return (x + d * cos(radians(a)), y + d * sin(radians(a)))
class Edge(object):

    def __init__(self, node1, node2, weight=0.0, length=1.0, type=None, stroke=(0,0,0,1), strokewidth=1):
        """ A connection between two nodes.
            Its weight indicates the importance (not the cost) of the connection.
            Its type is useful in a semantic network (e.g. "is-a", "is-part-of", ...)
        """
        self.node1 = node1
        self.node2 = node2
        self.weight = weight
        self.length = length
        self.type = type
        self.stroke = stroke
        self.strokewidth = strokewidth

    def draw(self, weighted=False, directed=False):
        """ Draws the edge as a line with the given stroke and strokewidth (increased with Edge.weight).
            Override this method in a subclass for custom drawing.
        """
        w = weighted and self.weight or 0
        line(
            self.node1.x,
            self.node1.y,
            self.node2.x,
            self.node2.y, stroke=self.stroke, strokewidth=self.strokewidth+w)
        if directed:
            self.draw_arrow(stroke=self.stroke, strokewidth=self.strokewidth+w)

    def draw_arrow(self, **kwargs):
        """ Draws the direction of the edge as an arrow on the rim of the receiving node.
        """
        x0, y0 = self.node1.x, self.node1.y
        x1, y1 = self.node2.x, self.node2.y
        # Find the edge's angle based on node1 and node2 position.
        a = degrees(atan2(y1-y0, x1-x0))
        # The arrow points to node2's rim instead of its center.
        r = self.node2.radius
        d = sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
        x01, y01 = coordinates(x0, y0, d-r-1, a)
        # Find the two other arrow corners under the given angle.
        r = max(kwargs.get("strokewidth", 1) * 3, 6)
        dx1, dy1 = coordinates(x01, y01, -r, a-20)
        dx2, dy2 = coordinates(x01, y01, -r, a+20)
        # Draw the arrowhead as a (filled-looking) triangle of three lines.
        line(x01, y01, dx1, dy1, **kwargs)
        line(x01, y01, dx2, dy2, **kwargs)
        line(dx1, dy1, dx2, dy2, **kwargs)

    def __repr__(self):
        return "%s(id1=%s, id2=%s)" % (self.__class__.__name__, repr(self.node1.id), repr(self.node2.id))

    def copy(self, node1, node2):
        # Shallow copy bound to the given (typically already copied) nodes.
        e = Edge(node1, node2, self.weight, self.length, self.type, _copy(self.stroke), self.strokewidth)
        e.__class__ = self.__class__
        return e
#--- GRAPH -------------------------------------------------------------------------------------------
def unique(items):
    """ Returns a new list with duplicate (hashable) items removed,
        preserving the order of first occurrence.
    """
    # Fixes the original's parameter shadowing the builtin `list`,
    # and uses a set (instead of a dict abused as a set) for seen items.
    seen = set()
    u = []
    for item in items:
        if item not in seen:
            seen.add(item)
            u.append(item)
    return u
# Graph layouts:
SPRING = "spring"

# Graph node sort order (values accepted by Graph.sorted(order=...)):
WEIGHT, CENTRALITY = "weight", "centrality"
ALL = "all"
class Graph(dict):
    """ A network of nodes connected by edges, stored as {node id: Node}.
        Also keeps flat lists of its nodes and edges, and a layout object
        that iteratively computes node positions (see Graph.update()).
    """
    def __init__(self, layout=SPRING, distance=10.0):
        """ A network of nodes connected by edges that can be drawn with a given layout.
        """
        self.nodes = [] # List of Node objects.
        self.edges = [] # List of Edge objects.
        self.root = None # Node appended with root=True, if any.
        self.distance = distance
        self.layout = layout==SPRING and GraphSpringLayout(self) or GraphLayout(self)
    def append(self, type, *args, **kwargs):
        """ Appends a Node or Edge to the graph: Graph.append(Node, id="rabbit").
        """
        # Note: the "type" parameter shadows the built-in; kept for API compatibility.
        if type is Node:
            return self.add_node(*args, **kwargs)
        if type is Edge:
            return self.add_edge(*args, **kwargs)
    def add_node(self, id, *args, **kwargs):
        """ Appends a new Node to the graph.
            The id can also be an existing Node, or the id of a node already in the
            graph, in which case that node is reused.
        """
        n = isinstance(id, Node) and id or self.get(id) or Node(id, *args, **kwargs)
        if n.id not in self:
            self.nodes.append(n)
            self[n.id] = n; n.graph = self
            self.root = kwargs.get("root", False) and n or self.root
        return n
    def add_edge(self, id1, id2, *args, **kwargs):
        """ Appends a new Edge to the graph.
        """
        # Create nodes that are not yet part of the graph.
        n1 = self.add_node(id1)
        n2 = self.add_node(id2)
        # Creates an Edge instance.
        # If an edge (in the same direction) already exists, yields that edge instead.
        e1 = n1.links.edge(n2)
        if e1 and e1.node1 == n1 and e1.node2 == n2:
            return e1
        e2 = Edge(n1, n2, *args, **kwargs)
        self.edges.append(e2)
        # Synchronizes Node.links:
        # A.links.edge(B) yields edge A->B
        # B.links.edge(A) yields edge B->A
        n1.links.append(n2, edge=e2)
        n2.links.append(n1, edge=e1 or e2)
        return e2
    def remove(self, x):
        """ Removes the given Node (and all its edges) or Edge from the graph.
            Note: removing Edge a->b does not remove Edge b->a.
        """
        if isinstance(x, Node) and x.id in self:
            self.pop(x.id)
            self.nodes.remove(x); x.graph = None
            # Remove all edges involving the given node.
            for e in list(self.edges):
                if x in (e.node1, e.node2):
                    if x in e.node1.links: e.node1.links.remove(x)
                    if x in e.node2.links: e.node2.links.remove(x)
                    self.edges.remove(e)
        if isinstance(x, Edge):
            self.edges.remove(x)
    def node(self, id):
        """ Returns the node in the graph with the given id.
        """
        return self.get(id)
    def edge(self, id1, id2):
        """ Returns the edge between the nodes with given id1 and id2.
        """
        return id1 in self and id2 in self and self[id1].links.edge(id2) or None
    def shortest_path(self, node1, node2, heuristic=None, directed=False):
        """ Returns a list of nodes connecting the two nodes.
            Returns None if no path exists.
        """
        try:
            p = dijkstra_shortest_path(self, node1.id, node2.id, heuristic, directed)
            p = [self[id] for id in p]
            return p
        except IndexError:
            # Raised by heappop() on an exhausted heap when the nodes are unconnected.
            return None
    def eigenvector_centrality(self, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
        """ Calculates eigenvector centrality and returns a node => weight dictionary.
            Node.weight is updated in the process.
            Node.weight is higher for nodes with a lot of (indirect) incoming traffic.
        """
        # The mutable default for rating is only read (rating.get) downstream, never mutated.
        ec = eigenvector_centrality(self, normalized, reversed, rating, iterations, tolerance)
        ec = dict([(self[id], w) for id, w in ec.items()])
        for n, w in ec.items():
            n._weight = w
        return ec
    def betweenness_centrality(self, normalized=True, directed=False):
        """ Calculates betweenness centrality and returns a node => weight dictionary.
            Node.centrality is updated in the process.
            Node.centrality is higher for nodes with a lot of passing traffic.
        """
        bc = brandes_betweenness_centrality(self, normalized, directed)
        bc = dict([(self[id], w) for id, w in bc.items()])
        for n, w in bc.items():
            n._centrality = w
        return bc
    def sorted(self, order=WEIGHT, threshold=0.0):
        """ Returns a list of nodes sorted by WEIGHT or CENTRALITY.
            Nodes with a lot of traffic will be at the start of the list.
        """
        o = lambda node: getattr(node, order)
        nodes = [(o(n), n) for n in self.nodes if o(n) > threshold]
        # NOTE(review): sorting (weight, node) tuples falls back to comparing nodes
        # on equal weights; under Python 3 this requires orderable nodes - verify.
        nodes = reversed(sorted(nodes))
        return [n for w, n in nodes]
    def prune(self, depth=0):
        """ Removes all nodes with less or equal links than depth.
        """
        for n in [n for n in self.nodes if len(n.links) <= depth]:
            self.remove(n)
    def fringe(self, depth=0):
        """ For depth=0, returns the list of leaf nodes (nodes with only one connection).
            For depth=1, returns the list of leaf nodes and their connected nodes, and so on.
        """
        u = []; [u.extend(n.flatten(depth)) for n in self.nodes if len(n.links) == 1]
        return unique(u)
    @property
    def density(self):
        # Number of edges vs. maximum number of possible edges.
        # E.g. <0.35 => sparse, >0.65 => dense, 1.0 => complete.
        # Note: raises ZeroDivisionError for graphs with fewer than two nodes.
        return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
    def split(self):
        """ Returns a list of unconnected subgraphs (see partition()). """
        return partition(self)
    def update(self, iterations=10, **kwargs):
        """ Graph.layout.update() is called the given number of iterations.
        """
        for i in range(iterations):
            self.layout.update(**kwargs)
    def draw(self, weighted=False, directed=False):
        """ Draws all nodes and edges.
        """
        for e in self.edges:
            e.draw(weighted, directed)
        for n in reversed(self.nodes): # New nodes (with Node._weight=None) first.
            n.draw(weighted)
    def node_at(self, x, y):
        """ Returns the node at (x,y) or None.
        """
        for n in self.nodes:
            if n.contains(x, y): return n
    def copy(self, nodes=ALL):
        """ Returns a copy of the graph with the given list of nodes (and connecting edges).
            The layout will be reset.
        """
        g = Graph(layout=None, distance=self.distance)
        g.layout = self.layout.copy(graph=g)
        # NOTE(review): append() tests "type is Node/Edge", i.e. it expects the class
        # as first argument, but instances are passed here - verify against append().
        for n in (nodes==ALL and self.nodes or nodes):
            g.append(n.copy(), root=self.root==n)
        for e in self.edges:
            if e.node1.id in g and e.node2.id in g:
                g.append(e.copy(
                    node1=g[e.node1.id],
                    node2=g[e.node2.id]))
        return g
#--- GRAPH LAYOUT ------------------------------------------------------------------------------------
# Graph drawing or graph layout, as a branch of graph theory,
# applies topology and geometry to derive two-dimensional representations of graphs.
class GraphLayout:
    """ Base class for graph layouts: keeps a reference to the graph and an
        iteration counter. Subclasses move nodes in update().
    """
    def __init__(self, graph):
        """ Calculates node positions iteratively when GraphLayout.update() is called.
        """
        self.graph = graph
        self.iterations = 0
    def update(self):
        """ Advances the layout one step. Subclasses add the actual movement. """
        self.iterations += 1
    def reset(self):
        """ Moves all nodes back to the origin with zero force and restarts the iteration count. """
        self.iterations = 0
        for n in self.graph.nodes:
            n._x = 0
            n._y = 0
            n.force = Vector(0,0)
    @property
    def bounds(self):
        """ Returns a (x, y, width, height)-tuple of the approximate layout dimensions.
        """
        x0, y0 = +INFINITE, +INFINITE
        x1, y1 = -INFINITE, -INFINITE
        for n in self.graph.nodes:
            if (n.x < x0): x0 = n.x
            if (n.y < y0): y0 = n.y
            if (n.x > x1): x1 = n.x
            if (n.y > y1): y1 = n.y
        return (x0, y0, x1-x0, y1-y0)
    def copy(self, graph):
        """ Returns a new layout for the given graph.
        """
        # Bug fix: GraphLayout() takes a single graph argument; the previous
        # GraphLayout(self, graph) call always raised a TypeError.
        return GraphLayout(graph)
class GraphSpringLayout(GraphLayout):
    def __init__(self, graph):
        """ A force-based layout in which edges are regarded as springs.
            The forces are applied to the nodes, pulling them closer or pushing them apart.
        """
        # Based on: http://snipplr.com/view/1950/graph-javascript-framework-version-001/
        GraphLayout.__init__(self, graph)
        self.k = 4.0 # Force constant.
        self.force = 0.01 # Force multiplier.
        self.repulsion = 15 # Maximum repulsive force radius.
    def _distance(self, node1, node2):
        # Yields a tuple with distances (dx, dy, d, d**2).
        # Ensures that the distance is never zero (which deadlocks the animation).
        dx = node2._x - node1._x
        dy = node2._y - node1._y
        d2 = dx*dx + dy*dy
        if d2 < 0.01:
            # Nodes (nearly) coincide: nudge apart in a random direction.
            dx = random() * 0.1 + 0.1
            dy = random() * 0.1 + 0.1
            d2 = dx*dx + dy*dy
        return dx, dy, sqrt(d2), d2
    def _repulse(self, node1, node2):
        # Updates Node.force with the repulsive force.
        # Repulsion decays with squared distance, and only applies within self.repulsion radius.
        dx, dy, d, d2 = self._distance(node1, node2)
        if d < self.repulsion:
            f = self.k**2 / d2
            node2.force.x += f * dx
            node2.force.y += f * dy
            node1.force.x -= f * dx
            node1.force.y -= f * dy
    def _attract(self, node1, node2, weight=0, length=1.0):
        # Updates Node.force with the attractive edge force.
        # The spring force grows with the stretch beyond the rest length k;
        # heavier edges (weight) pull harder, longer edges (length) pull less.
        dx, dy, d, d2 = self._distance(node1, node2)
        d = min(d, self.repulsion)
        f = (d2 - self.k**2) / self.k * length
        f *= weight * 0.5 + 1
        f /= d
        node2.force.x -= f * dx
        node2.force.y -= f * dy
        node1.force.x += f * dx
        node1.force.y += f * dy
    def update(self, weight=10.0, limit=0.5):
        """ Updates the position of nodes in the graph.
            The weight parameter determines the impact of edge weight.
            The limit parameter determines the maximum movement each update().
        """
        GraphLayout.update(self)
        # Forces on all nodes due to node-node repulsions.
        for i, n1 in enumerate(self.graph.nodes):
            for j, n2 in enumerate(self.graph.nodes[i+1:]):
                self._repulse(n1, n2)
        # Forces on nodes due to edge attractions.
        for e in self.graph.edges:
            self._attract(e.node1, e.node2, weight*e.weight, 1.0/(e.length or 0.01))
        # Move nodes by given force.
        for n in self.graph.nodes:
            n._x += max(-limit, min(self.force * n.force.x, limit))
            n._y += max(-limit, min(self.force * n.force.y, limit))
            n.force.x = 0
            n.force.y = 0
    def copy(self, graph):
        # Returns a new spring layout for the given graph, copying the force settings.
        g = GraphSpringLayout(graph)
        g.k, g.force, g.repulsion = self.k, self.force, self.repulsion
        return g
#--- GRAPH THEORY ------------------------------------------------------------------------------------
def depth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True, _visited=None):
    """ Visits all the nodes connected to the given root node, depth-first.
        The visit function is called on each node; as soon as it returns True,
        traversal stops and depth_first_search() returns True.
        The traversable function takes the current node and edge and returns True
        if we are allowed to follow this connection to the next node.
        For example, the traversable for directed edges is:
        lambda node, edge: node == edge.node1
    """
    stop = visit(node)
    _visited = _visited or {}
    _visited[node.id] = True
    for neighbor in node.links:
        if stop:
            return True
        if not traversable(node, node.links.edge(neighbor)):
            continue
        if neighbor.id not in _visited:
            stop = depth_first_search(neighbor, visit, traversable, _visited)
    return stop

dfs = depth_first_search
def breadth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True):
    """ Visits all the nodes connected to the given root node, breadth-first.
        Returns True as soon as the visit function returns True, False otherwise.
        The traversable function decides which connections may be followed.
    """
    queue = [node]
    visited = {}
    while len(queue) > 0:
        current = queue.pop(0)
        if current.id not in visited:
            if visit(current):
                return True
            for neighbor in current.links:
                if traversable(current, current.links.edge(neighbor)):
                    queue.append(neighbor)
            visited[current.id] = True
    return False

bfs = breadth_first_search
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """ Returns a dictionary indexed by node id1's,
        in which each value is a dictionary of connected node id2's linking to the edge weight.
        If directed=True, edges go from id1 to id2, but not the other way.
        If stochastic=True, all the weights for the neighbors of a given node sum to 1.
        A heuristic function can be given that takes two node id's and returns
        an additional cost for movement between the two nodes.
    """
    # Renamed the local "map" (which shadowed the built-in) to "adj".
    adj = {}
    for n in graph.nodes:
        adj[n.id] = {}
    for e in graph.edges:
        id1, id2 = not reversed and (e.node1.id, e.node2.id) or (e.node2.id, e.node1.id)
        # Edge weight 0.0-1.0 maps to traversal cost 1.0-0.5:
        # heavier edges are cheaper to traverse.
        adj[id1][id2] = 1.0 - 0.5 * e.weight
        if heuristic:
            adj[id1][id2] += heuristic(id1, id2)
        if not directed:
            adj[id2][id1] = adj[id1][id2]
    if stochastic:
        for id1 in adj:
            total = sum(adj[id1].values())
            for id2 in adj[id1]:
                adj[id1][id2] /= total
    return adj
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding shortest paths.
        Returns the path from id1 to id2 as a list of node id's.
        Raises an IndexError between nodes on unconnected graphs.
    """
    # Based on: Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
    def flatten(linked):
        # Flattens a linked list of the form [0,[1,[2,[]]]]
        # (renamed the parameter, which shadowed the built-in list).
        while len(linked) > 0:
            yield linked[0]; linked = linked[1]
    G = adjacency(graph, directed=directed, heuristic=heuristic)
    q = [(0, id1, ())] # Heap of (cost, path_head, path_rest).
    visited = set()    # Visited nodes.
    while True:
        (cost1, n1, path) = heappop(q) # IndexError here when id2 is unreachable.
        if n1 not in visited:
            visited.add(n1)
            if n1 == id2:
                return list(flatten(path))[::-1] + [n1]
            path = (n1, path)
            # items() works in both Python 2 and 3; iteritems() was Python 2-only.
            for (n2, cost2) in G[n1].items():
                if n2 not in visited:
                    heappush(q, (cost1 + cost2, n2, path))
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
    """ Betweenness centrality for nodes in the graph.
        Betweenness centrality is a measure of the number of shortest paths that pass through a node.
        Nodes in high-density areas will get a good score.
        Returns a dictionary of node id => weight (0.0-1.0 when normalized).
    """
    # Ulrik Brandes, A Faster Algorithm for Betweenness Centrality,
    # Journal of Mathematical Sociology 25(2):163-177, 2001,
    # http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.0.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    G = graph.keys()
    W = adjacency(graph, directed=directed)
    betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
    for s in G:
        S = []      # Nodes in order of non-decreasing distance from s.
        P = {}      # Predecessors on shortest paths from s.
        for v in G: P[v] = []
        sigma = dict.fromkeys(G, 0) # sigma[v]=0 for v in G
        D = {}
        sigma[s] = 1
        seen = {s: 0}
        Q = [] # use Q as heap with (distance, node id) tuples
        heappush(Q, (0, s, s))
        while Q:
            (dist, pred, v) = heappop(Q)
            if v in D: continue # already searched this node
            sigma[v] = sigma[v] + sigma[pred] # count paths
            S.append(v)
            D[v] = dist
            for w in W[v].keys():
                vw_dist = D[v] + W[v][w]
                if w not in D and (w not in seen or vw_dist < seen[w]):
                    seen[w] = vw_dist
                    heappush(Q, (vw_dist, v, w))
                    sigma[w] = 0
                    P[w] = [v]
                elif vw_dist == seen[w]: # handle equal paths
                    sigma[w] = sigma[w] + sigma[v]
                    P[w].append(v)
        # Accumulate dependencies back from the leaves of the shortest-path tree.
        delta = dict.fromkeys(G,0)
        while S:
            w = S.pop()
            for v in P[w]:
                delta[v] = delta[v] + (float(sigma[v]) / float(sigma[w])) * (1.0 + delta[w])
            if w != s:
                betweenness[w] = betweenness[w] + delta[w]
    if normalized:
        # Normalize between 0.0 and 1.0.
        m = max(betweenness.values())
        if m == 0: m = 1
    else:
        m = 1
    # items() works in both Python 2 and 3; iteritems() was Python 2-only.
    betweenness = dict([(id, w/m) for id, w in betweenness.items()])
    return betweenness
def eigenvector_centrality(graph, normalized=True, reversed=True, rating=None, iterations=100, tolerance=0.0001):
    """ Eigenvector centrality for nodes in the graph (cfr. Google's PageRank).
        Eigenvector centrality is a measure of the importance of a node in a directed network.
        It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
        Nodes with no incoming connections have a score of zero.
        If you want to measure outgoing connections, reversed should be False.
    """
    # Based on: NetworkX, Aric Hagberg (hagberg@lanl.gov)
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    # The mutable default argument (rating={}) is avoided; None means "no rating".
    rating = rating if rating is not None else {}
    def normalize(vector):
        w = 1.0 / (sum(vector.values()) or 1)
        for node in vector:
            vector[node] *= w
        return vector
    G = adjacency(graph, directed=True, reversed=reversed)
    v = normalize(dict([(n, random()) for n in graph])) # Node ID => weight vector.
    # Eigenvector calculation using the power iteration method: y = Ax.
    # It has no guarantee of convergence.
    for i in range(iterations):
        v0 = v
        v = dict.fromkeys(v0.keys(), 0)
        for n1 in v:
            for n2 in G[n1]:
                v[n1] += 0.01 + v0[n2] * G[n1][n2] * rating.get(n1, 1)
        normalize(v)
        e = sum([abs(v[n]-v0[n]) for n in v]) # Check for convergence.
        if e < len(G) * tolerance:
            if normalized:
                # Normalize between 0.0 and 1.0.
                m = max(v.values()) or 1
                v = dict([(id, w/m) for id, w in v.items()])
            return v
    warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
    return dict([(n, 0) for n in G])
# a | b => all elements from a and all the elements from b.
# a & b => elements that appear in a as well as in b.
# a - b => elements that appear in a but not in b.
def union(a, b):
    """ Returns the items in a followed by the items in b that are not in a. """
    return list(a) + [x for x in b if x not in a]
def intersection(a, b):
    """ Returns the items of a that also appear in b, in a's order. """
    return list(filter(lambda x: x in b, a))
def difference(a, b):
    """ Returns the items of a that do not appear in b, in a's order. """
    return list(filter(lambda x: x not in b, a))
def partition(graph):
    """ Returns a list of unconnected subgraphs, largest subgraph first.
    """
    # Creates clusters of nodes and directly connected nodes.
    # Iteratively merges two clusters if they overlap.
    # Optimized: about 2x faster than original implementation.
    g = []
    for n in graph.nodes:
        g.append(dict.fromkeys([x.id for x in n.flatten()], True))
    for i in reversed(range(len(g))):
        for j in reversed(range(i+1, len(g))):
            if g[i] and g[j] and len(intersection(g[i], g[j])) > 0:
                g[i] = union(g[i], g[j])
                g[j] = []
    g = [graph.copy(nodes=[graph[id] for id in n]) for n in g if n]
    # sort(key=..., reverse=True) replaces the Python 2-only cmp-style
    # g.sort(lambda a, b: len(b) - len(a)); the ordering is identical.
    g.sort(key=len, reverse=True)
    return g
#--- GRAPH MAINTENANCE -------------------------------------------------------------------------------
# Utility commands for safe linking and unlinking of nodes,
# with respect for the surrounding nodes.
def unlink(graph, node1, node2=None):
    """ Removes the edges between node1 and node2.
        If only node1 is given, removes all edges to and from it.
        This does not remove node1 from the graph.
    """
    for e in list(graph.edges):
        if node1 in (e.node1, e.node2) and node2 in (e.node1, e.node2, None):
            graph.edges.remove(e)
    try:
        node1.links.remove(node2)
        node2.links.remove(node1)
    except (AttributeError, ValueError):
        # AttributeError: node2 is None ('NoneType' object has no attribute 'links').
        # ValueError: the nodes were not linked to each other.
        # (Previously a bare except, which also hid unrelated errors.)
        pass
def redirect(graph, node1, node2):
    """ Connects all of node1's edges to node2 and unlinks node1.
    """
    for e in graph.edges:
        # Bug fix: the condition referenced an undefined name "node",
        # which raised a NameError on the first edge; node1 was intended.
        if node1 in (e.node1, e.node2):
            if e.node1 == node1 and e.node2 != node2:
                # NOTE(review): Graph.append() tests "type is Node/Edge", i.e. it
                # expects the class as first argument; passing an Edge instance
                # here looks inconsistent - verify against Graph.append().
                graph.append(e.copy(node2, e.node2))
            if e.node2 == node1 and e.node1 != node2:
                graph.append(e.copy(e.node1, node2))
    unlink(graph, node1)
def cut(graph, node):
    """ Unlinks the given node, but keeps edges intact by connecting the surrounding nodes.
        If A, B, C, D are nodes and A->B, B->C, B->D, if we then cut B: A->C, A->D.
    """
    for e in graph.edges:
        if node not in (e.node1, e.node2):
            continue
        for linked in node.links:
            if e.node1 == node and e.node2 != linked:
                graph.append(e.copy(linked, e.node2))
            if e.node2 == node and e.node1 != linked:
                graph.append(e.copy(e.node1, linked))
    unlink(graph, node)
def insert(graph, node, a, b):
    """ Inserts the given node between node a and node b.
        If A, B, C are nodes and A->B, if we then insert C: A->C, C->B.
    """
    for e in graph.edges:
        # Check the edge in both directions: the pair (a,b) matches edge a->b,
        # the pair (b,a) matches edge b->a.
        for (n1,n2) in ((a,b), (b,a)):
            if e.node1 == n1 and e.node2 == n2:
                # n1->n2 is replaced by node->n2 ...
                graph.append(e.copy(node, n2))
            if e.node1 == n2 and e.node2 == n1:
                # ... and n2->node covers the reverse direction.
                graph.append(e.copy(n2, node))
    unlink(graph, a, b)
#--- HTML CANVAS RENDERER ----------------------------------------------------------------------------
import os, shutil, glob
try:
    MODULE = os.path.dirname(__file__)
except NameError:
    # __file__ is undefined when running in the interactive interpreter.
    # (Previously a bare except, which also hid unrelated errors.)
    MODULE = ""
DEFAULT, INLINE = "default", "inline" # Stylesheet modes for HTMLCanvasRenderer.
HTML, CANVAS, STYLE, SCRIPT, DATA = "html", "canvas", "style", "script", "data" # Render output types.
class HTMLCanvasRenderer:
    """ Renders a Graph as an interactive HTML5 <canvas> visualization,
        using the bundled js/graph.js + js/excanvas.js scripts.
    """
    def __init__(self, graph, **kwargs):
        self.graph = graph
        # Page template; filled in by the html property with
        # (title, css, js, js, script, id, id, width, height, ctx, width, height).
        self._source = \
            "<!DOCTYPE html>\n" \
            "<html>\n" \
            "<head>\n" \
            "\t<title>%s</title>\n" \
            "\t<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n" \
            "\t%s\n" \
            "\t<!--[if lte IE 8]><script type=\"text/javascript\" src=\"%sexcanvas.js\"></script><![endif]-->\n" \
            "\t<script type=\"text/javascript\" src=\"%sgraph.js\"></script>\n" \
            "\t%s\n" \
            "</head>\n" \
            "<body onload=\"javascript:init_%s();\">\n" \
            "\t<div id=\"%s\" style=\"width:%spx; height:%spx;\">\n" \
            "\t\t<canvas id=\"%s\" width=\"%s\" height=\"%s\">\n" \
            "\t\t</canvas>\n" \
            "\t</div>\n" \
            "\t<p>Generated with " \
            "<a href=\"http://www.clips.ua.ac.be/pages/pattern\">Pattern</a>.</p>\n" \
            "</body>\n" \
            "</html>"
        # HTML
        self.title = "Graph" # <title>Graph</title>
        self.javascript = "js/" # Path to excanvas.js + graph.js.
        self.stylesheet = INLINE # Either None, INLINE, DEFAULT (screen.css) or a custom path.
        self.id = "graph" # <div id="graph">
        self.ctx = "_ctx" # <canvas id="_ctx" width=700 height=500>
        self.width = 700 # Canvas width in pixels.
        self.height = 500 # Canvas height in pixels.
        # Javascript:Graph
        self.frames = 500 # Number of frames of animation.
        self.fps = 20 # Frames per second.
        self.ipf = 2 # Iterations per frame.
        self.weighted = False # Indicate betweenness centrality as a shadow?
        self.directed = False # Indicate edge direction with an arrow?
        self.prune = None # None or int, calls Graph.prune() in Javascript.
        self.pack = True # Shortens leaf edges, adds eigenvector weight to node radius.
        # Javascript:GraphLayout
        self.distance = 10 # Node spacing.
        self.k = 4.0 # Force constant.
        self.force = 0.01 # Force dampener.
        self.repulsion = 50 # Repulsive force radius.
        # Data
        self.weight = [WEIGHT, CENTRALITY] # Calculate these in Python, or True (in Javascript).
        self.href = {} # Dictionary of Node.id => URL.
        self.css = {} # Dictionary of Node.id => CSS classname.
        # Default options.
        # If a Node or Edge has one of these settings,
        # it is not passed to Javascript to save bandwidth.
        self.default = {
            "radius": 5,
            "fill": None,
            "stroke": (0,0,0,1),
            "strokewidth": 1,
            "text": (0,0,0,1),
            "fontsize": 11,
        }
    def _escape(self, s):
        # Escapes double quotes so the string can be embedded in Javascript source.
        return s.replace("\"", "\\\"")
    def _rgba(self, clr):
        # Color or tuple to a CSS "rgba(255,255,255,1.0)" string.
        return "\"rgba(%s,%s,%s,%.2f)\"" % (int(clr[0]*255), int(clr[1]*255), int(clr[2]*255), clr[3])
    @property
    def data(self):
        """ Yields a string of Javascript code that loads the nodes and edges into variable g,
            which is a Javascript Graph object (see graph.js).
            This can be the response to a XMLHttpRequest, after wich you move g into your own variable.
        """
        return "".join(self._data())
    def _data(self):
        # Returns the Javascript source as a list of string fragments.
        if self.graph.nodes and isinstance(self.weight, (list, tuple)):
            # Compute the requested metrics in Python unless they are cached already.
            if WEIGHT in self.weight and self.graph.nodes[-1]._weight is None:
                self.graph.eigenvector_centrality()
            if CENTRALITY in self.weight and self.graph.nodes[-1]._centrality is None:
                self.graph.betweenness_centrality()
        s = []
        s.append("var g = new Graph(document.getElementById(\"%s\"), %s);\n" % (self.ctx, self.distance))
        s.append("var n = {")
        if len(self.graph.nodes) > 0:
            s.append("\n")
        # Translate node properties to Javascript dictionary (var n).
        # Only non-default settings are emitted, to save bandwidth.
        for n in self.graph.nodes:
            p = []
            if n._x != 0:
                p.append("x:%i" % n._x) # 0
            if n._y != 0:
                p.append("y:%i" % n._y) # 0
            if n.radius != self.default["radius"]:
                p.append("radius:%.1f" % n.radius) # 5.0
            if n._weight is not None:
                p.append("weight:%.2f" % n.weight) # 0.00
            if n._centrality is not None:
                p.append("centrality:%.2f" % n.centrality) # 0.00
            if n.fill != self.default["fill"]:
                p.append("fill:%s" % self._rgba(n.fill)) # [0,0,0,1.0]
            if n.stroke != self.default["stroke"]:
                p.append("stroke:%s" % self._rgba(n.stroke)) # [0,0,0,1.0]
            if n.strokewidth != self.default["strokewidth"]:
                p.append("strokewidth:%.1f" % n.strokewidth) # 0.5
            if n.text and n.text.fill != self.default["text"]:
                p.append("text:%s" % self._rgba(n.text.fill)) # [0,0,0,1.0]
            if n.text and "font" in n.text.__dict__:
                p.append("font:\"%s\"" % n.text.__dict__["font"]) # "sans-serif"
            if n.text and n.text.__dict__.get("fontsize", self.default["fontsize"]) != self.default["fontsize"]:
                p.append("fontsize:%i" % int(max(1, n.text.fontsize)))
            if n.text and "fontweight" in n.text.__dict__: # "bold"
                p.append("fontweight:\"%s\"" % n.text.__dict__["fontweight"])
            if n.text and n.text.string != n.id:
                p.append("label:\"%s\"" % n.text.string)
            if n.id in self.href:
                p.append("href:\"%s\"" % self.href[n.id])
            if n.id in self.css:
                p.append("css:\"%s\"" % self.css[n.id])
            s.append("\t\"%s\": {%s},\n" % (self._escape(n.id), ", ".join(p)))
        s.append("};\n")
        s.append("var e = [")
        if len(self.graph.edges) > 0:
            s.append("\n")
        # Translate edge properties to Javascript dictionary (var e).
        for e in self.graph.edges:
            id1, id2 = self._escape(e.node1.id), self._escape(e.node2.id)
            p = []
            if e.weight != 0:
                p.append("weight:%.2f" % e.weight) # 0.00
            if e.length != 1:
                p.append("length:%.2f" % e.length) # 1.00
            if e.type is not None:
                # Bug fix: was self.type; the renderer defines no "type" attribute,
                # so a typed edge raised an AttributeError.
                p.append("type:\"%s\"" % e.type) # "is-part-of"
            if e.stroke != self.default["stroke"]:
                p.append("stroke:%s" % self._rgba(e.stroke)) # [0,0,0,1.0]
            if e.strokewidth != self.default["strokewidth"]:
                p.append("strokewidth:%.2f" % e.strokewidth) # 0.5
            s.append("\t[\"%s\", \"%s\", {%s}],\n" % (id1, id2, ", ".join(p)))
        s.append("];\n")
        # Append the nodes to graph g.
        s.append("for (var id in n) {\n"
                 "\tg.addNode(id, n[id]);\n"
                 "}\n")
        # Append the edges to graph g.
        s.append("for (var i=0; i < e.length; i++) {\n"
                 "\tvar n1 = g.nodeset[e[i][0]];\n"
                 "\tvar n2 = g.nodeset[e[i][1]];\n"
                 "\tg.addEdge(n1, n2, e[i][2]);\n"
                 "}")
        return s
    @property
    def script(self):
        """ Yields a string of Javascript code that loads the nodes and edges into variable g (Graph),
            and starts the animation of the visualization by calling g.loop().
        """
        return "".join(self._script())
    def _script(self):
        # Returns the animation Javascript as a list of string fragments.
        s = self._data()
        s.append("\n")
        # Apply node weight to node radius.
        if self.pack:
            s.append(
                "for (var i=0; i < g.nodes.length; i++) {\n"
                "\tvar n = g.nodes[i];\n"
                "\tn.radius = n.radius + n.radius * n.weight;\n"
                "}\n")
        # Apply edge length (leaves get shorter edges).
        if self.pack:
            s.append(
                "for (var i=0; i < g.nodes.length; i++) {\n"
                "\tvar e = g.nodes[i].edges();\n"
                "\tif (e.length == 1) {\n"
                "\t\te[0].length *= 0.2;\n"
                "\t}\n"
                "}\n")
        # Apply eigenvector and betweenness centrality.
        if self.weight is True:
            s.append(
                "g.eigenvectorCentrality();\n"
                "g.betweennessCentrality();\n")
        # Apply pruning.
        if self.prune is not None:
            s.append(
                "g.prune(%s);\n" % self.prune)
        # Include the layout settings (for clarity).
        s.append("g.layout.k = %s; // Force constant (= edge length).\n"
                 "g.layout.force = %s; // Repulsive strength.\n"
                 "g.layout.repulsion = %s; // Repulsive radius.\n" % (
                     self.k, self.force, self.repulsion))
        # Start the graph animation loop.
        s.append("// Start the animation loop.\n")
        s.append("g.loop({frames:%s, fps:%s, ipf:%s, weighted:%s, directed:%s});" % (
            int(self.frames),
            int(self.fps),
            int(self.ipf),
            str(self.weighted).lower(),
            str(self.directed).lower()))
        return s
    @property
    def canvas(self):
        """ Yields a string of HTML with a <div id="graph"> containing a HTML5 <canvas> element.
        """
        s = [
            "<div id=\"%s\" style=\"width:%spx; height:%spx;\">\n" % (self.id, self.width, self.height),
            "\t<canvas id=\"%s\" width=\"%s\" height=\"%s\">\n" % (self.ctx, self.width, self.height),
            "\t</canvas>\n",
            "</div>"
        ]
        #s.append("\n<script type=\"text/javascript\">\n")
        #s.append("".join(self._script()).replace("\n", "\n\t"))
        #s.append("\n</script>")
        return "".join(s)
    @property
    def style(self):
        """ Yields a string of CSS for <div id="graph">.
        """
        return \
            "body { font: 11px sans-serif; }\n" \
            "a { color: dodgerblue; }\n" \
            "#%s {\n" \
            "\tdisplay: block;\n" \
            "\tposition: relative;\n" \
            "\toverflow: hidden;\n" \
            "\tborder: 1px solid #ccc;\n" \
            "}\n" \
            "#%s canvas { }\n" \
            ".node-label { font-size: 11px; }" % (self.id, self.id)
    @property
    def html(self):
        """ Yields a string of HTML to visualize the graph using a force-based spring layout.
            The js parameter sets the path to graph.js and excanvas.js (by default, "./").
        """
        js = self.javascript.rstrip("/")
        js = (js and js or ".")+"/"
        if self.stylesheet == INLINE:
            css = self.style.replace("\n","\n\t\t").rstrip("\t")
            css = "<style type=\"text/css\">\n\t\t%s\n\t</style>" % css
        elif self.stylesheet == DEFAULT:
            css = "<link rel=\"stylesheet\" href=\"screen.css\" type=\"text/css\" media=\"screen\" />"
        elif self.stylesheet is not None:
            css = "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\" media=\"screen\" />" % self.stylesheet
        else:
            # Bug fix: with stylesheet=None (a documented option), css was never
            # assigned and the template substitution below raised a NameError.
            css = ""
        s = self._script()
        s = "".join(s)
        s = s.replace("\n", "\n\t\t")
        s = "<script type=\"text/javascript\">\n\tfunction init_%s() {\n\t\t%s\n\t}\n\t</script>" % (self.id, s)
        s = s.rstrip()
        s = self._source % (
            self.title,
            css,
            js,
            js,
            s,
            self.id,
            self.id,
            self.width,
            self.height,
            self.ctx,
            self.width,
            self.height)
        return s
    def render(self, type=HTML):
        """ Returns the rendered output of the given type: HTML, CANVAS, STYLE, SCRIPT or DATA. """
        if type == HTML:
            return self.html
        if type == CANVAS:
            return self.canvas
        if type == STYLE:
            return self.style
        if type == SCRIPT:
            return self.script
        if type == DATA:
            return self.data
    def export(self, path, overwrite=False, encoding="utf-8"):
        """ Generates a folder at the given path containing an index.html
            that visualizes the graph using the HTML5 <canvas> tag.
        """
        if overwrite and os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path) # With overwrite=False, raises OSError if the path already exists.
        os.mkdir(os.path.join(path, "js"))
        # Copy js/graph.js + js/excanvas.js (unless a custom path is given.)
        if self.javascript == "js/":
            for f in glob.glob(os.path.join(MODULE, "js", "*.js")):
                shutil.copy(f, os.path.join(path, "js", os.path.basename(f)))
        # Create screen.css.
        if self.stylesheet == DEFAULT:
            f = open(os.path.join(path, "screen.css"), "w")
            f.write(self.style)
            f.close()
        # Create index.html.
        # NOTE(review): open() with an encoding keyword requires Python 3
        # (use codecs.open() under Python 2) - verify the target interpreter.
        f = open(os.path.join(path, "index.html"), "w", encoding=encoding)
        f.write(self.html)
        f.close()
def render(graph, type=HTML, **kwargs):
    """ Returns a string of rendered output for the given graph
        (HTML, CANVAS, STYLE, SCRIPT or DATA), using a HTMLCanvasRenderer.
        Keyword arguments that match renderer attributes override its defaults.
    """
    renderer = HTMLCanvasRenderer(graph)
    renderer.default.update(kwargs.get("default", {}))
    kwargs["default"] = renderer.default
    kwargs.setdefault("stylesheet", INLINE)
    for key, value in kwargs.items():
        if key in renderer.__dict__:
            renderer.__dict__[key] = value
    return renderer.render(type)
def export(graph, path, overwrite=False, encoding="utf-8", **kwargs):
    """ Generates a folder at the given path with an index.html that visualizes
        the graph, using a HTMLCanvasRenderer.
        Keyword arguments that match renderer attributes override its defaults.
    """
    renderer = HTMLCanvasRenderer(graph)
    renderer.default.update(kwargs.get("default", {}))
    kwargs["default"] = renderer.default
    kwargs["stylesheet"] = kwargs.get("stylesheet", DEFAULT)
    for k,v in kwargs.items():
        if k in renderer.__dict__:
            renderer.__dict__[k] = v
    # Bug fix: the encoding parameter was accepted but never passed on.
    return renderer.export(path, overwrite, encoding)
| bsd-3-clause |
edlabh/SickRage | lib/github/StatsPunchCard.py | 74 | 2341 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class StatsPunchCard(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents the punch card. The reference can be found here http://developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day
    """
    def get(self, day, hour):
        """
        Get a specific element
        :param day: int
        :param hour: int
        :rtype: int
        """
        # Raises KeyError if the (day, hour) pair is not present.
        return self._dict[(day, hour)]
    def _initAttributes(self):
        # Maps (day, hour) -> number of commits.
        self._dict = {}
    def _useAttributes(self, attributes):
        # attributes is a sequence of (day, hour, commits) triples from the API.
        for day, hour, commits in attributes:
            self._dict[(day, hour)] = commits
| gpl-3.0 |
mj10777/QGIS | tests/src/python/test_qgssearchwidgettoolbutton.py | 45 | 7453 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSearchWidgetToolButton.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/05/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA switch sip api
from qgis.gui import QgsSearchWidgetToolButton, QgsSearchWidgetWrapper
from qgis.testing import start_app, unittest
start_app()
class TestQgsSearchWidgetToolButton(unittest.TestCase):
    def testAvailableFlags(self):
        """
        Test setting available flags
        """
        w = QgsSearchWidgetToolButton()
        w.setAvailableFlags(QgsSearchWidgetWrapper.EqualTo |
                            QgsSearchWidgetWrapper.NotEqualTo |
                            QgsSearchWidgetWrapper.CaseInsensitive)
        flags = w.availableFlags()
        self.assertTrue(flags & QgsSearchWidgetWrapper.EqualTo)
        self.assertTrue(flags & QgsSearchWidgetWrapper.NotEqualTo)
        self.assertTrue(flags & QgsSearchWidgetWrapper.CaseInsensitive)
        self.assertFalse(flags & QgsSearchWidgetWrapper.Between)
        # setting available flags should update active flags
        w.setActiveFlags(QgsSearchWidgetWrapper.NotEqualTo | QgsSearchWidgetWrapper.CaseInsensitive)
        w.setAvailableFlags(QgsSearchWidgetWrapper.EqualTo |
                            QgsSearchWidgetWrapper.NotEqualTo)
        flags = w.activeFlags()
        # Active flags that are no longer available must have been cleared.
        self.assertFalse(flags & QgsSearchWidgetWrapper.EqualTo)
        self.assertTrue(flags & QgsSearchWidgetWrapper.NotEqualTo)
        self.assertFalse(flags & QgsSearchWidgetWrapper.CaseInsensitive)
    def testActiveFlags(self):
        """
        Test setting/retrieving active flag logic
        """
        w = QgsSearchWidgetToolButton()
        w.setAvailableFlags(QgsSearchWidgetWrapper.EqualTo |
                            QgsSearchWidgetWrapper.NotEqualTo |
                            QgsSearchWidgetWrapper.CaseInsensitive)
        w.setActiveFlags(QgsSearchWidgetWrapper.EqualTo)
        flags = w.activeFlags()
        self.assertTrue(flags & QgsSearchWidgetWrapper.EqualTo)
        self.assertFalse(flags & QgsSearchWidgetWrapper.NotEqualTo)
        w.setActiveFlags(QgsSearchWidgetWrapper.EqualTo | QgsSearchWidgetWrapper.CaseInsensitive)
        flags = w.activeFlags()
        self.assertTrue(flags & QgsSearchWidgetWrapper.EqualTo)
        self.assertTrue(flags & QgsSearchWidgetWrapper.CaseInsensitive)
        # setting a non-available flag as active
        # (the unavailable flag must be silently dropped)
        w.setAvailableFlags(QgsSearchWidgetWrapper.EqualTo |
                            QgsSearchWidgetWrapper.NotEqualTo)
        w.setActiveFlags(QgsSearchWidgetWrapper.EqualTo | QgsSearchWidgetWrapper.CaseInsensitive)
        flags = w.activeFlags()
        self.assertTrue(flags & QgsSearchWidgetWrapper.EqualTo)
        self.assertFalse(flags & QgsSearchWidgetWrapper.CaseInsensitive)
        # setting conflicting flags
        # (only the first of two mutually exclusive comparison flags is kept)
        w.setActiveFlags(QgsSearchWidgetWrapper.EqualTo | QgsSearchWidgetWrapper.NotEqualTo)
        flags = w.activeFlags()
        self.assertTrue(flags & QgsSearchWidgetWrapper.EqualTo)
        self.assertFalse(flags & QgsSearchWidgetWrapper.NotEqualTo)
def testToggleFlag(self):
    """ Test toggling flags """
    equal = QgsSearchWidgetWrapper.EqualTo
    not_equal = QgsSearchWidgetWrapper.NotEqualTo
    nocase = QgsSearchWidgetWrapper.CaseInsensitive
    between = QgsSearchWidgetWrapper.Between
    less_than = QgsSearchWidgetWrapper.LessThan

    btn = QgsSearchWidgetToolButton()

    def check(on, off):
        # Assert which flags are currently set/cleared on the button.
        current = btn.activeFlags()
        for flag in on:
            self.assertTrue(current & flag)
        for flag in off:
            self.assertFalse(current & flag)

    btn.setAvailableFlags(equal | not_equal | nocase)
    btn.setActiveFlags(equal)

    # toggling an unset available flag should set it
    btn.toggleFlag(nocase)
    check(on=[equal, nocase], off=[not_equal])

    # toggling it again should clear it
    btn.toggleFlag(nocase)
    check(on=[equal], off=[not_equal, nocase])

    # toggling flags that are not available should be ignored
    btn.setAvailableFlags(between | not_equal)
    btn.setActiveFlags(between)
    btn.toggleFlag(nocase)
    btn.toggleFlag(less_than)
    check(on=[between], off=[nocase, less_than])

    # toggling an exclusive flag should clear the other exclusive flags
    btn.setAvailableFlags(between | not_equal | nocase)
    btn.setActiveFlags(between | nocase)
    btn.toggleFlag(between)
    check(on=[nocase, between], off=[not_equal])
    btn.toggleFlag(not_equal)
    check(on=[nocase, not_equal], off=[between])
def testSetInactive(self):
    """ Test setting the search as inactive """
    btn = QgsSearchWidgetToolButton()
    btn.setAvailableFlags(QgsSearchWidgetWrapper.EqualTo |
                          QgsSearchWidgetWrapper.NotEqualTo |
                          QgsSearchWidgetWrapper.CaseInsensitive)
    btn.setActiveFlags(QgsSearchWidgetWrapper.EqualTo |
                       QgsSearchWidgetWrapper.CaseInsensitive)
    self.assertTrue(btn.isActive())

    btn.setInactive()

    # deactivating clears the condition flag but keeps the modifier flag
    remaining = btn.activeFlags()
    self.assertFalse(remaining & QgsSearchWidgetWrapper.EqualTo)
    self.assertTrue(remaining & QgsSearchWidgetWrapper.CaseInsensitive)
    self.assertFalse(btn.isActive())
def testSetActive(self):
    """ Test setting the search as active should adopt default flags"""
    btn = QgsSearchWidgetToolButton()
    btn.setAvailableFlags(QgsSearchWidgetWrapper.Between |
                          QgsSearchWidgetWrapper.NotEqualTo |
                          QgsSearchWidgetWrapper.CaseInsensitive)
    btn.setActiveFlags(QgsSearchWidgetWrapper.CaseInsensitive)
    btn.setDefaultFlags(QgsSearchWidgetWrapper.NotEqualTo)
    self.assertFalse(btn.isActive())

    btn.setActive()

    # activating applies the default flag on top of the existing modifier
    adopted = btn.activeFlags()
    self.assertTrue(adopted & QgsSearchWidgetWrapper.NotEqualTo)
    self.assertTrue(adopted & QgsSearchWidgetWrapper.CaseInsensitive)
    self.assertTrue(btn.isActive())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
ephes/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings

# Make sure that DeprecationWarning within this package always gets printed.
# NOTE: the module pattern is a regular expression, so it must be a raw
# string — '\.' in a plain string is an invalid escape sequence that newer
# Pythons warn about (the string value itself is unchanged).
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(__name__)))

# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'

try:
    # This variable is injected in the __builtins__ by the build
    # process. It used to enable importing subpackages of sklearn when
    # the binaries are not built
    __SKLEARN_SETUP__
except NameError:
    # Normal (non-build) import: flag is absent, so define it as False.
    __SKLEARN_SETUP__ = False

if __SKLEARN_SETUP__:
    sys.stderr.write('Partial import of sklearn during the build process.\n')
    # We are not importing the rest of the scikit during the build
    # process, as it may not be compiled yet
else:
    from . import __check_build
    from .base import clone
    __check_build  # avoid flakes unused variable error

    # Public subpackages (plus the `clone` helper) exposed by `from sklearn import *`.
    __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
               'cross_validation', 'datasets', 'decomposition', 'dummy',
               'ensemble', 'externals', 'feature_extraction',
               'feature_selection', 'gaussian_process', 'grid_search',
               'isotonic', 'kernel_approximation', 'kernel_ridge',
               'lda', 'learning_curve',
               'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
               'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
               'preprocessing', 'qda', 'random_projection', 'semi_supervised',
               'svm', 'tree',
               # Non-modules:
               'clone']
def setup_module(module):
    """Fixture for the tests to assure globally controllable seeding of RNGs"""
    import os
    import random

    import numpy as np

    # It could have been provided in the environment; otherwise draw one.
    seed = os.environ.get('SKLEARN_SEED', None)
    if seed is None:
        seed = np.random.uniform() * (2 ** 31 - 1)
    seed = int(seed)
    print("I: Seeding RNGs with %r" % seed)
    np.random.seed(seed)
    random.seed(seed)
| bsd-3-clause |
manishpatell/erpcustomizationssaiimpex123qwe | addons/hw_posbox_upgrade/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
txemagon/1984 | modules/Telegram-bot-python/build/lib/telegram/vendor/ptb_urllib3/urllib3/response.py | 150 | 22662 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
import logging
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
ResponseNotChunked, IncompleteRead, InvalidHeader
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
log = logging.getLogger(__name__)
class DeflateDecoder(object):
    """Incremental decoder for ``Content-Encoding: deflate`` bodies.

    Handles both zlib-wrapped (RFC 1950) and raw (RFC 1951) deflate
    streams: the first chunk is tried with a zlib-wrapped decompressor,
    and if that fails the accumulated data is replayed in raw-deflate
    mode.  Some servers mislabel raw deflate as ``deflate``, hence the
    fallback.
    """

    def __init__(self):
        self._first_try = True
        # Buffer of everything fed in so far, needed to replay the stream
        # if we must fall back to raw deflate.  (b'' is exactly what
        # six.binary_type() evaluates to on both Python 2 and 3, so the
        # literal avoids a needless six dependency here.)
        self._data = b''
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to the
        # underlying zlib decompressor object.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress *data*; empty input passes straight through."""
        if not data:
            return data

        if not self._first_try:
            return self._obj.decompress(data)

        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            # zlib-wrapped attempt failed: retry as raw deflate with all
            # data seen so far, then drop the replay buffer for good.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoder(object):
    """Thin incremental wrapper around zlib configured for gzip streams."""

    def __init__(self):
        # 16 + MAX_WBITS tells zlib to expect a gzip header and trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Forward any other attribute (e.g. ``flush``) to the zlib object.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress a chunk of gzip data; empty input passes through."""
        return self._obj.decompress(data) if data else data
def _get_decoder(mode):
    """Return the decoder matching a Content-Encoding value: 'gzip' gets
    the gzip decoder, anything else ('deflate') the deflate decoder."""
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.

    :param retries:
        The retries contains the last :class:`~urllib3.util.retry.Retry` that
        was used during the request.

    :param enforce_content_length:
        Enforce content length checking. Body returned by server must match
        value of Content-Length header, if present. Otherwise, raise error.
    """

    # Content-encodings that _init_decoder() knows how to decode.
    CONTENT_DECODERS = ['gzip', 'deflate']
    # Status codes for which get_redirect_location() consults the Location header.
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None,
                 retries=None, enforce_content_length=False, request_method=None):

        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        self.retries = retries
        self.enforce_content_length = enforce_content_length

        self._decoder = None        # created lazily by _init_decoder()
        self._body = None           # cached/preloaded body, if any
        self._fp = None             # underlying file-like object being read
        self._original_response = original_response
        self._fp_bytes_read = 0     # raw (wire) bytes consumed so far

        if body and isinstance(body, (basestring, binary_type)):
            self._body = body

        self._pool = pool
        self._connection = connection

        if hasattr(body, 'read'):
            self._fp = body

        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True

        # Determine length of response
        self.length_remaining = self._init_length(request_method)

        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')

        return False

    def release_conn(self):
        # Return the underlying connection to its pool, if we hold both.
        if not self._pool or not self._connection:
            return

        self._pool._put_conn(self._connection)
        self._connection = None

    @property
    def data(self):
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body

        if self._fp:
            return self.read(cache_content=True)

    @property
    def connection(self):
        return self._connection

    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read

    def _init_length(self, request_method):
        """
        Set initial length value for Response content if available.
        """
        length = self.headers.get('content-length')

        if length is not None and self.chunked:
            # This Response will fail with an IncompleteRead if it can't be
            # received as chunked. This method falls back to attempt reading
            # the response before raising an exception.
            log.warning("Received response with both Content-Length and "
                        "Transfer-Encoding set. This is expressly forbidden "
                        "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                        "attempting to process response as Transfer-Encoding: "
                        "chunked.")
            return None

        elif length is not None:
            try:
                # RFC 7230 section 3.3.2 specifies multiple content lengths can
                # be sent in a single Content-Length header
                # (e.g. Content-Length: 42, 42). This line ensures the values
                # are all valid ints and that as long as the `set` length is 1,
                # all values are the same. Otherwise, the header is invalid.
                lengths = set([int(val) for val in length.split(',')])
                if len(lengths) > 1:
                    raise InvalidHeader("Content-Length contained multiple "
                                        "unmatching values (%s)" % length)
                length = lengths.pop()
            except ValueError:
                length = None
            else:
                if length < 0:
                    length = None

        # Convert status to int for comparison
        # In some cases, httplib returns a status of "_UNKNOWN"
        try:
            status = int(self.status)
        except ValueError:
            status = 0

        # Check for responses that shouldn't include a body
        if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
            length = 0

        return length

    def _init_decoder(self):
        """
        Set-up the _decoder attribute if necessary.
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
            self._decoder = _get_decoder(content_encoding)

    def _decode(self, data, decode_content, flush_decoder):
        """
        Decode the data passed in and potentially flush the decoder.
        """
        try:
            if decode_content and self._decoder:
                data = self._decoder.decompress(data)
        except (IOError, zlib.error) as e:
            content_encoding = self.headers.get('content-encoding', '').lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding, e)

        if flush_decoder and decode_content:
            data += self._flush_decoder()

        return data

    def _flush_decoder(self):
        """
        Flushes the decoder. Should only be called if the decoder is actually
        being used.
        """
        if self._decoder:
            buf = self._decoder.decompress(b'')
            return buf + self._decoder.flush()

        return b''

    @contextmanager
    def _error_catcher(self):
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.

        On exit, release the connection back to the pool.
        """
        clean_exit = False

        try:
            try:
                yield

            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise

                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except (HTTPException, SocketError) as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)

            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()

                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()

            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False
        data = None

        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if self.enforce_content_length and self.length_remaining not in (0, None):
                        # This is an edge case that httplib failed to cover due
                        # to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)

        if data:
            # Track raw byte counts before decoding so tell() reports wire bytes.
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)

            data = self._decode(data, decode_content, flush_decoder)

            if cache_content:
                self._body = data

        return data

    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked and self.supports_chunked_reads():
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)

                if data:
                    yield data

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg

        if not isinstance(headers, HTTPHeaderDict):
            if PY3:  # Python 3
                headers = HTTPHeaderDict(headers.items())
            else:  # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)

        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)

    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()

        if self._connection:
            self._connection.close()

    @property
    def closed(self):
        # Treat "no file object" and "file object with no closed indicator"
        # as closed, so io consumers never block on an unreadable response.
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'isclosed'):
            return self._fp.isclosed()
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        else:
            return True

    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")

    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()

    def readable(self):
        # This method is required for `io` module compatibility.
        return True

    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)

    def supports_chunked_reads(self):
        """
        Checks if the underlying file-like object looks like a
        httplib.HTTPResponse object. We do this by testing for the fp
        attribute. If it is present we assume it returns raw chunks as
        processed by read_chunked().
        """
        return hasattr(self._fp, 'fp')

    def _update_chunk_length(self):
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return
        line = self._fp.fp.readline()
        # Chunk size is a hex number, optionally followed by ';extension'.
        line = line.split(b';', 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise httplib.IncompleteRead(line)

    def _handle_chunk(self, amt):
        # Read up to ``amt`` bytes of the current chunk (or the whole chunk
        # when amt is None), consuming the trailing CRLF once a chunk ends.
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk

    def read_chunked(self, amt=None, decode_content=None):
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing.")
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be httplib.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks.")

        # Don't bother reading the body of a HEAD request.
        if self._original_response and is_response_to_head(self._original_response):
            self._original_response.close()
            return

        with self._error_catcher():
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(chunk, decode_content=decode_content,
                                       flush_decoder=False)
                if decoded:
                    yield decoded

            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded

            # Chunk content ends with \r\n: discard it.
            while True:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b'\r\n':
                    break

            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
| gpl-3.0 |
vitan/hue | desktop/core/ext-py/python-ldap-2.3.13/Tests/t_search.py | 40 | 2986 | import ldap, unittest
import slapd
from ldap.ldapobject import LDAPObject
# Module-level handle to the shared slapd test server; started lazily by
# TestSearch.setUp() on first use so every test reuses one directory instance.
server = None
class TestSearch(unittest.TestCase):
    """Integration tests for LDAP search scopes against a scratch slapd."""

    def setUp(self):
        global server
        if server is None:
            # First test run: boot a throwaway slapd and seed it once.
            server = slapd.Slapd()
            server.start()
            suffix = server.get_dn_suffix()

            # insert some Foo* objects via ldapadd
            entries = [
                "dn: cn=Foo1," + suffix,
                "objectClass: organizationalRole",
                "cn: Foo1",
                "",
                "dn: cn=Foo2," + suffix,
                "objectClass: organizationalRole",
                "cn: Foo2",
                "",
                "dn: cn=Foo3," + suffix,
                "objectClass: organizationalRole",
                "cn: Foo3",
                "",
                "dn: ou=Container," + suffix,
                "objectClass: organizationalUnit",
                "ou: Container",
                "",
                "dn: cn=Foo4,ou=Container," + suffix,
                "objectClass: organizationalRole",
                "cn: Foo4",
                "",
            ]
            server.ldapadd("\n".join(entries) + "\n")

        conn = LDAPObject(server.get_url())
        conn.protocol_version = 3
        conn.set_option(ldap.OPT_REFERRALS, 0)
        conn.simple_bind_s(server.get_root_dn(), server.get_root_password())
        self.ldap = conn
        self.server = server

    def test_search_subtree(self):
        """A subtree search finds matches at every depth under the base."""
        suffix = self.server.get_dn_suffix()
        found = self.ldap.search_s(suffix, ldap.SCOPE_SUBTREE, '(cn=Foo*)', ['*'])
        found.sort()
        self.assertEquals(
            found,
            [('cn=Foo1,' + suffix,
              {'cn': ['Foo1'], 'objectClass': ['organizationalRole']}),
             ('cn=Foo2,' + suffix,
              {'cn': ['Foo2'], 'objectClass': ['organizationalRole']}),
             ('cn=Foo3,' + suffix,
              {'cn': ['Foo3'], 'objectClass': ['organizationalRole']}),
             ('cn=Foo4,ou=Container,' + suffix,
              {'cn': ['Foo4'], 'objectClass': ['organizationalRole']}),
             ])

    def test_search_onelevel(self):
        """A one-level search must not descend into ou=Container."""
        suffix = self.server.get_dn_suffix()
        found = self.ldap.search_s(suffix, ldap.SCOPE_ONELEVEL, '(cn=Foo*)', ['*'])
        found.sort()
        self.assertEquals(
            found,
            [('cn=Foo1,' + suffix,
              {'cn': ['Foo1'], 'objectClass': ['organizationalRole']}),
             ('cn=Foo2,' + suffix,
              {'cn': ['Foo2'], 'objectClass': ['organizationalRole']}),
             ('cn=Foo3,' + suffix,
              {'cn': ['Foo3'], 'objectClass': ['organizationalRole']}),
             ])

    def test_search_oneattr(self):
        """Requesting a single attribute returns only that attribute."""
        suffix = self.server.get_dn_suffix()
        found = self.ldap.search_s(suffix, ldap.SCOPE_SUBTREE, '(cn=Foo4)', ['cn'])
        found.sort()
        self.assertEquals(
            found,
            [('cn=Foo4,ou=Container,' + suffix, {'cn': ['Foo4']})])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
direvus/ansible | lib/ansible/modules/cloud/amazon/ec2_asg_facts.py | 69 | 14012 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_asg_facts
short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
description:
- Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
version_added: "2.2"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
name:
description:
- The prefix or name of the auto scaling group(s) you are searching for.
- "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
required: false
tags:
description:
- >
A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
group(s) you are searching for.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Find all groups
- ec2_asg_facts:
register: asgs
# Find a group with matching name/prefix
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
# Find a group with matching tags
- ec2_asg_facts:
tags:
project: webapp
env: production
register: asgs
# Find a group with matching name/prefix and tags
- ec2_asg_facts:
name: myproject
tags:
env: production
register: asgs
# Fail if no groups are found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length == 0 }}"
# Fail if more than 1 group is found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length > 1 }}"
'''
RETURN = '''
---
auto_scaling_group_arn:
description: The Amazon Resource Name of the ASG
returned: success
type: string
sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
auto_scaling_group_name:
description: Name of autoscaling group
returned: success
type: str
sample: "public-webapp-production-1"
availability_zones:
description: List of Availability Zones that are enabled for this ASG.
returned: success
type: list
sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
created_time:
description: The date and time this ASG was created, in ISO 8601 format.
returned: success
type: string
sample: "2015-11-25T00:05:36.309Z"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
health_check_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
health_check_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
instances:
description: List of EC2 instances and their status as it relates to the ASG.
returned: success
type: list
sample: [
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-es22ad25",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": "false"
}
]
launch_config_name:
description: >
Name of launch configuration associated with the ASG. Same as launch_configuration_name,
provided for compatibility with ec2_asg module.
returned: success
type: str
sample: "public-webapp-production-1"
launch_configuration_name:
description: Name of launch configuration associated with the ASG.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancer_names:
description: List of load balancers names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
new_instances_protected_from_scale_in:
description: Whether or not new instances a protected from automatic scaling in.
returned: success
type: boolean
sample: "false"
placement_group:
description: Placement group into which instances are launched, if any.
returned: success
type: str
sample: None
status:
description: The current state of the group when DeleteAutoScalingGroup is in progress.
returned: success
type: str
sample: None
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
target_group_arns:
description: List of ARNs of the target groups that the ASG populates
returned: success
type: list
sample: [
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
]
target_group_names:
description: List of names of the target groups that the ASG populates
returned: success
type: list
sample: [
"target-group-host-hello",
"target-group-path-world"
]
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
sample: ["Default"]
'''
import re
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (get_aws_connection_info, boto3_conn, ec2_argument_spec,
camel_dict_to_snake_dict, HAS_BOTO3)
def match_asg_tags(tags_to_match, asg):
    """Return True when every key/value pair in tags_to_match appears among
    the ASG's tags, False otherwise (an empty dict always matches)."""
    return all(
        any(tag['Key'] == wanted_key and tag['Value'] == wanted_value
            for tag in asg['Tags'])
        for wanted_key, wanted_value in tags_to_match.items()
    )
def find_asgs(conn, module, name=None, tags=None):
    """
    Args:
        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
        module (AnsibleModule): Ansible module object; used for AWS
            connection setup and for failure reporting.
        name (str): Optional name of the ASG you are looking for.
        tags (dict): Optional dictionary of tags and values to search for.
    Basic Usage:
        >>> name = 'public-webapp-production'
        >>> tags = { 'env': 'production' }
        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
        >>> results = find_asgs(name, conn)
    Returns:
        List
        [
            {
                "auto_scaling_group_arn": (
                    "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
                    "autoScalingGroupName/public-webapp-production"
                ),
                "auto_scaling_group_name": "public-webapp-production",
                "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
                "created_time": "2016-02-02T23:28:42.481000+00:00",
                "default_cooldown": 300,
                "desired_capacity": 2,
                "enabled_metrics": [],
                "health_check_grace_period": 300,
                "health_check_type": "ELB",
                "instances":
                [
                    {
                        "availability_zone": "us-west-2c",
                        "health_status": "Healthy",
                        "instance_id": "i-047a12cb",
                        "launch_configuration_name": "public-webapp-production-1",
                        "lifecycle_state": "InService",
                        "protected_from_scale_in": false
                    },
                    {
                        "availability_zone": "us-west-2a",
                        "health_status": "Healthy",
                        "instance_id": "i-7a29df2c",
                        "launch_configuration_name": "public-webapp-production-1",
                        "lifecycle_state": "InService",
                        "protected_from_scale_in": false
                    }
                ],
                "launch_config_name": "public-webapp-production-1",
                "launch_configuration_name": "public-webapp-production-1",
                "load_balancer_names": ["public-webapp-production-lb"],
                "max_size": 4,
                "min_size": 2,
                "new_instances_protected_from_scale_in": false,
                "placement_group": None,
                "status": None,
                "suspended_processes": [],
                "tags":
                [
                    {
                        "key": "Name",
                        "propagate_at_launch": true,
                        "resource_id": "public-webapp-production",
                        "resource_type": "auto-scaling-group",
                        "value": "public-webapp-production"
                    },
                    {
                        "key": "env",
                        "propagate_at_launch": true,
                        "resource_id": "public-webapp-production",
                        "resource_type": "auto-scaling-group",
                        "value": "production"
                    }
                ],
                "target_group_names": [],
                "target_group_arns": [],
                "termination_policies":
                [
                    "Default"
                ],
                "vpc_zone_identifier":
                [
                    "subnet-a1b1c1d1",
                    "subnet-a2b2c2d2",
                    "subnet-a3b3c3d3"
                ]
            }
        ]
    """
    try:
        asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
        asgs = asgs_paginator.paginate().build_full_result()
    except ClientError as e:
        # NOTE(review): e.message is a Python 2-ism (removed in Python 3) --
        # confirm the supported runtime for this module.
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
    if not asgs:
        return asgs
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        elbv2 = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except ClientError as e:
        # This is nice to have, not essential
        elbv2 = None
    matched_asgs = []
    if name is not None:
        # Pre-compile a prefix-match regex when (and only when) a name was
        # supplied; every use of name_prog below is guarded by "if name:".
        name_prog = re.compile(r'^' + name)
    for asg in asgs['AutoScalingGroups']:
        if name:
            matched_name = name_prog.search(asg['AutoScalingGroupName'])
        else:
            matched_name = True
        if tags:
            matched_tags = match_asg_tags(tags, asg)
        else:
            matched_tags = True
        if matched_name and matched_tags:
            asg = camel_dict_to_snake_dict(asg)
            # compatibility with ec2_asg module
            asg['launch_config_name'] = asg['launch_configuration_name']
            # workaround for https://github.com/ansible/ansible/pull/25015
            if 'target_group_ar_ns' in asg:
                asg['target_group_arns'] = asg['target_group_ar_ns']
                del(asg['target_group_ar_ns'])
            if asg.get('target_group_arns'):
                # NOTE(review): when the elbv2 client could not be created,
                # target_group_names is left unset for ASGs with target groups.
                if elbv2:
                    try:
                        tg_paginator = elbv2.get_paginator('describe_target_groups')
                        tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result()
                        asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']]
                    except ClientError as e:
                        if e.response['Error']['Code'] == 'TargetGroupNotFound':
                            asg['target_group_names'] = []
            else:
                asg['target_group_names'] = []
            matched_asgs.append(asg)
    return matched_asgs
def main():
    """Module entry point: find ASGs matching the optional name prefix and/or
    tags and exit with the matching facts."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str'),
            tags=dict(type='dict'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    asg_name = module.params.get('name')
    asg_tags = module.params.get('tags')
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except ClientError as e:
        # NOTE(review): e.message is a Python 2-ism -- confirm target runtime.
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
    results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
    module.exit_json(results=results)
if __name__ == '__main__':
    main()
| gpl-3.0 |
hanvo/MusicCloud | Crawler/Install Files/pygame/test/run_tests__tests/failures1/fake_3_test.py | 270 | 1136 | if __name__ == '__main__':
    import sys
    import os
    # Run as a script: walk three directories up from this file to find the
    # package root, then make it importable if we are not inside pygame/tests.
    pkg_dir = (os.path.split(
        os.path.split(
            os.path.split(
                os.path.abspath(__file__))[0])[0])[0])
    parent_dir, pkg_name = os.path.split(pkg_dir)
    is_pygame_pkg = (pkg_name == 'tests' and
                     os.path.split(parent_dir)[1] == 'pygame')
    if not is_pygame_pkg:
        sys.path.insert(0, parent_dir)
else:
    # Imported as a module: decide based on our dotted module name instead.
    is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
    from pygame.tests import test_utils
    from pygame.tests.test_utils import unittest
else:
    from test import test_utils
    from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
    """Placeholder suite: every test trivially passes.

    NOTE(review): uses the deprecated TestCase.assert_ alias throughout;
    kept as-is since this fixture only needs the tests to succeed.
    """
    def test_get_focused(self):
        self.assert_(True)
    def test_get_mods(self):
        self.assert_(True)
    def test_get_pressed(self):
        self.assert_(True)
    def test_name(self):
        self.assert_(True)
    def test_set_mods(self):
        self.assert_(True)
    def test_set_repeat(self):
        self.assert_(True)
if __name__ == '__main__':
unittest.main() | bsd-3-clause |
40223139/39g7test | static/Brython3.1.3-20150514-095342/Lib/_collections.py | 603 | 19111 | # "High performance data structures
# "
# copied from pypy repo
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# edited for Brython line 558 : catch ImportError instead of AttributeError
import operator
#try:
#    from thread import get_ident as _thread_ident
#except ImportError:
def _thread_ident():
    """Fallback for thread.get_ident(): with no thread module available,
    every caller shares the constant pseudo thread id -1."""
    return -1
n = 30          # payload slots per block
LFTLNK = n      # block slot holding the link to the previous (left) block
RGTLNK = n+1    # block slot holding the link to the next (right) block
BLOCKSIZ = n+2  # total slots per block: payload plus the two links
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
#class deque(object):
class deque:
    """Pure-Python deque backed by a doubly linked list of fixed-size
    blocks.  Each block is a list of BLOCKSIZ slots: n payload slots plus
    the LFTLNK/RGTLNK slots linking to the neighbouring blocks."""
    def __new__(cls, iterable=(), *args, **kw):
        # Do NOT forward the extra args to object.__new__(): under CPython 3
        # that raises TypeError (e.g. for deque(maxlen=2)).  __init__ still
        # receives them through the normal construction protocol.
        self = object.__new__(cls)
        self.clear()
        return self
    def __init__(self, iterable=(), maxlen=None):
        object.__init__(self)
        self.clear()
        if maxlen is not None:
            if maxlen < 0:
                raise ValueError("maxlen must be non-negative")
        self._maxlen = maxlen
        add = self.append
        for elem in iterable:
            add(elem)
    @property
    def maxlen(self):
        # None means unbounded.
        return self._maxlen
    def clear(self):
        # A single shared block; leftndx > rightndx encodes "empty".
        self.right = self.left = [None] * BLOCKSIZ
        self.rightndx = n//2 # points to last written element
        self.leftndx = n//2+1
        self.length = 0
        self.state = 0       # bumped on every mutation; guards iterators
    def append(self, x):
        self.state += 1
        self.rightndx += 1
        if self.rightndx == n:
            # Right block full: link in a fresh block on the right.
            newblock = [None] * BLOCKSIZ
            self.right[RGTLNK] = newblock
            newblock[LFTLNK] = self.right
            self.right = newblock
            self.rightndx = 0
        self.length += 1
        self.right[self.rightndx] = x
        if self.maxlen is not None and self.length > self.maxlen:
            self.popleft()
    def appendleft(self, x):
        self.state += 1
        self.leftndx -= 1
        if self.leftndx == -1:
            # Left block full: link in a fresh block on the left.
            newblock = [None] * BLOCKSIZ
            self.left[LFTLNK] = newblock
            newblock[RGTLNK] = self.left
            self.left = newblock
            self.leftndx = n-1
        self.length += 1
        self.left[self.leftndx] = x
        if self.maxlen is not None and self.length > self.maxlen:
            self.pop()
    def extend(self, iterable):
        if iterable is self:
            iterable = list(iterable)
        for elem in iterable:
            self.append(elem)
    def extendleft(self, iterable):
        if iterable is self:
            iterable = list(iterable)
        for elem in iterable:
            self.appendleft(elem)
    def pop(self):
        if self.left is self.right and self.leftndx > self.rightndx:
            raise IndexError("pop from an empty deque")
        x = self.right[self.rightndx]
        self.right[self.rightndx] = None
        self.length -= 1
        self.rightndx -= 1
        self.state += 1
        if self.rightndx == -1:
            prevblock = self.right[LFTLNK]
            if prevblock is None:
                # the deque has become empty; recenter instead of freeing block
                self.rightndx = n//2
                self.leftndx = n//2+1
            else:
                prevblock[RGTLNK] = None
                self.right[LFTLNK] = None
                self.right = prevblock
                self.rightndx = n-1
        return x
    def popleft(self):
        if self.left is self.right and self.leftndx > self.rightndx:
            raise IndexError("pop from an empty deque")
        x = self.left[self.leftndx]
        self.left[self.leftndx] = None
        self.length -= 1
        self.leftndx += 1
        self.state += 1
        if self.leftndx == n:
            prevblock = self.left[RGTLNK]
            if prevblock is None:
                # the deque has become empty; recenter instead of freeing block
                self.rightndx = n//2
                self.leftndx = n//2+1
            else:
                prevblock[LFTLNK] = None
                self.left[RGTLNK] = None
                self.left = prevblock
                self.leftndx = 0
        return x
    def count(self, value):
        c = 0
        for item in self:
            if item == value:
                c += 1
        return c
    def remove(self, value):
        # Need to be defensive for mutating comparisons
        for i in range(len(self)):
            if self[i] == value:
                del self[i]
                return
        raise ValueError("deque.remove(x): x not in deque")
    def rotate(self, n=1):
        length = len(self)
        if length == 0:
            return
        # Normalize n into [-halflen, halflen] so we rotate the short way.
        halflen = (length+1) >> 1
        if n > halflen or n < -halflen:
            n %= length
            if n > halflen:
                n -= length
            elif n < -halflen:
                n += length
        while n > 0:
            self.appendleft(self.pop())
            n -= 1
        while n < 0:
            self.append(self.popleft())
            n += 1
    def reverse(self):
        "reverse *IN PLACE*"
        leftblock = self.left
        rightblock = self.right
        leftindex = self.leftndx
        rightindex = self.rightndx
        for i in range(self.length // 2):
            # Validate that pointers haven't met in the middle
            assert leftblock != rightblock or leftindex < rightindex
            # Swap
            (rightblock[rightindex], leftblock[leftindex]) = (
                leftblock[leftindex], rightblock[rightindex])
            # Advance left block/index pair
            leftindex += 1
            if leftindex == n:
                leftblock = leftblock[RGTLNK]
                assert leftblock is not None
                leftindex = 0
            # Step backwards with the right block/index pair
            rightindex -= 1
            if rightindex == -1:
                rightblock = rightblock[LFTLNK]
                assert rightblock is not None
                rightindex = n - 1
    def __repr__(self):
        # Pseudo-thread-local flag guards against infinite recursion when a
        # deque (directly or indirectly) contains itself.
        threadlocalattr = '__repr' + str(_thread_ident())
        if threadlocalattr in self.__dict__:
            return 'deque([...])'
        else:
            self.__dict__[threadlocalattr] = True
            try:
                if self.maxlen is not None:
                    return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
                else:
                    return 'deque(%r)' % (list(self),)
            finally:
                del self.__dict__[threadlocalattr]
    def __iter__(self):
        return deque_iterator(self, self._iter_impl)
    def _iter_impl(self, original_state, giveup):
        if self.state != original_state:
            giveup()
        block = self.left
        while block:
            l, r = 0, n
            if block is self.left:
                l = self.leftndx
            if block is self.right:
                r = self.rightndx + 1
            for elem in block[l:r]:
                yield elem
                if self.state != original_state:
                    giveup()
            block = block[RGTLNK]
    def __reversed__(self):
        return deque_iterator(self, self._reversed_impl)
    def _reversed_impl(self, original_state, giveup):
        if self.state != original_state:
            giveup()
        block = self.right
        while block:
            l, r = 0, n
            if block is self.left:
                l = self.leftndx
            if block is self.right:
                r = self.rightndx + 1
            for elem in reversed(block[l:r]):
                yield elem
                if self.state != original_state:
                    giveup()
            block = block[LFTLNK]
    def __len__(self):
        return self.length
    def __getref(self, index):
        # Map a (possibly negative) index to its (block, slot) location by
        # walking the block chain from the nearer end.
        if index >= 0:
            block = self.left
            while block:
                l, r = 0, n
                if block is self.left:
                    l = self.leftndx
                if block is self.right:
                    r = self.rightndx + 1
                span = r-l
                if index < span:
                    return block, l+index
                index -= span
                block = block[RGTLNK]
        else:
            block = self.right
            while block:
                l, r = 0, n
                if block is self.left:
                    l = self.leftndx
                if block is self.right:
                    r = self.rightndx + 1
                negative_span = l-r
                if index >= negative_span:
                    return block, r+index
                index -= negative_span
                block = block[LFTLNK]
        raise IndexError("deque index out of range")
    def __getitem__(self, index):
        block, index = self.__getref(index)
        return block[index]
    def __setitem__(self, index, value):
        block, index = self.__getref(index)
        block[index] = value
    def __delitem__(self, index):
        length = len(self)
        if index >= 0:
            if index >= length:
                raise IndexError("deque index out of range")
            self.rotate(-index)
            self.popleft()
            self.rotate(index)
        else:
            # ~index is the number of right-rotations needed to bring the
            # target element to the right end (~-1 == 0, ~-2 == 1, ...).
            # The previous index^(2**31) workaround computed a huge negative
            # number and deleted the wrong element for negative indices.
            index = ~index
            if index >= length:
                raise IndexError("deque index out of range")
            self.rotate(index)
            self.pop()
            self.rotate(-index)
    def __reduce_ex__(self, proto):
        return type(self), (list(self), self.maxlen)
    def __hash__(self):
        raise TypeError("deque objects are unhashable")
    def __copy__(self):
        return self.__class__(self, self.maxlen)
    # XXX make comparison more efficient
    def __eq__(self, other):
        if isinstance(other, deque):
            return list(self) == list(other)
        else:
            return NotImplemented
    def __ne__(self, other):
        if isinstance(other, deque):
            return list(self) != list(other)
        else:
            return NotImplemented
    def __lt__(self, other):
        if isinstance(other, deque):
            return list(self) < list(other)
        else:
            return NotImplemented
    def __le__(self, other):
        if isinstance(other, deque):
            return list(self) <= list(other)
        else:
            return NotImplemented
    def __gt__(self, other):
        if isinstance(other, deque):
            return list(self) > list(other)
        else:
            return NotImplemented
    def __ge__(self, other):
        if isinstance(other, deque):
            return list(self) >= list(other)
        else:
            return NotImplemented
    def __iadd__(self, other):
        self.extend(other)
        return self
class deque_iterator(object):
    """Iterator over a deque that detects mutation during iteration.

    The deque's ``state`` counter is captured at creation time; the
    generator produced by *itergen* calls *giveup* (which raises
    RuntimeError) if the counter changes while iterating.
    """
    def __init__(self, deq, itergen):
        # Number of elements still expected; zeroed when iteration aborts.
        self.counter = len(deq)
        def giveup():
            self.counter = 0
            raise RuntimeError("deque mutated during iteration")
        self._gen = itergen(deq.state, giveup)
    def __next__(self):
        # Python 3 iterator protocol.  The original only defined a Py2-style
        # next() method and called self._gen.next(), neither of which exists
        # under Python 3, so for-loops over a deque raised TypeError.
        res = next(self._gen)
        self.counter -= 1
        return res
    next = __next__  # legacy Python 2-style alias kept for old callers
    def __iter__(self):
        return self
class defaultdict(dict):
    """dict subclass whose __missing__ calls a factory to supply values for
    absent keys.  The first positional argument (if any) is the factory; the
    remaining arguments are forwarded to dict() unchanged."""
    def __init__(self, *args, **kwds):
        if len(args) > 0:
            default_factory = args[0]
            args = args[1:]
            if not callable(default_factory) and default_factory is not None:
                raise TypeError("first argument must be callable")
        else:
            default_factory = None
        # The original passed args/kwds as two positional arguments to
        # dict.__init__ (and then update()d a second time), which raises
        # TypeError under CPython; unpack them properly instead.
        dict.__init__(self, *args, **kwds)
        self.default_factory = default_factory
    def __getitem__(self, key):
        # Route missing keys through __missing__ (plain dict only does this
        # for subclasses via the C slot, which Brython lacked).
        if self.__contains__(key):
            return dict.__getitem__(self, key)
        return self.__missing__(key)
    def __missing__(self, key):
        # from defaultdict docs
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value
    def __repr__(self, recurse=set()):
        # NOTE: the shared mutable default set is deliberate -- it tracks
        # ids across the recursive calls to stop self-referential dicts
        # from recursing forever (not thread-safe).
        if id(self) in recurse:
            return "defaultdict(...)"
        try:
            recurse.add(id(self))
            return "defaultdict(%s, %s)" % (repr(self.default_factory), super(defaultdict, self).__repr__())
        finally:
            recurse.remove(id(self))
    def copy(self):
        return type(self)(self.default_factory, self)
    def __copy__(self):
        return self.copy()
    def __reduce__(self):
        #
        #__reduce__ must return a 5-tuple as follows:
        #
        #   - factory function
        #   - tuple of args for the factory function
        #   - additional state (here None)
        #   - sequence iterator (here None)
        #   - dictionary iterator (yielding successive (key, value) pairs
        #   This API is used by pickle.py and copy.py.
        #
        # iter(self.items()) replaces the Python 2-only iteritems().
        return (type(self), (self.default_factory,), None, None, iter(self.items()))
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__ # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22) # instantiate with positional args or keywords
    >>> p[0] + p[1] # indexable like a plain tuple
    33
    >>> x, y = p # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y # fields also accessable by name
    33
    >>> d = p._asdict() # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d) # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Parse and validate the field names. Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    if rename:
        # Replace each invalid/duplicate name with a positional placeholder.
        names = list(field_names)
        seen = set()
        for i, name in enumerate(names):
            if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
                or not name or name[0].isdigit() or name.startswith('_')
                or name in seen):
                names[i] = '_%d' % i
            seen.add(name)
        field_names = tuple(names)
    for name in (typename,) + field_names:
        # min(...) over the per-character checks acts as all(...); note that
        # for an empty name min() itself raises ValueError (empty sequence).
        if not min(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)
    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(self):
            'Return a new dict which maps field names to their values'
            return dict(zip(self._fields, self)) \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
    if verbose:
        print(template)
    # Execute the template string in a temporary namespace
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template,namespace)
    except SyntaxError as e:
        # NOTE(review): e.message is Python 2 only -- confirm target runtime.
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
if __name__ == '__main__':
    # Smoke test: exercise indexing, unpacking, attribute access and repr
    # of a generated namedtuple class.
    Point = namedtuple('Point', ['x', 'y'])
    p = Point(11, y=22)
    print(p[0]+p[1])
    x,y=p
    print(x,y)
    print(p.x+p.y)
    print(p)
| gpl-3.0 |
pragle/craft | web/model/db_model.py | 1 | 1193 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michal Szczepanski'
from sqlalchemy.sql.schema import Column, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Integer, String, Boolean, Binary
Base = declarative_base()
class DatabaseConnection(Base):
    """Saved database connection profile.

    Rows are keyed by the unique, user-chosen ``connection_name``.
    """
    __tablename__ = 'database_connection'
    connection_name = Column(String, unique=True, primary_key=True)
    name = Column(String)
    host = Column(String)
    port = Column(Integer)
    username = Column(String)
    # NOTE(review): the password column is persisted as-is (no hashing or
    # encryption visible here) -- confirm whether a secrets store should be
    # used instead.
    password = Column(String)
    database = Column(String)
'''
class SSHConnection(Base):
__tablename__ = 'ssh_connection'
id = Column(Integer, primary_key=True)
name = Column(String)
host = Column(String)
port = Column(String)
auth_id = Column(Integer, ForeignKey('ssh_connection_auth.id'))
class SSHConnectionAuth(Base):
__tablename__ = 'ssh_connection_auth'
id = Column(Integer, primary_key=True)
key = Column(Boolean, default=False)
key_data = Column(Binary)
username = Column(String)
password = Column(String)
connections = relationship('SSHConnection')
'''
| bsd-3-clause |
lewiskan/heron | heron/tools/tracker/src/python/handlers/clustershandler.py | 10 | 1130 | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' clustershandler.py '''
import tornado.gen
from heron.tools.tracker.src.python.handlers import BaseHandler
# pylint: disable=attribute-defined-outside-init
class ClustersHandler(BaseHandler):
  """
  URL - /clusters
  Responds with the JSON list of known cluster names.
  """
  def initialize(self, tracker):
    """Keep a reference to the shared tracker instance."""
    self.tracker = tracker
  @tornado.gen.coroutine
  def get(self):
    """Collect the name of every registered state manager and write the
    list back as a success response."""
    cluster_names = list(
        state_manager.name for state_manager in self.tracker.state_managers)
    self.write_success_response(cluster_names)
| apache-2.0 |
PeterWangIntel/chromium-crosswalk | third_party/closure_linter/closure_linter/requireprovidesorter.py | 84 | 11383 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
"""Checks for and fixes alphabetization of provide and require statements.
When alphabetizing, comments on the same line or comments directly above a
goog.provide or goog.require statement are associated with that statement and
stay with the statement as it gets sorted.
"""
def CheckProvides(self, token):
"""Checks alphabetization of goog.provide statements.
Iterates over tokens in given token stream, identifies goog.provide tokens,
and checks that they occur in alphabetical order by the object being
provided.
Args:
token: A token in the token stream before any goog.provide tokens.
Returns:
The first provide token in the token stream.
None is returned if all goog.provide statements are already sorted.
"""
provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
sorted_provide_strings = sorted(provide_strings)
if provide_strings != sorted_provide_strings:
return provide_tokens[0]
return None
def CheckRequires(self, token):
"""Checks alphabetization of goog.require statements.
Iterates over tokens in given token stream, identifies goog.require tokens,
and checks that they occur in alphabetical order by the dependency being
required.
Args:
token: A token in the token stream before any goog.require tokens.
Returns:
The first require token in the token stream.
None is returned if all goog.require statements are already sorted.
"""
require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
sorted_require_strings = sorted(require_strings)
if require_strings != sorted_require_strings:
return require_tokens[0]
return None
def FixProvides(self, token):
"""Sorts goog.provide statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def FixRequires(self, token):
"""Sorts goog.require statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def _FixProvidesOrRequires(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
"""
strings = self._GetRequireOrProvideTokenStrings(tokens)
sorted_strings = sorted(strings)
# Make a separate pass to remove any blank lines between goog.require/
# goog.provide tokens.
first_token = tokens[0]
last_token = tokens[-1]
i = last_token
while i != first_token and i is not None:
if i.type is Type.BLANK_LINE:
tokenutil.DeleteToken(i)
i = i.previous
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
# Iterate over the map removing all tokens.
for name in tokens_map:
tokens_to_delete = tokens_map[name]
for i in tokens_to_delete:
tokenutil.DeleteToken(i)
# Save token to rest of file. Sorted token will be inserted before this.
rest_of_file = tokens_map[strings[-1]][-1].next
# Re-add all tokens in the map in alphabetical order.
insert_after = tokens[0].previous
for string in sorted_strings:
for i in tokens_map[string]:
if rest_of_file:
tokenutil.InsertTokenBefore(i, rest_of_file)
else:
tokenutil.InsertTokenAfter(i, insert_after)
insert_after = i
def _GetRequireOrProvideTokens(self, token, token_string):
"""Gets all goog.provide or goog.require tokens in the given token stream.
Args:
token: The first token in the token stream.
token_string: One of 'goog.provide' or 'goog.require' to indicate which
tokens to find.
Returns:
A list of goog.provide or goog.require tokens in the order they appear in
the token stream.
"""
tokens = []
while token:
if token.type == Type.IDENTIFIER:
if token.string == token_string:
tokens.append(token)
elif token.string not in [
'goog.provide', 'goog.require', 'goog.setTestOnly']:
# These 3 identifiers are at the top of the file. So if any other
# identifier is encountered, return.
break
token = token.next
return tokens
def _GetRequireOrProvideTokenStrings(self, tokens):
"""Gets a list of strings corresponding to the given list of tokens.
The string will be the next string in the token stream after each token in
tokens. This is used to find the object being provided/required by a given
goog.provide or goog.require token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A list of object names that are being provided or required by the given
list of tokens. For example:
['object.a', 'object.c', 'object.b']
"""
token_strings = []
for token in tokens:
if not token.is_deleted:
name = tokenutil.GetStringAfterToken(token)
token_strings.append(name)
return token_strings
def _GetTokensMap(self, tokens):
"""Gets a map from object name to tokens associated with that object.
Starting from the goog.provide/goog.require token, searches backwards in the
token stream for any lines that start with a comment. These lines are
associated with the goog.provide/goog.require token. Also associates any
tokens on the same line as the goog.provide/goog.require token with that
token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A dictionary that maps object names to the tokens associated with the
goog.provide or goog.require of that object name. For example:
{
'object.a': [JavaScriptToken, JavaScriptToken, ...],
'object.b': [...]
}
The list of tokens includes any comment lines above the goog.provide or
goog.require statement and everything after the statement on the same
line. For example, all of the following would be associated with
'object.a':
/** @suppress {extraRequire} */
goog.require('object.a'); // Some comment.
"""
tokens_map = {}
for token in tokens:
object_name = tokenutil.GetStringAfterToken(token)
# If the previous line starts with a comment, presume that the comment
# relates to the goog.require or goog.provide and keep them together when
# sorting.
first_token = token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
while (previous_first_token and
previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
first_token = previous_first_token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
first_token)
# Find the last token on the line.
last_token = tokenutil.GetLastTokenInSameLine(token)
all_tokens = self._GetTokenList(first_token, last_token)
tokens_map[object_name] = all_tokens
return tokens_map
def _GetTokenList(self, first_token, last_token):
"""Gets a list of all tokens from first_token to last_token, inclusive.
Args:
first_token: The first token to get.
last_token: The last token to get.
Returns:
A list of all tokens between first_token and last_token, including both
first_token and last_token.
Raises:
Exception: If the token stream ends before last_token is reached.
"""
token_list = []
token = first_token
while token != last_token:
if not token:
raise Exception('ran out of tokens')
token_list.append(token)
token = token.next
token_list.append(last_token)
return token_list
def GetFixedRequireString(self, token):
"""Get fixed/sorted order of goog.require statements.
Args:
token: The first token in the token stream.
Returns:
A string for correct sorted order of goog.require.
"""
return self._GetFixedRequireOrProvideString(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def GetFixedProvideString(self, token):
  """Builds the correctly sorted block of goog.provide statements.

  Args:
    token: The first token in the token stream.

  Returns:
    A string containing the goog.provide lines in sorted order.
  """
  provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
  return self._GetFixedRequireOrProvideString(provide_tokens)
def _GetFixedRequireOrProvideString(self, tokens):
  """Sorts goog.provide or goog.require statements.

  Args:
    tokens: A list of goog.provide or goog.require tokens in the order they
        appear in the token stream. i.e. the first token in this list must
        be the first goog.provide or goog.require token.

  Returns:
    A string containing the sorted goog.require or goog.provide statements.
  """
  # Map each required/provided object name to the tokens making up its line,
  # including any comments kept together with the statement.
  tokens_map = self._GetTokensMap(tokens)
  pieces = []
  for object_name in sorted(tokens_map):
    for tok in tokens_map[object_name]:
      pieces.append(tok.string)
      if tok.IsLastInLine():
        pieces.append('\n')
  return ''.join(pieces)
| bsd-3-clause |
ecolitan/fatics | venv/lib/python2.7/site-packages/twisted/mail/test/test_pop3.py | 38 | 30556 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for Ltwisted.mail.pop3} module.
"""
import StringIO
import hmac
import base64
import itertools
from zope.interface import implements
from twisted.internet import defer
from twisted.trial import unittest, util
from twisted import mail
import twisted.mail.protocols
import twisted.mail.pop3
import twisted.internet.protocol
from twisted import internet
from twisted.mail import pop3
from twisted.protocols import loopback
from twisted.python import failure
from twisted.python.util import OrderedDict
from twisted import cred
import twisted.cred.portal
import twisted.cred.checkers
import twisted.cred.credentials
from twisted.test.proto_helpers import LineSendingProtocol
class UtilityTestCase(unittest.TestCase):
    """
    Test the various helper functions and classes used by the POP3 server
    protocol implementation.
    """

    def testLineBuffering(self):
        """
        Test creating a LineBuffer and feeding it some lines.  The lines should
        build up in its internal buffer for a while and then get spat out to
        the writer.
        """
        output = []
        input = iter(itertools.cycle(['012', '345', '6', '7', '8', '9']))
        # Flush to output.extend once at least 6 bytes have accumulated.
        c = pop3._IteratorBuffer(output.extend, input, 6)
        i = iter(c)
        self.assertEqual(output, []) # nothing is buffered
        i.next()
        self.assertEqual(output, []) # '012' is buffered
        i.next()
        self.assertEqual(output, []) # '012345' is buffered
        i.next()
        self.assertEqual(output, ['012', '345', '6']) # nothing is buffered
        for n in range(5):
            i.next()
        self.assertEqual(output, ['012', '345', '6', '7', '8', '9', '012', '345'])

    def testFinishLineBuffering(self):
        """
        Test that a LineBuffer flushes everything when its iterator is
        exhausted, and itself raises StopIteration.
        """
        output = []
        input = iter(['a', 'b', 'c'])
        # Buffer limit (5) is never reached, so the flush happens at exhaustion.
        c = pop3._IteratorBuffer(output.extend, input, 5)
        for i in c:
            pass
        self.assertEqual(output, ['a', 'b', 'c'])

    def testSuccessResponseFormatter(self):
        """
        Test that the thing that spits out POP3 'success responses' works
        right.
        """
        self.assertEqual(
            pop3.successResponse('Great.'),
            '+OK Great.\r\n')

    def testStatLineFormatter(self):
        """
        Test that the function which formats stat lines does so appropriately.
        """
        # formatStatResponse yields intermediate states; only the final
        # element is the complete response line.
        statLine = list(pop3.formatStatResponse([]))[-1]
        self.assertEqual(statLine, '+OK 0 0\r\n')

        statLine = list(pop3.formatStatResponse([10, 31, 0, 10101]))[-1]
        self.assertEqual(statLine, '+OK 4 10142\r\n')

    def testListLineFormatter(self):
        """
        Test that the function which formats the lines in response to a LIST
        command does so appropriately.
        """
        listLines = list(pop3.formatListResponse([]))
        self.assertEqual(
            listLines,
            ['+OK 0\r\n', '.\r\n'])

        listLines = list(pop3.formatListResponse([1, 2, 3, 100]))
        self.assertEqual(
            listLines,
            ['+OK 4\r\n', '1 1\r\n', '2 2\r\n', '3 3\r\n', '4 100\r\n', '.\r\n'])

    def testUIDListLineFormatter(self):
        """
        Test that the function which formats lines in response to a UIDL
        command does so appropriately.
        """
        UIDs = ['abc', 'def', 'ghi']
        listLines = list(pop3.formatUIDListResponse([], UIDs.__getitem__))
        self.assertEqual(
            listLines,
            ['+OK \r\n', '.\r\n'])

        listLines = list(pop3.formatUIDListResponse([123, 431, 591], UIDs.__getitem__))
        self.assertEqual(
            listLines,
            ['+OK \r\n', '1 abc\r\n', '2 def\r\n', '3 ghi\r\n', '.\r\n'])

        # Entries with falsy sizes (0 or None) are skipped in the listing.
        listLines = list(pop3.formatUIDListResponse([0, None, 591], UIDs.__getitem__))
        self.assertEqual(
            listLines,
            ['+OK \r\n', '1 abc\r\n', '3 ghi\r\n', '.\r\n'])
class MyVirtualPOP3(mail.protocols.VirtualPOP3):
    """Virtual-hosting POP3 server that authenticates against 'baz.com'."""

    magic = '<moshez>'

    def authenticateUserAPOP(self, user, digest):
        user, domain = self.lookupDomain(user)
        baz = self.service.domains['baz.com']
        return baz.authenticateUserAPOP(user, digest, self.magic, domain)
class DummyDomain:
    """In-memory mail domain mapping user names to lists of message strings."""

    def __init__(self):
        self.users = {}

    def addUser(self, name):
        # A freshly-added user starts with an empty mailbox.
        self.users[name] = []

    def addMessage(self, name, message):
        self.users[name].append(message)

    def authenticateUserAPOP(self, name, digest, magic, domain):
        # Authentication always succeeds; hand back the user's mailbox.
        return pop3.IMailbox, ListMailbox(self.users[name]), lambda: None
class ListMailbox:
    """Minimal IMailbox-alike backed by a plain list of message strings."""

    def __init__(self, list):
        self.list = list

    def listMessages(self, i=None):
        # No index: sizes of every message; otherwise the size of just one.
        return map(len, self.list) if i is None else len(self.list[i])

    def getMessage(self, i):
        return StringIO.StringIO(self.list[i])

    def getUidl(self, i):
        # The list index doubles as the unique identifier.
        return i

    def deleteMessage(self, i):
        # Deletion merely blanks the stored text.
        self.list[i] = ''

    def sync(self):
        pass
class MyPOP3Downloader(pop3.POP3Client):
    """
    POP3 client which logs in with APOP as hello@baz.com, downloads message 1
    into C{self.message}, then quits.
    """

    def handle_WELCOME(self, line):
        pop3.POP3Client.handle_WELCOME(self, line)
        self.apop('hello@baz.com', 'world')

    def handle_APOP(self, line):
        parts = line.split()
        code = parts[0]
        if code != '+OK':
            # Use the call form of print/raise so this is valid syntax on
            # both Python 2 and Python 3 (the old statement forms were not).
            print(parts)
            raise AssertionError('code is ' + code)
        self.lines = []
        self.retr(1)

    def handle_RETR_continue(self, line):
        # Accumulate each line of the message body as it arrives.
        self.lines.append(line)

    def handle_RETR_end(self):
        self.message = '\n'.join(self.lines) + '\n'
        self.quit()

    def handle_QUIT(self, line):
        if line[:3] != '+OK':
            raise AssertionError('code is ' + line)
class POP3TestCase(unittest.TestCase):
    """
    End-to-end tests driving MyVirtualPOP3 over a loopback transport with a
    single one-message mailbox.
    """

    # The single message stored in the test mailbox (headers, blank line, body).
    message = '''\
Subject: urgent

Someone set up us the bomb!
'''

    # Exact wire transcript (CR-terminated lines) the server must emit for the
    # scripted session in testMessages; %d is the message's octet count.
    expectedOutput = '''\
+OK <moshez>\015
+OK Authentication succeeded\015
+OK \015
1 0\015
.\015
+OK %d\015
Subject: urgent\015
\015
Someone set up us the bomb!\015
.\015
+OK \015
''' % len(message)

    def setUp(self):
        # Virtual-hosting factory with one domain, one user, one message.
        self.factory = internet.protocol.Factory()
        self.factory.domains = {}
        self.factory.domains['baz.com'] = DummyDomain()
        self.factory.domains['baz.com'].addUser('hello')
        self.factory.domains['baz.com'].addMessage('hello', self.message)

    def testMessages(self):
        """
        A scripted APOP/UIDL/RETR/QUIT session produces exactly
        C{expectedOutput}.
        """
        client = LineSendingProtocol([
            'APOP hello@baz.com world',
            'UIDL',
            'RETR 1',
            'QUIT',
        ])
        server = MyVirtualPOP3()
        server.service = self.factory
        def check(ignored):
            output = '\r\n'.join(client.response) + '\r\n'
            self.assertEqual(output, self.expectedOutput)
        return loopback.loopbackTCP(server, client).addCallback(check)

    def testLoopback(self):
        """
        MyPOP3Downloader retrieves the stored message verbatim.
        """
        protocol = MyVirtualPOP3()
        protocol.service = self.factory
        clientProtocol = MyPOP3Downloader()
        def check(ignored):
            self.assertEqual(clientProtocol.message, self.message)
            protocol.connectionLost(
                failure.Failure(Exception("Test harness disconnect")))
        d = loopback.loopbackAsync(protocol, clientProtocol)
        return d.addCallback(check)
    # POP3Client is deprecated; silence that warning for this test only.
    testLoopback.suppress = [util.suppress(message="twisted.mail.pop3.POP3Client is deprecated")]
class DummyPOP3(pop3.POP3):
    """POP3 server whose APOP authentication always yields a DummyMailbox."""

    magic = '<moshez>'

    def authenticateUserAPOP(self, user, password):
        # Accept any credentials; the mailbox raises ValueError on bad indexes.
        return pop3.IMailbox, DummyMailbox(ValueError), lambda: None
class DummyMailbox(pop3.Mailbox):
    """
    Mailbox stub with a configurable out-of-bounds exception type, so tests
    can exercise both the deprecated IndexError and the correct ValueError
    responses to bad message numbers.
    """

    messages = ['From: moshe\nTo: moshe\n\nHow are you, friend?\n']

    def __init__(self, exceptionType):
        # Copy the class-level sample so each instance mutates its own list.
        self.messages = DummyMailbox.messages[:]
        self.exceptionType = exceptionType

    def _checkBounds(self, i):
        # Raise the configured exception type for out-of-range indexes.
        if i >= len(self.messages):
            raise self.exceptionType()

    def listMessages(self, i=None):
        if i is None:
            return map(len, self.messages)
        self._checkBounds(i)
        return len(self.messages[i])

    def getMessage(self, i):
        return StringIO.StringIO(self.messages[i])

    def getUidl(self, i):
        self._checkBounds(i)
        return str(i)

    def deleteMessage(self, i):
        self.messages[i] = ''
class AnotherPOP3TestCase(unittest.TestCase):
    """
    DummyPOP3-based tests covering pipelining, NOOP, the AUTH listing, and
    PASS-before-USER error handling.
    """

    def runTest(self, lines, expectedOutput):
        # Drive a DummyPOP3 with the scripted client lines over loopback and
        # compare the complete response transcript.
        dummy = DummyPOP3()
        client = LineSendingProtocol(lines)
        d = loopback.loopbackAsync(dummy, client)
        return d.addCallback(self._cbRunTest, client, dummy, expectedOutput)

    def _cbRunTest(self, ignored, client, dummy, expectedOutput):
        self.assertEqual('\r\n'.join(expectedOutput),
                         '\r\n'.join(client.response))
        dummy.connectionLost(failure.Failure(Exception("Test harness disconnect")))
        return ignored

    def test_buffer(self):
        """
        Test a lot of different POP3 commands in an extremely pipelined
        scenario.

        This test may cover legitimate behavior, but the intent and
        granularity are not very good.  It would likely be an improvement to
        split it into a number of smaller, more focused tests.
        """
        return self.runTest(
            ["APOP moshez dummy",
             "LIST",
             "UIDL",
             "RETR 1",
             "RETR 2",
             "DELE 1",
             "RETR 1",
             "QUIT"],
            ['+OK <moshez>',
             '+OK Authentication succeeded',
             '+OK 1',
             '1 44',
             '.',
             '+OK ',
             '1 0',
             '.',
             '+OK 44',
             'From: moshe',
             'To: moshe',
             '',
             'How are you, friend?',
             '.',
             '-ERR Bad message number argument',
             '+OK ',
             '-ERR message deleted',
             '+OK '])

    def test_noop(self):
        """
        Test the no-op command.
        """
        return self.runTest(
            ['APOP spiv dummy',
             'NOOP',
             'QUIT'],
            ['+OK <moshez>',
             '+OK Authentication succeeded',
             '+OK ',
             '+OK '])

    def testAuthListing(self):
        # AUTH with no argument lists the supported SASL mechanisms.
        p = DummyPOP3()
        p.factory = internet.protocol.Factory()
        p.factory.challengers = {'Auth1': None, 'secondAuth': None, 'authLast': None}
        client = LineSendingProtocol([
            "AUTH",
            "QUIT",
        ])

        d = loopback.loopbackAsync(p, client)
        return d.addCallback(self._cbTestAuthListing, client)

    def _cbTestAuthListing(self, ignored, client):
        self.failUnless(client.response[1].startswith('+OK'))
        # Mechanism names are advertised upper-cased, terminated by a '.'.
        self.assertEqual(sorted(client.response[2:5]),
                         ["AUTH1", "AUTHLAST", "SECONDAUTH"])
        self.assertEqual(client.response[5], ".")

    def testIllegalPASS(self):
        # PASS before USER must be rejected.
        dummy = DummyPOP3()
        client = LineSendingProtocol([
            "PASS fooz",
            "QUIT"
        ])
        d = loopback.loopbackAsync(dummy, client)
        return d.addCallback(self._cbTestIllegalPASS, client, dummy)

    def _cbTestIllegalPASS(self, ignored, client, dummy):
        expected_output = '+OK <moshez>\r\n-ERR USER required before PASS\r\n+OK \r\n'
        self.assertEqual(expected_output, '\r\n'.join(client.response) + '\r\n')
        dummy.connectionLost(failure.Failure(Exception("Test harness disconnect")))

    def testEmptyPASS(self):
        # An empty PASS argument gets the same USER-required error.
        dummy = DummyPOP3()
        client = LineSendingProtocol([
            "PASS ",
            "QUIT"
        ])
        d = loopback.loopbackAsync(dummy, client)
        return d.addCallback(self._cbTestEmptyPASS, client, dummy)

    def _cbTestEmptyPASS(self, ignored, client, dummy):
        expected_output = '+OK <moshez>\r\n-ERR USER required before PASS\r\n+OK \r\n'
        self.assertEqual(expected_output, '\r\n'.join(client.response) + '\r\n')
        dummy.connectionLost(failure.Failure(Exception("Test harness disconnect")))
class TestServerFactory:
    """
    Fake IServerFactory exposing fixed capability values for the CAPA tests.
    """
    implements(pop3.IServerFactory)

    def cap_IMPLEMENTATION(self):
        return "Test Implementation String"

    def cap_EXPIRE(self):
        return 60

    # Ordered so the SASL capability line is deterministic.
    challengers = OrderedDict([("SCHEME_1", None), ("SCHEME_2", None)])

    def cap_LOGIN_DELAY(self):
        return 120

    # Whether EXPIRE varies per user; tests flip this to exercise both paths.
    pue = True
    def perUserExpiration(self):
        return self.pue

    # Whether LOGIN-DELAY varies per user.
    puld = True
    def perUserLoginDelay(self):
        return self.puld
class TestMailbox:
    """Stub mailbox carrying only the per-user capability values."""

    loginDelay = 100
    messageExpiration = 25
class CapabilityTestCase(unittest.TestCase):
    """
    Check the CAPA response both before any mailbox is selected and once a
    mailbox with per-user settings is active.
    """
    def setUp(self):
        s = StringIO.StringIO()
        p = pop3.POP3()
        p.factory = TestServerFactory()
        p.transport = internet.protocol.FileWrapper(s)
        p.connectionMade()
        p.do_CAPA()

        # Capabilities reported with no mailbox selected.
        self.caps = p.listCapabilities()
        self.pcaps = s.getvalue().splitlines()

        s = StringIO.StringIO()
        p.mbox = TestMailbox()
        p.transport = internet.protocol.FileWrapper(s)
        p.do_CAPA()

        # Capabilities reported once a mailbox with per-user values is set.
        self.lpcaps = s.getvalue().splitlines()
        p.connectionLost(failure.Failure(Exception("Test harness disconnect")))

    def contained(self, s, *caps):
        # Assert that s appears in every capability listing given.
        for c in caps:
            self.assertIn(s, c)

    def testUIDL(self):
        self.contained("UIDL", self.caps, self.pcaps, self.lpcaps)

    def testTOP(self):
        self.contained("TOP", self.caps, self.pcaps, self.lpcaps)

    def testUSER(self):
        self.contained("USER", self.caps, self.pcaps, self.lpcaps)

    def testEXPIRE(self):
        # Global listing advertises the per-USER variant; with a mailbox the
        # mailbox's own expiration is used.
        self.contained("EXPIRE 60 USER", self.caps, self.pcaps)
        self.contained("EXPIRE 25", self.lpcaps)

    def testIMPLEMENTATION(self):
        self.contained(
            "IMPLEMENTATION Test Implementation String",
            self.caps, self.pcaps, self.lpcaps
        )

    def testSASL(self):
        self.contained(
            "SASL SCHEME_1 SCHEME_2",
            self.caps, self.pcaps, self.lpcaps
        )

    def testLOGIN_DELAY(self):
        self.contained("LOGIN-DELAY 120 USER", self.caps, self.pcaps)
        self.assertIn("LOGIN-DELAY 100", self.lpcaps)
class GlobalCapabilitiesTestCase(unittest.TestCase):
    """
    Check EXPIRE and LOGIN-DELAY in CAPA responses when the factory reports
    them as global (not per-user): the values must not change once a mailbox
    is selected.
    """
    def setUp(self):
        s = StringIO.StringIO()
        p = pop3.POP3()
        p.factory = TestServerFactory()
        # Disable per-user expiration and login delay.
        p.factory.pue = p.factory.puld = False
        p.transport = internet.protocol.FileWrapper(s)
        p.connectionMade()
        p.do_CAPA()

        self.caps = p.listCapabilities()
        self.pcaps = s.getvalue().splitlines()

        s = StringIO.StringIO()
        p.mbox = TestMailbox()
        p.transport = internet.protocol.FileWrapper(s)
        p.do_CAPA()

        self.lpcaps = s.getvalue().splitlines()
        p.connectionLost(failure.Failure(Exception("Test harness disconnect")))

    def contained(self, s, *caps):
        # Assert that s appears in every capability listing given.
        for c in caps:
            self.assertIn(s, c)

    def testEXPIRE(self):
        self.contained("EXPIRE 60", self.caps, self.pcaps, self.lpcaps)

    def testLOGIN_DELAY(self):
        self.contained("LOGIN-DELAY 120", self.caps, self.pcaps, self.lpcaps)
class TestRealm:
    """
    Cred realm which recognizes exactly one avatar id, 'testuser'.
    """
    def requestAvatar(self, avatarId, mind, *interfaces):
        if avatarId == 'testuser':
            return pop3.IMailbox, DummyMailbox(ValueError), lambda: None
        # Any other avatar id indicates a bug in the test.
        assert False
class SASLTestCase(unittest.TestCase):
    """
    Exercise CRAM-MD5 SASL authentication against the POP3 server.
    """
    def testValidLogin(self):
        p = pop3.POP3()
        p.factory = TestServerFactory()
        p.factory.challengers = {'CRAM-MD5': cred.credentials.CramMD5Credentials}
        p.portal = cred.portal.Portal(TestRealm())
        ch = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
        ch.addUser('testuser', 'testpassword')
        p.portal.registerChecker(ch)

        s = StringIO.StringIO()
        p.transport = internet.protocol.FileWrapper(s)
        p.connectionMade()

        p.lineReceived("CAPA")
        self.failUnless(s.getvalue().find("SASL CRAM-MD5") >= 0)

        p.lineReceived("AUTH CRAM-MD5")
        # The server's last output line is "+ <base64 challenge>".
        chal = s.getvalue().splitlines()[-1][2:]
        chal = base64.decodestring(chal)
        # Client reply: base64("username " + hex(HMAC-MD5(password, challenge))).
        response = hmac.HMAC('testpassword', chal).hexdigest()

        p.lineReceived(base64.encodestring('testuser ' + response).rstrip('\n'))
        self.failUnless(p.mbox)
        self.failUnless(s.getvalue().splitlines()[-1].find("+OK") >= 0)
        p.connectionLost(failure.Failure(Exception("Test harness disconnect")))
class CommandMixin:
    """
    Tests for all the commands a POP3 server is allowed to receive.

    Subclasses supply C{mailboxType} and C{exceptionType} so the same suite
    runs against mailboxes with different out-of-bounds behavior.
    """

    # A second message appended by tests that need more than the single
    # default message in the mailbox.
    extraMessage = '''\
From: guy
To: fellow

More message text for you.
'''

    def setUp(self):
        """
        Make a POP3 server protocol instance hooked up to a simple mailbox and
        a transport that buffers output to a StringIO.
        """
        p = pop3.POP3()
        p.mbox = self.mailboxType(self.exceptionType)
        # Run scheduled calls immediately instead of via the reactor.
        p.schedule = list
        self.pop3Server = p

        s = StringIO.StringIO()
        p.transport = internet.protocol.FileWrapper(s)
        p.connectionMade()
        # Discard the greeting so each test sees only its own output.
        s.truncate(0)
        self.pop3Transport = s

    def tearDown(self):
        """
        Disconnect the server protocol so it can clean up anything it might
        need to clean up.
        """
        self.pop3Server.connectionLost(failure.Failure(Exception("Test harness disconnect")))

    def _flush(self):
        """
        Do some of the things that the reactor would take care of, if the
        reactor were actually running.
        """
        # Oh man FileWrapper is pooh.
        self.pop3Server.transport._checkProducer()

    def testLIST(self):
        """
        Test the two forms of list: with a message index number, which should
        return a short-form response, and without a message index number, which
        should return a long-form response, one line per message.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("LIST 1")
        self._flush()
        self.assertEqual(s.getvalue(), "+OK 1 44\r\n")
        s.truncate(0)

        p.lineReceived("LIST")
        self._flush()
        self.assertEqual(s.getvalue(), "+OK 1\r\n1 44\r\n.\r\n")

    def testLISTWithBadArgument(self):
        """
        Test that non-integers and out-of-bound integers produce appropriate
        error responses.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("LIST a")
        self.assertEqual(
            s.getvalue(),
            "-ERR Invalid message-number: 'a'\r\n")
        s.truncate(0)

        p.lineReceived("LIST 0")
        self.assertEqual(
            s.getvalue(),
            "-ERR Invalid message-number: 0\r\n")
        s.truncate(0)

        p.lineReceived("LIST 2")
        self.assertEqual(
            s.getvalue(),
            "-ERR Invalid message-number: 2\r\n")
        s.truncate(0)

    def testUIDL(self):
        """
        Test the two forms of the UIDL command.  These are just like the two
        forms of the LIST command.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("UIDL 1")
        self.assertEqual(s.getvalue(), "+OK 0\r\n")
        s.truncate(0)

        p.lineReceived("UIDL")
        self._flush()
        self.assertEqual(s.getvalue(), "+OK \r\n1 0\r\n.\r\n")

    def testUIDLWithBadArgument(self):
        """
        Test that UIDL with a non-integer or an out-of-bounds integer produces
        the appropriate error response.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("UIDL a")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

        p.lineReceived("UIDL 0")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

        p.lineReceived("UIDL 2")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

    def testSTAT(self):
        """
        Test the single form of the STAT command, which returns a short-form
        response of the number of messages in the mailbox and their total size.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("STAT")
        self._flush()
        self.assertEqual(s.getvalue(), "+OK 1 44\r\n")

    def testRETR(self):
        """
        Test downloading a message.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("RETR 1")
        self._flush()
        self.assertEqual(
            s.getvalue(),
            "+OK 44\r\n"
            "From: moshe\r\n"
            "To: moshe\r\n"
            "\r\n"
            "How are you, friend?\r\n"
            ".\r\n")
        s.truncate(0)

    def testRETRWithBadArgument(self):
        """
        Test that trying to download a message with a bad argument, either not
        an integer or an out-of-bounds integer, fails with the appropriate
        error response.
        """
        p = self.pop3Server
        s = self.pop3Transport

        p.lineReceived("RETR a")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

        p.lineReceived("RETR 0")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

        p.lineReceived("RETR 2")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

    def testTOP(self):
        """
        Test downloading the headers and part of the body of a message.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        # TOP 1 0: headers of message 1 plus zero body lines.
        p.lineReceived("TOP 1 0")
        self._flush()
        self.assertEqual(
            s.getvalue(),
            "+OK Top of message follows\r\n"
            "From: moshe\r\n"
            "To: moshe\r\n"
            "\r\n"
            ".\r\n")

    def testTOPWithBadArgument(self):
        """
        Test that trying to download a message with a bad argument, either a
        message number which isn't an integer or is an out-of-bounds integer or
        a number of lines which isn't an integer or is a negative integer,
        fails with the appropriate error response.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        p.lineReceived("TOP 1 a")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad line count argument\r\n")
        s.truncate(0)

        p.lineReceived("TOP 1 -1")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad line count argument\r\n")
        s.truncate(0)

        p.lineReceived("TOP a 1")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

        p.lineReceived("TOP 0 1")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

        p.lineReceived("TOP 3 1")
        self.assertEqual(
            s.getvalue(),
            "-ERR Bad message number argument\r\n")
        s.truncate(0)

    def testLAST(self):
        """
        Test the exceedingly pointless LAST command, which tells you the
        highest message index which you have already downloaded.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        # Nothing downloaded yet, so the highest index is 0.
        p.lineReceived('LAST')
        self.assertEqual(
            s.getvalue(),
            "+OK 0\r\n")
        s.truncate(0)

    def testRetrieveUpdatesHighest(self):
        """
        Test that issuing a RETR command updates the LAST response.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        p.lineReceived('RETR 2')
        self._flush()
        s.truncate(0)
        p.lineReceived('LAST')
        self.assertEqual(
            s.getvalue(),
            '+OK 2\r\n')
        s.truncate(0)

    def testTopUpdatesHighest(self):
        """
        Test that issuing a TOP command updates the LAST response.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        p.lineReceived('TOP 2 10')
        self._flush()
        s.truncate(0)
        p.lineReceived('LAST')
        self.assertEqual(
            s.getvalue(),
            '+OK 2\r\n')

    def testHighestOnlyProgresses(self):
        """
        Test that downloading a message with a smaller index than the current
        LAST response doesn't change the LAST response.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        p.lineReceived('RETR 2')
        self._flush()
        # Touching message 1 afterwards must not lower the high-water mark.
        p.lineReceived('TOP 1 10')
        self._flush()
        s.truncate(0)
        p.lineReceived('LAST')
        self.assertEqual(
            s.getvalue(),
            '+OK 2\r\n')

    def testResetClearsHighest(self):
        """
        Test that issuing RSET changes the LAST response to 0.
        """
        p = self.pop3Server
        s = self.pop3Transport
        p.mbox.messages.append(self.extraMessage)

        p.lineReceived('RETR 2')
        self._flush()
        p.lineReceived('RSET')
        s.truncate(0)
        p.lineReceived('LAST')
        self.assertEqual(
            s.getvalue(),
            '+OK 0\r\n')
# Deprecation messages (and matching trial warning suppressions) for mailbox
# implementations which still raise IndexError for out-of-bounds message
# numbers; ValueError is the supported behavior.
_listMessageDeprecation = (
    "twisted.mail.pop3.IMailbox.listMessages may not "
    "raise IndexError for out-of-bounds message numbers: "
    "raise ValueError instead.")
_listMessageSuppression = util.suppress(
    message=_listMessageDeprecation,
    category=PendingDeprecationWarning)

_getUidlDeprecation = (
    "twisted.mail.pop3.IMailbox.getUidl may not "
    "raise IndexError for out-of-bounds message numbers: "
    "raise ValueError instead.")
_getUidlSuppression = util.suppress(
    message=_getUidlDeprecation,
    category=PendingDeprecationWarning)
class IndexErrorCommandTestCase(CommandMixin, unittest.TestCase):
    """
    Run all of the command tests against a mailbox which raises IndexError
    when an out of bounds request is made.  This behavior will be deprecated
    shortly and then removed.
    """
    exceptionType = IndexError
    mailboxType = DummyMailbox

    # The bad-argument tests trigger the IndexError deprecation warnings, so
    # they are re-declared here only to attach per-method suppressions.
    def testLISTWithBadArgument(self):
        return CommandMixin.testLISTWithBadArgument(self)
    testLISTWithBadArgument.suppress = [_listMessageSuppression]

    def testUIDLWithBadArgument(self):
        return CommandMixin.testUIDLWithBadArgument(self)
    testUIDLWithBadArgument.suppress = [_getUidlSuppression]

    def testTOPWithBadArgument(self):
        return CommandMixin.testTOPWithBadArgument(self)
    testTOPWithBadArgument.suppress = [_listMessageSuppression]

    def testRETRWithBadArgument(self):
        return CommandMixin.testRETRWithBadArgument(self)
    testRETRWithBadArgument.suppress = [_listMessageSuppression]
class ValueErrorCommandTestCase(CommandMixin, unittest.TestCase):
    """
    Run all of the command tests against a mailbox which raises ValueError
    when an out of bounds request is made.  This is the correct behavior and
    after support for mailboxes which raise IndexError is removed, this will
    become just C{CommandTestCase}.
    """
    exceptionType = ValueError
    mailboxType = DummyMailbox
class SyncDeferredMailbox(DummyMailbox):
    """
    Mailbox which has a listMessages implementation which returns a Deferred
    which has already fired.
    """
    def listMessages(self, n=None):
        # Wrap the synchronous result in an already-fired Deferred.
        return defer.succeed(DummyMailbox.listMessages(self, n))
class IndexErrorSyncDeferredCommandTestCase(IndexErrorCommandTestCase):
    """
    Run all of the L{IndexErrorCommandTestCase} tests with a
    synchronous-Deferred returning IMailbox implementation.
    """
    mailboxType = SyncDeferredMailbox
class ValueErrorSyncDeferredCommandTestCase(ValueErrorCommandTestCase):
    """
    Run all of the L{ValueErrorCommandTestCase} tests with a
    synchronous-Deferred returning IMailbox implementation.
    """
    mailboxType = SyncDeferredMailbox
class AsyncDeferredMailbox(DummyMailbox):
    """
    Mailbox which has a listMessages implementation which returns a Deferred
    which has not yet fired.
    """
    def __init__(self, *a, **kw):
        # Pending (deferred, result) pairs; fired later by the test's _flush.
        self.waiting = []
        DummyMailbox.__init__(self, *a, **kw)

    def listMessages(self, n=None):
        d = defer.Deferred()
        # See AsyncDeferredMailbox._flush
        self.waiting.append((d, DummyMailbox.listMessages(self, n)))
        return d
class IndexErrorAsyncDeferredCommandTestCase(IndexErrorCommandTestCase):
    """
    Run all of the L{IndexErrorCommandTestCase} tests with an asynchronous-Deferred
    returning IMailbox implementation.
    """
    mailboxType = AsyncDeferredMailbox

    def _flush(self):
        """
        Fire whatever Deferreds we've built up in our mailbox.
        """
        while self.pop3Server.mbox.waiting:
            d, a = self.pop3Server.mbox.waiting.pop()
            d.callback(a)
        IndexErrorCommandTestCase._flush(self)
class ValueErrorAsyncDeferredCommandTestCase(ValueErrorCommandTestCase):
    """
    Run all of the L{ValueErrorCommandTestCase} tests with an asynchronous-Deferred
    returning IMailbox implementation.
    """
    mailboxType = AsyncDeferredMailbox

    def _flush(self):
        """
        Fire whatever Deferreds we've built up in our mailbox.
        """
        while self.pop3Server.mbox.waiting:
            d, a = self.pop3Server.mbox.waiting.pop()
            d.callback(a)
        ValueErrorCommandTestCase._flush(self)
class POP3MiscTestCase(unittest.TestCase):
    """
    Miscellaneous tests more to do with module/package structure than
    anything to do with the Post Office Protocol.
    """
    def test_all(self):
        """
        Every name advertised in twisted.mail.pop3.__all__ must actually be
        an attribute of that module.
        """
        mod = twisted.mail.pop3
        missing = [name for name in mod.__all__ if not hasattr(mod, name)]
        self.failUnless(not missing)
| agpl-3.0 |
madelynfreed/rlundo | venv/lib/python2.7/site-packages/setuptools/package_index.py | 301 | 38760 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
# Matches a "#egg=name" URL fragment identifying the requested project.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
# Extracts the target of an href attribute from scraped HTML.
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
# Matches PyPI's legacy "<a ...>name</a> (md5)" download-link markup.
PYPI_MD5 = re.compile(
    '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
    'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
# Matches a leading URL scheme such as "http:" or "svn+ssh:".
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
# Recognized source-distribution archive suffixes.
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()

__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]

# Seconds to wait on network operations before giving up.
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Return (base, py_version, platform) parsed from a bdist_wininst
    ``.exe`` filename, or (None, None, None) if *name* is not one.

    Recognized suffixes are ``.win32.exe``, ``.win32-pyX.Y.exe``,
    ``.win-amd64.exe`` and ``.win-amd64-pyX.Y.exe``.
    """
    lower = name.lower()
    base = py_ver = plat = None

    if lower.endswith('.exe'):
        if lower.endswith('.win32.exe'):
            base, plat = name[:-10], 'win32'
        elif lower.startswith('.win32-py', -16):
            # ".win32-pyX.Y.exe" is 16 chars; the version sits at [-7:-4].
            base, py_ver, plat = name[:-16], name[-7:-4], 'win32'
        elif lower.endswith('.win-amd64.exe'):
            base, plat = name[:-14], 'win-amd64'
        elif lower.startswith('.win-amd64-py', -20):
            # ".win-amd64-pyX.Y.exe" is 20 chars.
            base, py_ver, plat = name[:-20], name[-7:-4], 'win-amd64'

    return base, py_ver, plat
def egg_info_for_url(url):
    """Return (basename, fragment) for *url*.

    The basename is the unquoted final path segment; the fragment is either
    the URL's own fragment or, if a '#' survived inside the basename, the
    part after it.
    """
    parts = urlparse(url)
    fragment = parts[5]
    base = unquote(parts[2].split('/')[-1])
    if parts[1] == 'sourceforge.net' and base == 'download':
        # SourceForge mirror URLs end in /download; the real file name is
        # the path segment just before it.  XXX Yuck
        base = unquote(parts[2].split('/')[-2])
    if '#' in base:
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if not fragment:
        return
    match = EGG_FRAGMENT.match(fragment)
    if not match:
        return
    # An explicit #egg= fragment names the project; treat it as a checkout.
    for dist in interpret_distro_name(
            url, match.group(1), metadata, precedence=CHECKOUT_DIST):
        yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        basename = basename[:-4] # strip the .zip; treat as the egg inside
    if basename.endswith('.egg') and '-' in basename:
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.)
    #
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            basename = basename[:-len(ext)]
            return interpret_distro_name(location, basename, metadata)
    return [] # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(
    location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
    platform=None
    ):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Generate alternative interpretations of a source distro name
    # Because some packages are ambiguous as to name/versions split
    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version").  In practice,
    # the spurious interpretations should be ignored, because in the event
    # there's also an "adns" package, the spurious "python-1.1.0" version will
    # compare lower than any numeric version number, and is therefore unlikely
    # to match a request for it.  It's still a potential problem, though, and
    # in the long run PyPI and the distutils should go for "safe" names and
    # versions in distribution archive names (sdist and bdist).

    parts = basename.split('-')
    if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]):
        # it is a bdist_dumb, not an sdist -- bail out
        return

    # Yield one Distribution per possible name/version split point.
    for p in range(1,len(parts)+1):
        yield Distribution(
            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
            py_version=py_version, precedence = precedence,
            platform = platform
        )
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen = set()
    remember = seen.add
    for element in iterable:
        # With no key, the element itself is the dedup marker.
        marker = element if key is None else key(element)
        if marker in seen:
            continue
        remember(marker)
        yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def deduplicated(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduplicated
# Matches an HTML tag carrying a rel="..." attribute; group 2 is the rel value.
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting

@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""

    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(match.group(1)))

    # Fall back to scraping PyPI's metadata table headers for the same links.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos!=-1:
            match = HREF.search(page,pos)
            if match:
                yield urljoin(url, htmldecode(match.group(1)))
# HTTP User-Agent sent with index/download requests; reports the Python
# version in use and the installed setuptools version.
user_agent = "Python-urllib/%s setuptools/%s" % (
    sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
    """
    A null content checker that defines the interface for checking content
    """

    def feed(self, block):
        """Consume a block of downloaded data (no-op for the null checker)."""

    def is_valid(self):
        """Report whether the content passed validation; always true here."""
        return True

    def report(self, reporter, template):
        """Describe this checker via *reporter*; the null checker says nothing."""
class HashChecker(ContentChecker):
    """Validates downloaded content against an ``#md5=``/``#sha*=`` URL fragment."""

    # e.g. "md5=d41d8cd98f00b204e9800998ecf8427e"
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )

    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected

    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urlparse(url)[-1]
        match = cls.pattern.search(fragment) if fragment else None
        if match is None:
            return ContentChecker()
        return cls(**match.groupdict())

    def feed(self, block):
        """Fold *block* into the running digest."""
        self.hash.update(block)

    def is_valid(self):
        """True when the accumulated digest matches the expected hex value."""
        return self.hash.hexdigest() == self.expected

    def report(self, reporter, template):
        """Report the hash name via *reporter*, substituted into *template*."""
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""
    def __init__(
            self, index_url="https://pypi.python.org/simple", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
            ):
        """Create an index rooted at *index_url*, restricted to *hosts*
        (glob patterns; '*' allows everything)."""
        Environment.__init__(self,*args,**kw)
        # appends "/" only when index_url doesn't already end with one
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
        self.scanned_urls = {}
        self.fetched_urls = {}
        self.package_pages = {}
        self.allows = re.compile('|'.join(map(translate,hosts))).match
        self.to_scan = []
        if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
            self.opener = ssl_support.opener_for(ca_bundle)
        else: self.opener = urllib2.urlopen
    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            # no scheme -> treat as a local file or directory
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)
        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page
        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return
        self.info("Reading %s", url)
        self.fetched_urls[url] = True # prevent multiple fetch attempts
        f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
        if f is None: return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close() # not html, we can't process it
            return
        base = f.url # handle redirects
        page = f.read()
        if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
            if isinstance(f, HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # recursively scan every link on the page
        for match in HREF.finditer(page):
            link = urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
            # NOTE(review): the rewritten page returned here is discarded
            page = self.process_index(url, page)
    def process_filename(self, fn, nested=False):
        """Process a local filename or (recursively, one level) a directory."""
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return
        if os.path.isdir(fn) and not nested:
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path,item), True)
        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))
    def url_ok(self, url, fatal=False):
        """Check *url* against the allowed-hosts list; 'file:' is always OK.
        Returns a truthy value when allowed; warns (or raises when *fatal*)
        and returns None otherwise."""
        s = URL_SCHEME(url)
        if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]):
            return True
        msg = ("\nNote: Bypassing %s (disallowed host; see "
               "http://bit.ly/1dg9ijs for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)
    def scan_egg_links(self, search_path):
        """Scan every directory on *search_path* for ``.egg-link`` files."""
        for item in search_path:
            if os.path.isdir(item):
                for entry in os.listdir(item):
                    if entry.endswith('.egg-link'):
                        self.scan_egg_link(item, entry)
    def scan_egg_link(self, path, entry):
        """Register the development distribution referenced by one egg-link."""
        lines = [_f for _f in map(str.strip,
                                  open(os.path.join(path, entry))) if _f]
        # a valid egg-link has exactly two non-blank lines: location and setup dir
        if len(lines)==2:
            for dist in find_distributions(os.path.join(path, lines[0])):
                dist.location = os.path.join(path, *lines)
                dist.precedence = SOURCE_DIST
                self.add(dist)
    def process_index(self,url,page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts)==2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(),{})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None
        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass
        pkg, ver = scan(url)   # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        new_url+='#egg=%s-%s' % (pkg,ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)
            # rewrite md5 fragments into explicit anchors in the returned page
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
            )
        else:
            return ""   # no sense double-scanning non-package pages
    def need_version_info(self, url):
        """Force a full index scan because a .py link lacked version info."""
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )
    def scan_all(self, msg=None, *args):
        """Scan the entire index root (once), optionally warning with *msg*."""
        if self.index_url not in self.fetched_urls:
            if msg: self.warn(msg,*args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
            self.scan_url(self.index_url)
    def find_packages(self, requirement):
        """Locate pages for *requirement*, falling back from unsafe to safe name."""
        self.scan_url(self.index_url + requirement.unsafe_name+'/')
        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name+'/')
        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)
        for url in list(self.package_pages.get(requirement.key,())):
            # scan each page that might be related to the desired package
            self.scan_url(url)
    def obtain(self, requirement, installer=None):
        """Environment hook: scan the index and return a matching dist, if any."""
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        return super(PackageIndex, self).obtain(requirement,installer)
    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker
        """
        checker.report(self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            tfp.close()
            # remove the corrupt download before raising
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?" % (
                    checker.hash.name, os.path.basename(filename))
            )
    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None        # if we have already "gone online"
                or not URL_SCHEME(url)      # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)
    def prescan(self):
        """Scan urls scheduled for prescanning (e.g. --find-links)"""
        if self.to_scan:
            list(map(self.scan_url, self.to_scan))
        self.to_scan = None     # from now on, go ahead and process immediately
    def not_found_in_index(self, requirement):
        """Fall back to a full index scan, warning about a possible misspelling."""
        if self[requirement.key]:   # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:   # no distros seen for this name, might be misspelled
            meth, msg = (self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()
    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path
        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object). If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.
        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged. If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned. Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec,Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found,fragment,tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                try:
                    spec = Requirement.parse(spec)
                except ValueError:
                    raise DistutilsError(
                        "Not a URL, existing file, or requirement spec: %r" %
                        (spec,)
                    )
        return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None
            ):
        """Obtain a distribution suitable for fulfilling `requirement`
        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages.  If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename.  If no matching distribution is found,
        ``None`` is returned.
        If the `source` flag is set, only source distributions and source
        checkout links will be considered.  Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None
        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:
                if dist.precedence==DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn("Skipping development or system egg: %s",dist)
                        skipped[dist] = 1
                    continue
                if dist in req and (dist.precedence<=SOURCE_DIST or not source):
                    return dist
        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)
        if local_index is not None:
            dist = dist or find(requirement, local_index)
        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)
        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)
        if dist is None:
            self.warn(
                "No local packages or download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            return dist.clone(location=self.download(dist.location, tmpdir))
    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
        """Obtain a file suitable for fulfilling `requirement`
        DEPRECATED; use the ``fetch_distribution()`` method now instead.  For
        backward compatibility, this routine is identical but returns the
        ``location`` of the downloaded distribution instead of a distribution
        object.
        """
        dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
        if dist is not None:
            return dist.location
        return None
    def gen_setup(self, filename, fragment, tmpdir):
        """Generate a trivial setup.py beside a downloaded .py file whose
        ``#egg=name-version`` fragment unambiguously identifies it."""
        match = EGG_FRAGMENT.match(fragment)
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []
        if len(dists)==1:   # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)
            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename=dst
            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
                file.write(
                    "from setuptools import setup\n"
                    "setup(name=%r, version=%r, py_modules=[%r])\n"
                    % (
                        dists[0].project_name, dists[0].version,
                        os.path.splitext(basename)[0]
                    )
                )
            return filename
        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment,dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )
    # chunk size for streaming downloads
    dl_blocksize = 8192
    def _download_to(self, url, filename):
        """Stream *url* into *filename*, feeding a hash checker and the
        reporthook; returns the response headers."""
        self.info("Downloading %s", url)
        # Download the file
        fp, info = None, None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(strip_fragment(url))
            if isinstance(fp, HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code,fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            with open(filename,'wb') as tfp:
                while True:
                    block = fp.read(bs)
                    if block:
                        checker.feed(block)
                        tfp.write(block)
                        blocknum += 1
                        self.reporthook(url, filename, blocknum, bs, size)
                    else:
                        break
                self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp: fp.close()
    def reporthook(self, url, filename, blocknum, blksize, size):
        """Progress callback hook; subclasses may override."""
        pass    # no-op
    def open_url(self, url, warning=None):
        """Open *url*, translating the zoo of urllib/httplib errors either
        into a warning (when *warning* is given) or a DistutilsError."""
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, httplib.InvalidURL) as v:
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib2.HTTPError as v:
            # HTTP errors are returned, not raised, so callers can inspect them
            return v
        except urllib2.URLError as v:
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except httplib.BadStatusLine as v:
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except httplib.HTTPException as v:
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))
    def _download_url(self, scheme, url, tmpdir):
        """Dispatch a download by scheme (svn/git/hg/file/http...), returning
        the local filename or checkout directory."""
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                # sanitize path-traversal-ish names
                name = name.replace('..','.').replace('\\','_')
        else:
            name = "__downloaded__"    # default if URL has no path contents
        if name.endswith('.egg.zip'):
            name = name[:-4]    # strip the extra .zip before download
        filename = os.path.join(tmpdir,name)
        # Download the file
        #
        if scheme=='svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme=='git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme=='file':
            return url2pathname(urlparse(url)[2])
        else:
            self.url_ok(url, True)   # raises error if not allowed
            return self._attempt_download(url, filename)
    def scan_url(self, url):
        """Scan *url* for download links, forcing retrieval."""
        self.process_url(url, True)
    def _attempt_download(self, url, filename):
        """Download *url*; if the result is HTML, treat it specially."""
        headers = self._download_to(url, filename)
        if 'html' in headers.get('content-type','').lower():
            return self._download_html(url, headers, filename)
        else:
            return filename
    def _download_html(self, url, headers, filename):
        """An HTML response is only acceptable if it's a Subversion index
        page (in which case do an svn checkout); otherwise it's an error."""
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break   # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at "+url)
    def _download_svn(self, url, filename):
        """Check out *url* with the ``svn`` command line, extracting any
        user:password credentials embedded in an svn:// URL."""
        url = url.split('#',1)[0]   # remove any fragment for svn's sake
        creds = ''
        if url.lower().startswith('svn:') and '@' in url:
            scheme, netloc, path, p, q, f = urlparse(url)
            if not netloc and path.startswith('//') and '/' in path[2:]:
                netloc, path = path[2:].split('/',1)
                auth, host = splituser(netloc)
                if auth:
                    if ':' in auth:
                        user, pw = auth.split(':',1)
                        creds = " --username=%s --password=%s" % (user, pw)
                    else:
                        creds = " --username="+auth
                    netloc = host
                    # NOTE(review): passing `url` (not `path`) as the path
                    # component here looks wrong -- verify against upstream.
                    url = urlunparse((scheme, netloc, url, p, q, f))
        self.info("Doing subversion checkout from %s to %s", url, filename)
        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
        return filename
    @staticmethod
    def _vcs_split_rev_from_url(url, pop_prefix=False):
        """Split a VCS URL into (clean url, revision-or-None), dropping the
        ``vcs+`` scheme prefix and any ``@rev`` suffix / fragment."""
        scheme, netloc, path, query, frag = urlsplit(url)
        scheme = scheme.split('+', 1)[-1]
        # Some fragment identification fails
        path = path.split('#',1)[0]
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        # Also, discard fragment
        url = urlunsplit((scheme, netloc, path, query, ''))
        return url, rev
    def _download_git(self, url, filename):
        """Clone *url* with the ``git`` command line, checking out ``@rev``
        when one was given."""
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing git clone from %s to %s", url, filename)
        os.system("git clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))
        return filename
    def _download_hg(self, url, filename):
        """Clone *url* with the ``hg`` command line, updating to ``@rev``
        when one was given."""
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing hg clone from %s to %s", url, filename)
        os.system("hg clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Updating to %s", rev)
            # ">&-" closes stdout for the child to silence hg's output
            os.system("(cd %s && hg up -C -r %s >&-)" % (
                filename,
                rev,
            ))
        return filename
    # thin logging wrappers so subclasses can redirect output
    def debug(self, msg, *args):
        log.debug(msg, *args)
    def info(self, msg, *args):
        log.info(msg, *args)
    def warn(self, msg, *args):
        log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
# Bound .sub method: entity_sub(callback, text) replaces each entity match.
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    """Return the character for code point *c*; non-int values pass through."""
    if not isinstance(c, int):
        return c
    # unichr handles code points beyond latin-1 on Python 2 narrow builds
    return unichr(c) if c > 255 else chr(c)
def decode_entity(match):
    """Translate one regex *match* of an HTML entity into its character."""
    ref = match.group(1)
    if ref.startswith('#x'):
        codepoint = int(ref[2:], 16)
    elif ref.startswith('#'):
        codepoint = int(ref[1:])
    else:
        # Unknown named entities fall back to the original matched text.
        codepoint = name2codepoint.get(ref, match.group(0))
    return uchr(codepoint)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    # Delegates to the module-level compiled pattern's .sub with decode_entity.
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable with the global socket
    default timeout set to *timeout* seconds, restoring the prior value."""
    def decorate(func):
        def timed_call(*args, **kwargs):
            previous = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                # Always restore, even if func raised.
                socket.setdefaulttimeout(previous)
        return timed_call
    return decorate
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
    """
    A username/password pair. Use like a namedtuple.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Support tuple-style unpacking: user, pw = cred
        return iter((self.username, self.password))

    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(ConfigParser.ConfigParser):
    """Accessor for repository credentials stored in the user's ~/.pypirc."""

    def __init__(self):
        """
        Load from ~/.pypirc
        """
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        ConfigParser.ConfigParser.__init__(self, defaults)
        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)

    @property
    def creds_by_repository(self):
        """Map each configured repository URL to its Credential."""
        repo_sections = (
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        )
        return dict(self._get_repo_cred(section) for section in repo_sections)

    def _get_repo_cred(self, section):
        """Return a (repository-url, Credential) pair for one config section."""
        repo = self.get(section, 'repository').strip()
        cred = Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )
        return repo, cred

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib2.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    scheme, netloc, path, params, query, frag = urlparse(url)
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")
    if scheme in ('http', 'https'):
        # credentials may be embedded as user:pass@host in the netloc
        auth, host = splituser(netloc)
    else:
        auth = None
    if not auth:
        # fall back to credentials configured in ~/.pypirc, if any
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)' % info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # rebuild the URL without the user:pass@ portion before requesting
        new_url = urlunparse((scheme,host,path,params,query,frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
        if s2==scheme and h2==host:
            fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
    return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """Historical SourceForge URL rewriter; now a no-op kept for API compat."""
    return url  # backward compatibility
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            # an index.html short-circuits the generated listing
            if f=='index.html':
                with open(os.path.join(filename,f),'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename,f)):
                f+='/'
            files.append("<a href=%r>%s</a>" % (f,f))
        else:
            # no index.html found: synthesize a minimal directory listing
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    # wrap the result in an HTTPError so callers get a file-like with .code
    return HTTPError(url, status, message, headers, StringIO(body))
| gpl-3.0 |
chubbymaggie/claripy | tests/test_expression.py | 2 | 16903 | import claripy
import nose
def test_smudging():
    """Plain ints/strings mixed into BV expressions get coerced to BV ASTs."""

    def check_const_operand(node, value, bits):
        # The coerced operand must be a BV AST wrapping (value, bits).
        nose.tools.assert_true(isinstance(node, claripy.ast.BV))
        nose.tools.assert_equal(node.args[0], value)
        nose.tools.assert_equal(node.args[1], bits)

    x = claripy.BVS('x', 32)
    check_const_operand((x + 1).args[1], 1, 32)

    x = claripy.BVS('x', 32)
    y = x * 1
    z = y + 1
    check_const_operand(y.args[1], 1, 32)
    check_const_operand(z.args[1], 1, 32)

    ccc = claripy.If(x > 10, x*3 + 2, x*4 + 2)
    check_const_operand(ccc.args[1].args[1], 2, 32)

    # strings are smudged into their byte-value bitvectors
    x = claripy.BVS('x', 32)
    check_const_operand((x + "AAAA").args[1], 0x41414141, 32)
def test_expression():
    """Exercise core BV AST operations: reversal, slicing, extension,
    chopping, identity (hash-consing), replacement, and collapsing.
    NOTE: the many `assert_is` checks depend on claripy's global AST cache."""
    bc = claripy.backends.concrete
    e = claripy.BVV(0x01020304, 32)
    nose.tools.assert_equal(len(e), 32)
    r = e.reversed
    nose.tools.assert_equal(bc.convert(r), 0x04030201)
    nose.tools.assert_equal(len(r), 32)
    nose.tools.assert_equal([ bc.convert(i) for i in r.chop(8) ], [ 4, 3, 2, 1 ] )
    e1 = r[31:24]
    nose.tools.assert_equal(bc.convert(e1), 0x04)
    nose.tools.assert_equal(len(e1), 8)
    nose.tools.assert_equal(bc.convert(e1[2]), 1)
    nose.tools.assert_equal(bc.convert(e1[1]), 0)
    ee1 = e1.zero_extend(8)
    nose.tools.assert_equal(bc.convert(ee1), 0x0004)
    nose.tools.assert_equal(len(ee1), 16)
    # sign extension propagates the top bit
    ee1 = claripy.BVV(0xfe, 8).sign_extend(8)
    nose.tools.assert_equal(bc.convert(ee1), 0xfffe)
    nose.tools.assert_equal(len(ee1), 16)
    xe1 = [ bc.convert(i) for i in e1.chop(1) ]
    nose.tools.assert_equal(xe1, [ 0, 0, 0, 0, 0, 1, 0, 0 ])
    # 1-bit addition wraps? no: result is stored as 2 here per claripy semantics
    a = claripy.BVV(1, 1)
    nose.tools.assert_equal(bc.convert(a+a), 2)
    x = claripy.BVV(1, 32)
    nose.tools.assert_equal(x.length, 32)
    y = claripy.LShR(x, 10)
    nose.tools.assert_equal(y.length, 32)
    r = claripy.BVV(0x01020304, 32)
    rr = r.reversed
    rrr = rr.reversed
    #nose.tools.assert_is(bc.convert(r), bc.convert(rrr))
    #nose.tools.assert_is(type(bc.convert(rr)), claripy.A)
    nose.tools.assert_equal(bc.convert(rr), 0x04030201)
    nose.tools.assert_is(r.concat(rr), claripy.Concat(r, rr))
    rsum = r+rr
    nose.tools.assert_equal(bc.convert(rsum), 0x05050505)
    # double-reversal of a symbol must be the *same* AST object
    r = claripy.BVS('x', 32)
    rr = r.reversed
    rrr = rr.reversed
    nose.tools.assert_is(r, rrr)
    # test identity
    nose.tools.assert_is(r, rrr)
    nose.tools.assert_is_not(r, rr)
    ii = claripy.BVS('ii', 32)
    ij = claripy.BVS('ij', 32)
    nose.tools.assert_is(ii, ii)
    nose.tools.assert_is_not(ii, ij)
    # strided intervals: identical bounds are "identical" but not the same object
    si = claripy.SI(bits=32, stride=2, lower_bound=20, upper_bound=100)
    sj = claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=10)
    sk = claripy.SI(bits=32, stride=2, lower_bound=20, upper_bound=100)
    nose.tools.assert_true(claripy.backends.vsa.identical(si, si))
    nose.tools.assert_false(claripy.backends.vsa.identical(si, sj))
    nose.tools.assert_true(claripy.backends.vsa.identical(si, sk))
    nose.tools.assert_is_not(si, sj)
    nose.tools.assert_is_not(sj, sk)
    nose.tools.assert_is_not(sk, si)
    # test hash cache
    nose.tools.assert_is(a+a, a+a)
    # test replacement
    old = claripy.BVS('old', 32, explicit_name=True)
    new = claripy.BVS('new', 32, explicit_name=True)
    ooo = claripy.BVV(0, 32)
    old_formula = claripy.If((old + 1)%256 == 0, old+10, old+20)
    print old_formula.dbg_repr()
    new_formula = old_formula.replace(old, new)
    print new_formula.dbg_repr()
    ooo_formula = new_formula.replace(new, ooo)
    print ooo_formula.dbg_repr()
    nose.tools.assert_not_equal(hash(old_formula), hash(new_formula))
    nose.tools.assert_not_equal(hash(old_formula), hash(ooo_formula))
    nose.tools.assert_not_equal(hash(new_formula), hash(ooo_formula))
    nose.tools.assert_equal(old_formula.variables, { 'old' })
    nose.tools.assert_equal(new_formula.variables, { 'new' })
    nose.tools.assert_equal(ooo_formula.variables, ooo.variables)
    nose.tools.assert_true(old_formula.symbolic)
    nose.tools.assert_true(new_formula.symbolic)
    nose.tools.assert_true(new_formula.symbolic)
    nose.tools.assert_equal(str(old_formula).replace('old', 'new'), str(new_formula))
    # with old := 0, the else-branch (old+20) is taken
    nose.tools.assert_equal(bc.convert(ooo_formula), 20)
    # test dict replacement
    old = claripy.BVS('old', 32, explicit_name=True)
    new = claripy.BVS('new', 32, explicit_name=True)
    c = (old + 10) - (old + 20)
    d = (old + 1) - (old + 2)
    cr = c.replace_dict({(old+10).cache_key: (old+1), (old+20).cache_key: (old+2)})
    nose.tools.assert_is(cr, d)
    # test AST collapse
    s = claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)
    b = claripy.BVV(20, 32)
    sb = s+b
    nose.tools.assert_is_instance(sb.args[0], claripy.ast.Base)
    bb = b+b
    # this was broken previously -- it was checking if type(bb.args[0]) == A,
    # and it wasn't, but was instead a subclass. leaving this out for now
    # nose.tools.assert_not_is_instance(bb.args[0], claripy.ast.Base)
    # ss = s+s
    # (see above)
    # nose.tools.assert_not_is_instance(ss.args[0], claripy.ast.Base)
    sob = s|b
    # for now, this is collapsed. Presumably, Fish will make it not collapse at some point
    nose.tools.assert_is_instance(sob.args[0], claripy.ast.Base)
    # make sure the AST collapses for delayed ops like reversing
    rb = b.reversed
    #nose.tools.assert_is_instance(rb.args[0], claripy.ast.Base)
    # TODO: Properly delay reversing: should not be eager
    nose.tools.assert_is_not(rb, bb)
    nose.tools.assert_is(rb, rb)
    # test some alternate bvv creation methods
    nose.tools.assert_is(claripy.BVV('AAAA'), claripy.BVV(0x41414141, 32))
    nose.tools.assert_is(claripy.BVV('AAAA', 32), claripy.BVV(0x41414141, 32))
    nose.tools.assert_is(claripy.BVV('AB'), claripy.BVV(0x4142, 16))
    nose.tools.assert_is(claripy.BVV('AB', 16), claripy.BVV(0x4142, 16))
    # mismatched explicit width must be rejected
    nose.tools.assert_raises(claripy.errors.ClaripyValueError, claripy.BVV, 'AB', 8)
def test_cardinality():
    """Cardinality and single/multi-valued flags for symbols, bounded
    symbols, concrete values, and their unions/intersections."""
    x = claripy.BVS('x', 32)
    y = claripy.BVS('y', 32, min=100, max=120)
    n = claripy.BVV(10, 32)
    m = claripy.BVV(20, 32)

    # exact cardinalities
    for ast, expected in (
        (y, 21),
        (x, 2**32),
        (n, 1),
        (m, 1),
        (n.union(m), 2),
        (n.union(y), 111),
        (y.intersection(x), 21),
        (n.intersection(m), 0),
        (y.intersection(m), 0),
    ):
        nose.tools.assert_equals(ast.cardinality, expected)

    # concrete values are single-valued; symbols are multi-valued
    nose.tools.assert_true(n.singlevalued)
    nose.tools.assert_false(n.multivalued)
    nose.tools.assert_true(y.multivalued)
    nose.tools.assert_false(y.singlevalued)
    nose.tools.assert_false(x.singlevalued)
    nose.tools.assert_true(x.multivalued)

    # unions of distinct values are multi-valued;
    # empty intersections are neither single- nor multi-valued
    nose.tools.assert_false(y.union(m).singlevalued)
    nose.tools.assert_true(y.union(m).multivalued)
    nose.tools.assert_false(y.intersection(m).singlevalued)
    nose.tools.assert_false(y.intersection(m).multivalued)
def test_if_stuff():
    """ITE excavation/burrowing; the `assert_is` identity checks rely on
    claripy's hash-consing cache, so construction order matters."""
    x = claripy.BVS('x', 32)
    #y = claripy.BVS('y', 32)
    # nested If with an identical condition collapses to a single If
    c = claripy.If(x > 10, (claripy.If(x > 10, x*3, x*2)), x*4) + 2
    cc = claripy.If(x > 10, x*3, x*4) + 2
    ccc = claripy.If(x > 10, x*3+2, x*4+2)
    cccc = x*claripy.If(x > 10, claripy.BVV(3, 32), claripy.BVV(4, 32)) + 2
    nose.tools.assert_is(c, cc)
    # excavation hoists the If outward; burrowing pushes it inward
    nose.tools.assert_is(c.ite_excavated, ccc)
    nose.tools.assert_is(ccc.ite_burrowed, cccc)
    i = c + c
    ii = claripy.If(x > 10, (x*3+2)+(x*3+2), (x*4+2)+(x*4+2))
    nose.tools.assert_is(i.ite_excavated, ii)
    # opposite-polarity conditions (x<=10 vs x>10) merge into one If
    cn = claripy.If(x <= 10, claripy.BVV(0x10, 32), 0x20)
    iii = c + cn
    iiii = claripy.If(x > 10, (x*3+2)+0x20, (x*4+2)+0x10)
    nose.tools.assert_is(iii.ite_excavated, iiii)
def test_ite():
    """Nose test generator: run raw_ite against each solver front-end."""
    for solver_type in (claripy.Solver, claripy.SolverHybrid, claripy.SolverComposite):
        yield raw_ite, solver_type
def raw_ite(solver_type):
    """Evaluate ite_dict/ite_cases expressions under the given solver class."""
    s = solver_type()
    x = claripy.BVS("x", 32)
    y = claripy.BVS("y", 32)
    z = claripy.BVS("z", 32)
    # ite_dict: a lookup table x -> value, defaulting to 0
    ite = claripy.ite_dict(x, {1:11, 2:22, 3:33, 4:44, 5:55, 6:66, 7:77, 8:88, 9:99}, claripy.BVV(0, 32))
    nose.tools.assert_equal(sorted(s.eval(ite, 100)), [ 0, 11, 22, 33, 44, 55, 66, 77, 88, 99 ] )
    # constraining the result pins down the key
    ss = s.branch()
    ss.add(ite == 88)
    nose.tools.assert_equal(sorted(ss.eval(ite, 100)), [ 88 ] )
    nose.tools.assert_equal(sorted(ss.eval(x, 100)), [ 8 ] )
    # a symbolic table value: excluding all concrete outcomes leaves 99 and y
    ity = claripy.ite_dict(x, {1:11, 2:22, 3:y, 4:44, 5:55, 6:66, 7:77, 8:88, 9:99}, claripy.BVV(0, 32))
    ss = s.branch()
    ss.add(ity != 11)
    ss.add(ity != 22)
    ss.add(ity != 33)
    ss.add(ity != 44)
    ss.add(ity != 55)
    ss.add(ity != 66)
    ss.add(ity != 77)
    ss.add(ity != 88)
    ss.add(ity != 0)
    ss.add(y == 123)
    nose.tools.assert_equal(sorted(ss.eval(ity, 100)), [ 99, 123 ] )
    nose.tools.assert_equal(sorted(ss.eval(x, 100)), [ 3, 9 ] )
    nose.tools.assert_equal(sorted(ss.eval(y, 100)), [ 123 ] )
    # ite_cases: guarded cases over (x, y) pairs; y/x == 2 in every live case
    itz = claripy.ite_cases([ (claripy.And(x == 10, y == 20), 33), (claripy.And(x==1, y==2), 3), (claripy.And(x==100, y==200), 333) ], claripy.BVV(0, 32))
    ss = s.branch()
    ss.add(z == itz)
    ss.add(itz != 0)
    nose.tools.assert_equal(ss.eval(y/x, 100), ( 2, ))
    nose.tools.assert_items_equal(sorted(ss.eval(x, 100)), ( 1, 10, 100 ))
    nose.tools.assert_items_equal(sorted(ss.eval(y, 100)), ( 2, 20, 200 ))
def test_bool():
    # Concrete backend must fold And/Or over plain Python booleans.
    bc = claripy.backends.concrete
    cases = (
        (claripy.And, (False, False, True), False),
        (claripy.And, (True, True, True), True),
        (claripy.Or, (False, False, True), True),
        (claripy.Or, (False, False, False), False),
    )
    for op, operands, expected in cases:
        nose.tools.assert_equal(bc.convert(op(*operands)), expected)
def test_extract():
    # Extraction a[hi:lo] is inclusive on both ends; omitted or negative
    # bounds normalize against the bit width, and equivalent slices must
    # return the identical (hash-consed) AST object.
    a = claripy.BVS("a", 32)
    assert a[7:] is a[7:0]
    assert a[31:] is a
    assert a[:] is a
    assert a[:0] is a
    assert a[:-8] is a[31:24]
    assert a[-1:] is a[31:0]
    assert a[-1:-8] is a[31:24]
def test_get_byte():
    # get_byte(i) extracts the i-th byte (counting from the most
    # significant end) of a concrete BV.
    word = claripy.BVV("ABCD")
    assert word.get_byte(1) is claripy.BVV("B")
def test_extract_concat_simplify():
    # Re-concatenating adjacent extracts of the same AST must simplify
    # back to the original AST object.
    a = claripy.BVS("a", 32)
    assert a[31:0] is a
    assert a[31:8].concat(a[7:0]) is a
    assert a[31:16].concat(a[15:8], a[7:0]) is a
    assert a[31:24].concat(a[23:16], a[15:8], a[7:0]) is a
    # The same must hold for a non-leaf expression (a + 100).
    a = claripy.BVS("a", 32)
    b = a + 100
    b_concat = b[31:8].concat(b[7:0])
    a100 = a + 100
    # Semantically equal under the solver...
    assert claripy.is_false(b_concat == a100) is False
    assert list(claripy.Solver().eval(b_concat == a100, 2)) == [ True ]
    # ...and structurally identical after simplification.
    assert b_concat is a100
    assert claripy.is_true(b_concat == a100)
def test_true_false_cache():
    # is_true/is_false results are cached per-AST; mutating an AST's args
    # in place (never do this in real code) must not poison the cache for
    # freshly-built expressions.
    claripy.backends._quick_backends.append(claripy.backends.z3)
    a = claripy.BVS("a_WILL_BE_VIOLATED", 32)
    c = a == a+1
    assert claripy.is_false(c)
    # Violate AST immutability: c is now structurally "a == 0", but the
    # cached truth value must survive.
    c.args[1].args = (a, claripy.BVV(0, 32))
    assert claripy.is_false(c)
    assert not claripy.is_true(c)
    # A new "a == a" AST must not be affected by the stale cache entry.
    assert not claripy.is_false(a == a)
    # Restore the quick-backend list for the other tests.
    claripy.backends._quick_backends[-1:] = [ ]
def test_depth_repr():
x = claripy.BVS("x", 32)
y = claripy.LShR(x, 10)
y = claripy.LShR(y, 10)
y = claripy.LShR(y, 10)
y = claripy.LShR(y, 10)
y = claripy.LShR(y, 10)
y = claripy.LShR(y, 10)
y = claripy.LShR(y, 10)
print y.shallow_repr(max_depth=5)
nose.tools.assert_equal(y.shallow_repr(max_depth=5), "<BV32 LShR(LShR(LShR(LShR(LShR(<...>, <...>), 0xa), 0xa), 0xa), 0xa)>")
def test_rename():
x1 = claripy.BVS('x', 32)
x2 = x1._rename('y')
print x2.variables
assert x2.variables == frozenset(('y',))
def test_canonical():
    # canonicalize() renames variables deterministically, so two ASTs that
    # differ only in variable identity canonicalize to the same AST.
    x1 = claripy.BVS('x', 32)
    b1 = claripy.BoolS('b')
    c1 = claripy.BoolS('c')
    x2 = claripy.BVS('x', 32)
    b2 = claripy.BoolS('b')
    c2 = claripy.BoolS('c')
    assert x1.canonicalize()[-1] is x2.canonicalize()[-1]
    y1 = claripy.If(claripy.And(b1, c1), x1, ((x1+x1)*x1)+1)
    y2 = claripy.If(claripy.And(b2, c2), x2, ((x2+x2)*x2)+1)
    one_names = frozenset.union(x1.variables, b1.variables, c1.variables)
    two_names = frozenset.union(x2.variables, b2.variables, c2.variables)
    # Each expression's leaf ASTs must cover exactly its own names...
    assert frozenset.union(*[a.variables for a in y1.recursive_leaf_asts]) == one_names
    assert frozenset.union(*[a.variables for a in y2.recursive_leaf_asts]) == two_names
    # ...and the canonical forms must coincide.
    assert y1.canonicalize()[-1] is y2.canonicalize()[-1]
def test_depth():
    # Leaf ASTs have depth 1; each operation layer adds one level.
    leaf = claripy.BVS('x', 32)
    assert leaf.depth == 1
    assert (leaf + 1).depth == 2
def test_multiarg():
    # Associative operations (+, *, |, ^, &) are flattened into a single
    # multi-argument AST node instead of a nested binary tree.
    x = claripy.BVS('x', 32)
    o = claripy.BVV(2, 32)
    x_add = x+x+x+x
    x_mul = x*x*x*x
    x_sub = x-(x+1)-(x+2)-(x+3)
    x_or = x|(x+1)|(x+2)|(x+3)
    x_xor = x^(x+1)^(x+2)^(x+3)
    x_and = x&(x+1)&(x+2)&(x+3)
    # Flattening must not lose track of the variables involved.
    assert x_add.variables == x.variables
    assert x_mul.variables == x.variables
    assert x_sub.variables == x.variables
    assert x_or.variables == x.variables
    assert x_xor.variables == x.variables
    assert x_and.variables == x.variables
    assert (claripy.BVV(1, 32)+(x+x)).variables == x.variables
    # Four operands collapse into one 4-ary node.
    assert len(x_add.args) == 4
    assert len(x_mul.args) == 4
    #assert len(x_sub.args) == 4 # needs more work
    assert len(x_or.args) == 4
    assert len(x_xor.args) == 4
    assert len(x_and.args) == 4
    # Substituting x := 2 must concretize each expression correctly.
    assert (x_add).replace(x, o).args[0] == 8
    assert (x_mul).replace(x, o).args[0] == 16
    assert (x_or).replace(x, o).args[0] == 7
    assert (x_xor).replace(x, o).args[0] == 0
    assert (x_and).replace(x, o).args[0] == 0
    assert (100 + (x_sub).replace(x, o)).args[0] == 90
    # make sure that all backends handle this properly
    for b in claripy.backends._all_backends:
        try:
            b.convert(x+x+x+x)
        except claripy.BackendError:
            # Backends that cannot convert multi-arg adds may opt out.
            pass
    print 'ok'
def test_signed_concrete():
    # Concrete-backend semantics of unsigned vs. signed division/modulo.
    bc = claripy.backends.concrete
    a = claripy.BVV(5, 32)
    b = claripy.BVV(-5, 32)
    c = claripy.BVV(3, 32)
    d = claripy.BVV(-3, 32)
    # test unsigned: / and % treat the bit patterns as unsigned values.
    assert bc.convert(a / c) == 1
    assert bc.convert(a / d) == 0
    assert bc.convert(b / c) == 0x55555553
    assert bc.convert(b / d) == 0
    assert bc.convert(a % c) == 2
    assert bc.convert(a % d) == 5
    assert bc.convert(b % c) == 2
    assert bc.convert(b % d) == -5
    # test signed: SDiv truncates toward zero; SMod's sign follows the
    # dividend.  (The original comment wrongly said "unsigned" here.)
    assert bc.convert(a.SDiv(c)) == 1
    assert bc.convert(a.SDiv(d)) == -1
    assert bc.convert(b.SDiv(c)) == -1
    assert bc.convert(b.SDiv(d)) == 1
    assert bc.convert(a.SMod(c)) == 2
    assert bc.convert(a.SMod(d)) == 2
    assert bc.convert(b.SMod(c)) == -2
    assert bc.convert(b.SMod(d)) == -2
def test_signed_symbolic():
    # Same division/modulo semantics as test_signed_concrete, but checked
    # through the symbolic solver (results are unsigned 32-bit values).
    solver = claripy.Solver()
    a = claripy.BVS("a", 32)
    b = claripy.BVS("b", 32)
    c = claripy.BVS("c", 32)
    d = claripy.BVS("d", 32)
    solver.add(a == 5)
    solver.add(b == -5)
    solver.add(c == 3)
    solver.add(d == -3)
    # test unsigned: / and % treat the bit patterns as unsigned values.
    assert list(solver.eval(a / c, 2)) == [1]
    assert list(solver.eval(a / d, 2)) == [0]
    assert list(solver.eval(b / c, 2)) == [0x55555553]
    assert list(solver.eval(b / d, 2)) == [0]
    assert list(solver.eval(a % c, 2)) == [2]
    assert list(solver.eval(a % d, 2)) == [5]
    assert list(solver.eval(b % c, 2)) == [2]
    assert list(solver.eval(b % d, 2)) == [2**32-5]
    # test signed: SDiv/SMod; negative results appear as two's-complement
    # unsigned values.  (The original comment wrongly said "unsigned".)
    assert list(solver.eval(a.SDiv(c), 2)) == [1]
    assert list(solver.eval(a.SDiv(d), 2)) == [2**32-1]
    assert list(solver.eval(b.SDiv(c), 2)) == [2**32-1]
    assert list(solver.eval(b.SDiv(d), 2)) == [1]
    assert list(solver.eval(a.SMod(c), 2)) == [2]
    assert list(solver.eval(a.SMod(d), 2)) == [2]
    assert list(solver.eval(b.SMod(c), 2)) == [2**32-2]
    assert list(solver.eval(b.SMod(d), 2)) == [2**32-2]
def test_arith_shift():
    # '>>' on BVs is an arithmetic (sign-preserving) right shift.
    bc = claripy.backends.concrete
    assert bc.convert(claripy.BVV(-4, 32) >> 1) == -2
    solver = claripy.Solver()
    sym = claripy.BVS("a", 32)
    solver.add(sym == -4)
    # Symbolically, -2 shows up as its two's-complement unsigned value.
    assert list(solver.eval(sym >> 1, 2)) == [2**32-2]
if __name__ == '__main__':
    # Run the whole suite directly (without nose); generator tests like
    # test_ite are expanded by hand.
    test_multiarg()
    test_depth()
    test_rename()
    test_canonical()
    test_depth_repr()
    test_extract()
    test_true_false_cache()
    test_smudging()
    test_expression()
    test_bool()
    test_extract_concat_simplify()
    test_get_byte()
    for func, param in test_ite():
        func(param)
    test_if_stuff()
    test_signed_concrete()
    test_signed_symbolic()
    test_arith_shift()
| bsd-2-clause |
seem-sky/kbengine | kbe/src/lib/python/Lib/asyncio/base_events.py | 61 | 40585 | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import heapq
import inspect
import logging
import os
import socket
import subprocess
import time
import traceback
import sys
from . import coroutines
from . import events
from . import futures
from . import tasks
from .coroutines import coroutine
from .log import logger
__all__ = ['BaseEventLoop', 'Server']

# Number of worker threads for the default ThreadPoolExecutor that
# run_in_executor() creates lazily when no executor has been set.
_MAX_WORKERS = 5
def _format_handle(handle):
cb = handle._callback
if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
class _StopError(BaseException):
    """Raised to stop the event loop."""
    # Derives from BaseException (not Exception) so that generic
    # 'except Exception' handlers in callbacks cannot swallow it.
def _check_resolved_address(sock, address):
# Ensure that the address is already resolved to avoid the trap of hanging
# the entire event loop when the address requires doing a DNS lookup.
family = sock.family
if family == socket.AF_INET:
host, port = address
elif family == socket.AF_INET6:
host, port = address[:2]
else:
return
type_mask = 0
if hasattr(socket, 'SOCK_NONBLOCK'):
type_mask |= socket.SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
type_mask |= socket.SOCK_CLOEXEC
# Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
# already resolved.
try:
socket.getaddrinfo(host, port,
family=family,
type=(sock.type & ~type_mask),
proto=sock.proto,
flags=socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), got %r: %s"
% (address, err))
def _raise_stop_error(*args):
    # Used by stop() and run_until_complete(); accepts (and ignores) any
    # arguments so it can double as a Future done-callback.
    raise _StopError
class Server(events.AbstractServer):
    """A listening server created by BaseEventLoop.create_server().

    Tracks the listening sockets and the number of attached client
    transports so wait_closed() can block until both are gone.
    """

    def __init__(self, loop, sockets):
        self._loop = loop
        self.sockets = sockets
        # Number of client transports currently attached.
        self._active_count = 0
        # Futures created by wait_closed(); resolved in _wakeup().
        self._waiters = []

    def __repr__(self):
        return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)

    def _attach(self):
        # Called by a transport when a client connection is accepted.
        assert self.sockets is not None
        self._active_count += 1

    def _detach(self):
        # Called when a client transport goes away; once the server is
        # closed and the last transport detaches, wake wait_closed().
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self.sockets is None:
            self._wakeup()

    def close(self):
        """Stop listening: close all server sockets (idempotent)."""
        sockets = self.sockets
        if sockets is None:
            return
        self.sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        if self._active_count == 0:
            self._wakeup()

    def _wakeup(self):
        # Resolve every pending wait_closed() future exactly once.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    @coroutine
    def wait_closed(self):
        """Coroutine: wait until close() is called and all client
        transports have detached."""
        if self.sockets is None or self._waiters is None:
            return
        waiter = futures.Future(loop=self._loop)
        self._waiters.append(waiter)
        yield from waiter
class BaseEventLoop(events.AbstractEventLoop):
    """Base class of event loop implementations.

    Provides callback scheduling, executor integration, connection and
    server creation, and exception handling; subclasses supply the actual
    I/O multiplexing via the _make_* / _process_events hooks.
    """

    def __init__(self):
        self._closed = False
        # FIFO of Handles ready to run on the next loop iteration.
        self._ready = collections.deque()
        # Heap (heapq) of TimerHandles ordered by scheduled time.
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        self._running = False
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        # Debug mode comes from PYTHONASYNCIODEBUG unless -E suppressed
        # environment variables.
        self._debug = (not sys.flags.ignore_environment
                       and bool(os.environ.get('PYTHONASYNCIODEBUG')))
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
def __repr__(self):
    """Short diagnostic repr showing the loop's run/closed/debug state."""
    return '<{} running={} closed={} debug={}>'.format(
        self.__class__.__name__, self.is_running(),
        self.is_closed(), self.get_debug())
def create_task(self, coro):
    """Schedule a coroutine object.

    Return a task object.
    """
    task = tasks.Task(coro, loop=self)
    if task._source_traceback:
        # Drop the frame for this method so the traceback points at the
        # caller's code.
        del task._source_traceback[-1]
    return task
def _make_socket_transport(self, sock, protocol, waiter=None, *,
                           extra=None, server=None):
    """Create socket transport.  Hook: must be provided by the subclass."""
    raise NotImplementedError
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
                        server_side=False, server_hostname=None,
                        extra=None, server=None):
    """Create SSL transport.  Hook: must be provided by the subclass."""
    raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
                             address=None, waiter=None, extra=None):
    """Create datagram transport.  Hook: must be provided by the subclass."""
    raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                              extra=None):
    """Create read pipe transport.  Hook: must be provided by the subclass."""
    raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                               extra=None):
    """Create write pipe transport.  Hook: must be provided by the subclass."""
    raise NotImplementedError
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
                               stdin, stdout, stderr, bufsize,
                               extra=None, **kwargs):
    """Create subprocess transport.  Hook: must be provided by the subclass."""
    raise NotImplementedError
def _write_to_self(self):
    """Write a byte to self-pipe, to wake up the event loop.

    This may be called from a different thread.

    The subclass is responsible for implementing the self-pipe.
    """
    raise NotImplementedError
def _process_events(self, event_list):
    """Process selector events.  Hook: must be provided by the subclass."""
    raise NotImplementedError
def _check_closed(self):
    # Guard used by run_forever()/run_until_complete() and schedulers.
    if self._closed:
        raise RuntimeError('Event loop is closed')
def run_forever(self):
    """Run until stop() is called."""
    self._check_closed()
    if self._running:
        # Re-entrant runs are not supported.
        raise RuntimeError('Event loop is running.')
    self._running = True
    try:
        while True:
            try:
                self._run_once()
            except _StopError:
                # Raised by _raise_stop_error(), scheduled via stop().
                break
    finally:
        self._running = False
def run_until_complete(self, future):
    """Run until the Future is done.

    If the argument is a coroutine, it is wrapped in a Task.

    WARNING: It would be disastrous to call run_until_complete()
    with the same coroutine twice -- it would wrap it in two
    different Tasks and that can't be good.

    Return the Future's result, or raise its exception.
    """
    self._check_closed()
    new_task = not isinstance(future, futures.Future)
    future = tasks.async(future, loop=self)
    if new_task:
        # An exception is raised if the future didn't complete, so there
        # is no need to log the "destroy pending task" message
        future._log_destroy_pending = False
    # Stop the loop as soon as the future completes.
    future.add_done_callback(_raise_stop_error)
    self.run_forever()
    future.remove_done_callback(_raise_stop_error)
    if not future.done():
        raise RuntimeError('Event loop stopped before Future completed.')
    return future.result()
def stop(self):
    """Stop running the event loop.

    Every callback scheduled before stop() is called will run. Callbacks
    scheduled after stop() is called will not run. However, those callbacks
    will run if run_forever is called again later.
    """
    # Queue a callback that raises _StopError inside run_forever().
    self.call_soon(_raise_stop_error)
def close(self):
    """Close the event loop.

    This clears the queues and shuts down the executor,
    but does not wait for the executor to finish.

    The event loop must not be running.
    """
    if self._running:
        raise RuntimeError("Cannot close a running event loop")
    if self._closed:
        # Closing twice is a no-op.
        return
    if self._debug:
        logger.debug("Close %r", self)
    self._closed = True
    self._ready.clear()
    self._scheduled.clear()
    executor = self._default_executor
    if executor is not None:
        self._default_executor = None
        # Non-blocking shutdown: pending executor work keeps running.
        executor.shutdown(wait=False)
def is_closed(self):
    """Returns True if the event loop was closed."""
    return self._closed
def is_running(self):
    """Returns True if the event loop is running."""
    return self._running
def time(self):
    """Return the time according to the event loop's clock.

    This is a float expressed in seconds since an epoch, but the
    epoch, precision, accuracy and drift are unspecified and may
    differ per event loop.
    """
    # Monotonic so that scheduled timers are immune to wall-clock jumps.
    return time.monotonic()
def call_later(self, delay, callback, *args):
    """Arrange for a callback to be called at a given time.

    Return a Handle: an opaque object with a cancel() method that
    can be used to cancel the call.

    The delay can be an int or float, expressed in seconds. It is
    always relative to the current time.

    Each callback will be called exactly once. If two callbacks
    are scheduled for exactly the same time, it undefined which
    will be called first.

    Any positional arguments after the callback will be passed to
    the callback when it is called.
    """
    timer = self.call_at(self.time() + delay, callback, *args)
    if timer._source_traceback:
        # Hide this wrapper frame from the recorded creation traceback.
        del timer._source_traceback[-1]
    return timer
def call_at(self, when, callback, *args):
    """Like call_later(), but uses an absolute time.

    Absolute time corresponds to the event loop's time() method.
    """
    if coroutines.iscoroutinefunction(callback):
        raise TypeError("coroutines cannot be used with call_at()")
    if self._debug:
        self._assert_is_current_event_loop()
    timer = events.TimerHandle(when, callback, args, self)
    if timer._source_traceback:
        # Hide this frame from the recorded creation traceback.
        del timer._source_traceback[-1]
    # _scheduled is a heap ordered by the timer's deadline.
    heapq.heappush(self._scheduled, timer)
    return timer
def call_soon(self, callback, *args):
    """Arrange for a callback to be called as soon as possible.

    This operates as a FIFO queue: callbacks are called in the
    order in which they are registered.  Each callback will be
    called exactly once.

    Any positional arguments after the callback will be passed to
    the callback when it is called.
    """
    handle = self._call_soon(callback, args, check_loop=True)
    if handle._source_traceback:
        # Hide this wrapper frame from the recorded creation traceback.
        del handle._source_traceback[-1]
    return handle
def _call_soon(self, callback, args, check_loop):
    # Shared implementation of call_soon()/call_soon_threadsafe();
    # check_loop is False for the threadsafe variant, which may be
    # invoked from another thread.
    if coroutines.iscoroutinefunction(callback):
        raise TypeError("coroutines cannot be used with call_soon()")
    if self._debug and check_loop:
        self._assert_is_current_event_loop()
    handle = events.Handle(callback, args, self)
    if handle._source_traceback:
        del handle._source_traceback[-1]
    self._ready.append(handle)
    return handle
def _assert_is_current_event_loop(self):
    """Asserts that this event loop is the current event loop.

    Non-thread-safe methods of this class make this assumption and will
    likely behave incorrectly when the assumption is violated.

    Should only be called when (self._debug == True).  The caller is
    responsible for checking this condition for performance reasons.
    """
    try:
        current = events.get_event_loop()
    except AssertionError:
        # No current loop set for this thread; nothing to compare to.
        return
    if current is not self:
        raise RuntimeError(
            "Non-thread-safe operation invoked on an event loop other "
            "than the current one")
def call_soon_threadsafe(self, callback, *args):
    """Like call_soon(), but thread-safe."""
    handle = self._call_soon(callback, args, check_loop=False)
    if handle._source_traceback:
        del handle._source_traceback[-1]
    # Poke the self-pipe so a loop blocked in select() wakes up.
    self._write_to_self()
    return handle
def run_in_executor(self, executor, callback, *args):
    """Run callback(*args) in *executor*; return a Future for its result.

    If executor is None, a shared ThreadPoolExecutor is created lazily
    and reused for subsequent calls.
    """
    if coroutines.iscoroutinefunction(callback):
        raise TypeError("Coroutines cannot be used with run_in_executor()")
    if isinstance(callback, events.Handle):
        # Legacy calling convention: a pre-built (non-timer) Handle.
        assert not args
        assert not isinstance(callback, events.TimerHandle)
        if callback._cancelled:
            # Already cancelled: resolve immediately with None.
            f = futures.Future(loop=self)
            f.set_result(None)
            return f
        callback, args = callback._callback, callback._args
    if executor is None:
        executor = self._default_executor
        if executor is None:
            executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
            self._default_executor = executor
    return futures.wrap_future(executor.submit(callback, *args), loop=self)
def set_default_executor(self, executor):
    # Executor used by run_in_executor() when None is passed.
    self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
    """Wrapper around socket.getaddrinfo() used in debug mode: logs the
    query and escalates the log level for slow lookups."""
    msg = ["%s:%r" % (host, port)]
    if family:
        msg.append('family=%r' % family)
    if type:
        msg.append('type=%r' % type)
    if proto:
        msg.append('proto=%r' % proto)
    if flags:
        msg.append('flags=%r' % flags)
    msg = ', '.join(msg)
    logger.debug('Get address info %s', msg)
    t0 = self.time()
    addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
    dt = self.time() - t0
    msg = ('Getting address info %s took %.3f ms: %r'
           % (msg, dt * 1e3, addrinfo))
    # Lookups slower than slow_callback_duration are logged at INFO.
    if dt >= self.slow_callback_duration:
        logger.info(msg)
    else:
        logger.debug(msg)
    return addrinfo
def getaddrinfo(self, host, port, *,
                family=0, type=0, proto=0, flags=0):
    """Resolve host/port in the default executor; return a Future."""
    if self._debug:
        return self.run_in_executor(None, self._getaddrinfo_debug,
                                    host, port, family, type, proto, flags)
    else:
        return self.run_in_executor(None, socket.getaddrinfo,
                                    host, port, family, type, proto, flags)
def getnameinfo(self, sockaddr, flags=0):
    """Reverse-resolve sockaddr in the default executor; return a Future."""
    return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@coroutine
def create_connection(self, protocol_factory, host=None, port=None, *,
                      ssl=None, family=0, proto=0, flags=0, sock=None,
                      local_addr=None, server_hostname=None):
    """Connect to a TCP server.

    Create a streaming transport connection to a given Internet host and
    port: socket family AF_INET or socket.AF_INET6 depending on host (or
    family if specified), socket type SOCK_STREAM. protocol_factory must be
    a callable returning a protocol instance.

    This method is a coroutine which will try to establish the connection
    in the background.  When successful, the coroutine returns a
    (transport, protocol) pair.
    """
    if server_hostname is not None and not ssl:
        raise ValueError('server_hostname is only meaningful with ssl')
    if server_hostname is None and ssl:
        # Use host as default for server_hostname.  It is an error
        # if host is empty or not set, e.g. when an
        # already-connected socket was passed or when only a port
        # is given.  To avoid this error, you can pass
        # server_hostname='' -- this will bypass the hostname
        # check.  (This also means that if host is a numeric
        # IP/IPv6 address, we will attempt to verify that exact
        # address; this will probably fail, but it is possible to
        # create a certificate for a specific IP address, so we
        # don't judge it here.)
        if not host:
            raise ValueError('You must set server_hostname '
                             'when using ssl without a host')
        server_hostname = host
    if host is not None or port is not None:
        if sock is not None:
            raise ValueError(
                'host/port and sock can not be specified at the same time')
        # Resolve remote (and, if given, local) addresses concurrently.
        f1 = self.getaddrinfo(
            host, port, family=family,
            type=socket.SOCK_STREAM, proto=proto, flags=flags)
        fs = [f1]
        if local_addr is not None:
            f2 = self.getaddrinfo(
                *local_addr, family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags)
            fs.append(f2)
        else:
            f2 = None
        yield from tasks.wait(fs, loop=self)
        infos = f1.result()
        if not infos:
            raise OSError('getaddrinfo() returned empty list')
        if f2 is not None:
            laddr_infos = f2.result()
            if not laddr_infos:
                raise OSError('getaddrinfo() returned empty list')
        # Try each resolved address in turn; collect every failure so a
        # combined error can be raised if all of them fail.
        exceptions = []
        for family, type, proto, cname, address in infos:
            try:
                sock = socket.socket(family=family, type=type, proto=proto)
                sock.setblocking(False)
                if f2 is not None:
                    # Bind to the first local address that works.
                    for _, _, _, _, laddr in laddr_infos:
                        try:
                            sock.bind(laddr)
                            break
                        except OSError as exc:
                            exc = OSError(
                                exc.errno, 'error while '
                                'attempting to bind on address '
                                '{!r}: {}'.format(
                                    laddr, exc.strerror.lower()))
                            exceptions.append(exc)
                    else:
                        # No local address could be bound; next candidate.
                        sock.close()
                        sock = None
                        continue
                if self._debug:
                    logger.debug("connect %r to %r", sock, address)
                yield from self.sock_connect(sock, address)
            except OSError as exc:
                if sock is not None:
                    sock.close()
                exceptions.append(exc)
            except:
                # Non-OSError (e.g. CancelledError): clean up and re-raise.
                if sock is not None:
                    sock.close()
                raise
            else:
                break
        else:
            if len(exceptions) == 1:
                raise exceptions[0]
            else:
                # If they all have the same str(), raise one.
                model = str(exceptions[0])
                if all(str(exc) == model for exc in exceptions):
                    raise exceptions[0]
                # Raise a combined exception so the user can see all
                # the various error messages.
                raise OSError('Multiple exceptions: {}'.format(
                    ', '.join(str(exc) for exc in exceptions)))
    elif sock is None:
        raise ValueError(
            'host and port was not specified and no sock specified')
    sock.setblocking(False)
    transport, protocol = yield from self._create_connection_transport(
        sock, protocol_factory, ssl, server_hostname)
    if self._debug:
        # Get the socket from the transport because SSL transport closes
        # the old socket and creates a new SSL socket
        sock = transport.get_extra_info('socket')
        logger.debug("%r connected to %s:%r: (%r, %r)",
                     sock, host, port, transport, protocol)
    return transport, protocol
@coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
                                 server_hostname):
    # Wrap an already-connected socket in a (possibly SSL) transport and
    # wait for the transport to signal readiness via *waiter*.
    protocol = protocol_factory()
    waiter = futures.Future(loop=self)
    if ssl:
        # ssl may be True (use a default context) or an SSLContext.
        sslcontext = None if isinstance(ssl, bool) else ssl
        transport = self._make_ssl_transport(
            sock, protocol, sslcontext, waiter,
            server_side=False, server_hostname=server_hostname)
    else:
        transport = self._make_socket_transport(sock, protocol, waiter)
    yield from waiter
    return transport, protocol
@coroutine
def create_datagram_endpoint(self, protocol_factory,
                             local_addr=None, remote_addr=None, *,
                             family=0, proto=0, flags=0):
    """Create datagram connection.

    Resolves local_addr/remote_addr (if given), binds/connects a UDP
    socket and returns a (transport, protocol) pair.
    """
    if not (local_addr or remote_addr):
        if family == 0:
            # Without any address we cannot infer the socket family.
            raise ValueError('unexpected address family')
        addr_pairs_info = (((family, proto), (None, None)),)
    else:
        # join address by (family, protocol)
        addr_infos = collections.OrderedDict()
        for idx, addr in ((0, local_addr), (1, remote_addr)):
            if addr is not None:
                assert isinstance(addr, tuple) and len(addr) == 2, (
                    '2-tuple is expected')
                infos = yield from self.getaddrinfo(
                    *addr, family=family, type=socket.SOCK_DGRAM,
                    proto=proto, flags=flags)
                if not infos:
                    raise OSError('getaddrinfo() returned empty list')
                for fam, _, pro, _, address in infos:
                    key = (fam, pro)
                    if key not in addr_infos:
                        addr_infos[key] = [None, None]
                    # Slot 0 holds the local address, slot 1 the remote.
                    addr_infos[key][idx] = address
        # each addr has to have info for each (family, proto) pair
        addr_pairs_info = [
            (key, addr_pair) for key, addr_pair in addr_infos.items()
            if not ((local_addr and addr_pair[0] is None) or
                    (remote_addr and addr_pair[1] is None))]
        if not addr_pairs_info:
            raise ValueError('can not get address information')
    exceptions = []
    # Try every viable (family, proto) pair until one socket works.
    for ((family, proto),
         (local_address, remote_address)) in addr_pairs_info:
        sock = None
        r_addr = None
        try:
            sock = socket.socket(
                family=family, type=socket.SOCK_DGRAM, proto=proto)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setblocking(False)
            if local_addr:
                sock.bind(local_address)
            if remote_addr:
                yield from self.sock_connect(sock, remote_address)
                r_addr = remote_address
        except OSError as exc:
            if sock is not None:
                sock.close()
            exceptions.append(exc)
        except:
            # Non-OSError: clean up the socket and re-raise.
            if sock is not None:
                sock.close()
            raise
        else:
            break
    else:
        raise exceptions[0]
    protocol = protocol_factory()
    waiter = futures.Future(loop=self)
    transport = self._make_datagram_transport(sock, protocol, r_addr,
                                              waiter)
    if self._debug:
        if local_addr:
            logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                        "created: (%r, %r)",
                        local_addr, remote_addr, transport, protocol)
        else:
            logger.debug("Datagram endpoint remote_addr=%r created: "
                         "(%r, %r)",
                         remote_addr, transport, protocol)
    yield from waiter
    return transport, protocol
@coroutine
def create_server(self, protocol_factory, host=None, port=None,
                  *,
                  family=socket.AF_UNSPEC,
                  flags=socket.AI_PASSIVE,
                  sock=None,
                  backlog=100,
                  ssl=None,
                  reuse_address=None):
    """Create a TCP server bound to host and port.

    Return a Server object which can be used to stop the service.

    This method is a coroutine.
    """
    if isinstance(ssl, bool):
        raise TypeError('ssl argument must be an SSLContext or None')
    if host is not None or port is not None:
        if sock is not None:
            raise ValueError(
                'host/port and sock can not be specified at the same time')
        AF_INET6 = getattr(socket, 'AF_INET6', 0)
        if reuse_address is None:
            # SO_REUSEADDR is safe on POSIX but has different, unwanted
            # semantics on Windows/Cygwin.
            reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
        sockets = []
        if host == '':
            # Empty host means "bind all interfaces" for getaddrinfo().
            host = None
        infos = yield from self.getaddrinfo(
            host, port, family=family,
            type=socket.SOCK_STREAM, proto=0, flags=flags)
        if not infos:
            raise OSError('getaddrinfo() returned empty list')
        completed = False
        try:
            # Bind one socket per resolved address (e.g. IPv4 and IPv6).
            for res in infos:
                af, socktype, proto, canonname, sa = res
                try:
                    sock = socket.socket(af, socktype, proto)
                except socket.error:
                    # Assume it's a bad family/type/protocol combination.
                    if self._debug:
                        logger.warning('create_server() failed to create '
                                       'socket.socket(%r, %r, %r)',
                                       af, socktype, proto, exc_info=True)
                    continue
                sockets.append(sock)
                if reuse_address:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                    True)
                # Disable IPv4/IPv6 dual stack support (enabled by
                # default on Linux) which makes a single socket
                # listen on both address families.
                if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
                    sock.setsockopt(socket.IPPROTO_IPV6,
                                    socket.IPV6_V6ONLY,
                                    True)
                try:
                    sock.bind(sa)
                except OSError as err:
                    raise OSError(err.errno, 'error while attempting '
                                  'to bind on address %r: %s'
                                  % (sa, err.strerror.lower()))
            completed = True
        finally:
            # On any failure, close every socket bound so far.
            if not completed:
                for sock in sockets:
                    sock.close()
    else:
        if sock is None:
            raise ValueError('Neither host/port nor sock were specified')
        sockets = [sock]
    server = Server(self, sockets)
    for sock in sockets:
        sock.listen(backlog)
        sock.setblocking(False)
        self._start_serving(protocol_factory, sock, ssl, server)
    if self._debug:
        logger.info("%r is serving", server)
    return server
@coroutine
def connect_read_pipe(self, protocol_factory, pipe):
    """Wrap a readable pipe-like object in a transport; return
    (transport, protocol) once the transport is ready."""
    protocol = protocol_factory()
    waiter = futures.Future(loop=self)
    transport = self._make_read_pipe_transport(pipe, protocol, waiter)
    yield from waiter
    if self._debug:
        logger.debug('Read pipe %r connected: (%r, %r)',
                     pipe.fileno(), transport, protocol)
    return transport, protocol
@coroutine
def connect_write_pipe(self, protocol_factory, pipe):
    """Wrap a writable pipe-like object in a transport; return
    (transport, protocol) once the transport is ready."""
    protocol = protocol_factory()
    waiter = futures.Future(loop=self)
    transport = self._make_write_pipe_transport(pipe, protocol, waiter)
    yield from waiter
    if self._debug:
        logger.debug('Write pipe %r connected: (%r, %r)',
                     pipe.fileno(), transport, protocol)
    return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
    """Emit one debug line describing a subprocess and its std streams."""
    parts = [msg]
    if stdin is not None:
        parts.append('stdin=%s' % _format_pipe(stdin))
    if stdout is not None and stderr == subprocess.STDOUT:
        # stderr is redirected into stdout; report them together.
        parts.append('stdout=stderr=%s' % _format_pipe(stdout))
    else:
        if stdout is not None:
            parts.append('stdout=%s' % _format_pipe(stdout))
        if stderr is not None:
            parts.append('stderr=%s' % _format_pipe(stderr))
    logger.debug(' '.join(parts))
@coroutine
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     universal_newlines=False, shell=True, bufsize=0,
                     **kwargs):
    """Run *cmd* through the shell; return (transport, protocol).

    cmd must be a str or bytes command line.  universal_newlines, shell
    and bufsize are only accepted for subprocess.Popen signature
    compatibility and must keep their default values; ValueError is
    raised otherwise.
    """
    if not isinstance(cmd, (bytes, str)):
        raise ValueError("cmd must be a string")
    if universal_newlines:
        raise ValueError("universal_newlines must be False")
    if not shell:
        raise ValueError("shell must be True")
    if bufsize != 0:
        raise ValueError("bufsize must be 0")
    protocol = protocol_factory()
    if self._debug:
        # don't log parameters: they may contain sensitive information
        # (password) and may be too long
        debug_log = 'run shell command %r' % cmd
        self._log_subprocess(debug_log, stdin, stdout, stderr)
    transport = yield from self._make_subprocess_transport(
        protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
    if self._debug:
        # Pass %-style arguments lazily so the message is only formatted
        # when INFO logging is actually enabled (was eager '%' before).
        logger.info('%s: %r', debug_log, transport)
    return transport, protocol
@coroutine
def subprocess_exec(self, protocol_factory, program, *args,
                    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE, universal_newlines=False,
                    shell=False, bufsize=0, **kwargs):
    """Execute *program* with *args*; return (transport, protocol).

    universal_newlines, shell and bufsize are only accepted for
    subprocess.Popen signature compatibility and must keep their default
    values (ValueError otherwise); every program argument must be str or
    bytes (TypeError otherwise).
    """
    if universal_newlines:
        raise ValueError("universal_newlines must be False")
    if shell:
        raise ValueError("shell must be False")
    if bufsize != 0:
        raise ValueError("bufsize must be 0")
    popen_args = (program,) + args
    for arg in popen_args:
        if not isinstance(arg, (str, bytes)):
            raise TypeError("program arguments must be "
                            "a bytes or text string, not %s"
                            % type(arg).__name__)
    protocol = protocol_factory()
    if self._debug:
        # don't log parameters: they may contain sensitive information
        # (password) and may be too long
        debug_log = 'execute program %r' % program
        self._log_subprocess(debug_log, stdin, stdout, stderr)
    transport = yield from self._make_subprocess_transport(
        protocol, popen_args, False, stdin, stdout, stderr,
        bufsize, **kwargs)
    if self._debug:
        # Pass %-style arguments lazily so the message is only formatted
        # when INFO logging is actually enabled (was eager '%' before).
        logger.info('%s: %r', debug_log, transport)
    return transport, protocol
def set_exception_handler(self, handler):
    """Set handler as the new event loop exception handler.

    If handler is None, the default exception handler will
    be set.

    If handler is a callable object, it should have a
    signature matching '(loop, context)', where 'loop'
    will be a reference to the active event loop, 'context'
    will be a dict object (see `call_exception_handler()`
    documentation for details about context).
    """
    if handler is not None and not callable(handler):
        raise TypeError('A callable object or None is expected, '
                        'got {!r}'.format(handler))
    self._exception_handler = handler
def default_exception_handler(self, context):
    """Default exception handler.

    This is called when an exception occurs and no exception
    handler is set, and can be called by a custom exception
    handler that wants to defer to the default behavior.

    The context parameter has the same meaning as in
    `call_exception_handler()`.
    """
    message = context.get('message')
    if not message:
        message = 'Unhandled exception in event loop'
    exception = context.get('exception')
    if exception is not None:
        # Include the full traceback of the captured exception.
        exc_info = (type(exception), exception, exception.__traceback__)
    else:
        exc_info = False
    # Render the remaining context entries in sorted-key order.
    log_lines = [message]
    for key in sorted(context):
        if key in {'message', 'exception'}:
            continue
        value = context[key]
        if key == 'source_traceback':
            tb = ''.join(traceback.format_list(value))
            value = 'Object created at (most recent call last):\n'
            value += tb.rstrip()
        else:
            value = repr(value)
        log_lines.append('{}: {}'.format(key, value))
    logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
    """Call the current event loop's exception handler.

    The context argument is a dict containing the following keys:

    - 'message': Error message;
    - 'exception' (optional): Exception object;
    - 'future' (optional): Future instance;
    - 'handle' (optional): Handle instance;
    - 'protocol' (optional): Protocol instance;
    - 'transport' (optional): Transport instance;
    - 'socket' (optional): Socket instance.

    New keys maybe introduced in the future.

    Note: do not overload this method in an event loop subclass.
    For custom exception handling, use the
    `set_exception_handler()` method.
    """
    if self._exception_handler is None:
        # No custom handler installed: use the default one.
        try:
            self.default_exception_handler(context)
        except Exception:
            # Second protection layer for unexpected errors
            # in the default implementation, as well as for subclassed
            # event loops with overloaded "default_exception_handler".
            logger.error('Exception in default exception handler',
                         exc_info=True)
    else:
        try:
            self._exception_handler(self, context)
        except Exception as exc:
            # Exception in the user set custom exception handler.
            try:
                # Let's try default handler.
                self.default_exception_handler({
                    'message': 'Unhandled error in exception handler',
                    'exception': exc,
                    'context': context,
                })
            except Exception:
                # Guard 'default_exception_handler' in case it is
                # overloaded.
                logger.error('Exception in default exception handler '
                             'while handling an unexpected error '
                             'in custom exception handler',
                             exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
if isinstance(handle, events.TimerHandle):
heapq.heappush(self._scheduled, handle)
else:
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
    """Like _add_callback() but called from a signal handler."""
    self._add_callback(handle)
    # Poke the self-pipe so a loop blocked in select() wakes up and
    # notices the newly queued handle.
    self._write_to_self()
def _run_once(self):
    """Run one full iteration of the event loop.

    This calls all currently ready callbacks, polls for I/O,
    schedules the resulting callbacks, and finally schedules
    'call_later' callbacks.
    """
    # Remove delayed calls that were cancelled from head of queue.
    while self._scheduled and self._scheduled[0]._cancelled:
        heapq.heappop(self._scheduled)

    # Choose the poll timeout: zero when callbacks are already runnable,
    # the delay until the earliest timer otherwise, or None (block
    # indefinitely) when there is nothing scheduled at all.
    timeout = None
    if self._ready:
        timeout = 0
    elif self._scheduled:
        # Compute the desired timeout.
        when = self._scheduled[0]._when
        timeout = max(0, when - self.time())

    if self._debug and timeout != 0:
        # Debug mode: time the select() call and log slow polls
        # (INFO above 1 second, DEBUG below).
        t0 = self.time()
        event_list = self._selector.select(timeout)
        dt = self.time() - t0
        if dt >= 1.0:
            level = logging.INFO
        else:
            level = logging.DEBUG
        nevent = len(event_list)
        if timeout is None:
            logger.log(level, 'poll took %.3f ms: %s events',
                       dt * 1e3, nevent)
        elif nevent:
            logger.log(level,
                       'poll %.3f ms took %.3f ms: %s events',
                       timeout * 1e3, dt * 1e3, nevent)
        elif dt >= 1.0:
            logger.log(level,
                       'poll %.3f ms took %.3f ms: timeout',
                       timeout * 1e3, dt * 1e3)
    else:
        event_list = self._selector.select(timeout)
    self._process_events(event_list)

    # Handle 'later' callbacks that are ready.
    # _clock_resolution pads end_time so timers due "right now" fire
    # despite clock granularity.
    end_time = self.time() + self._clock_resolution
    while self._scheduled:
        handle = self._scheduled[0]
        if handle._when >= end_time:
            break
        handle = heapq.heappop(self._scheduled)
        self._ready.append(handle)

    # This is the only place where callbacks are actually *called*.
    # All other places just add them to ready.
    # Note: We run all currently scheduled callbacks, but not any
    # callbacks scheduled by callbacks run this time around --
    # they will be run the next time (after another I/O poll).
    # Use an idiom that is thread-safe without using locks.
    ntodo = len(self._ready)
    for i in range(ntodo):
        handle = self._ready.popleft()
        if handle._cancelled:
            continue
        if self._debug:
            # Debug mode: warn about callbacks that block the loop for
            # longer than slow_callback_duration.
            t0 = self.time()
            handle._run()
            dt = self.time() - t0
            if dt >= self.slow_callback_duration:
                logger.warning('Executing %s took %.3f seconds',
                               _format_handle(handle), dt)
        else:
            handle._run()
    handle = None  # Needed to break cycles when an exception occurs.
def get_debug(self):
    """Return True if the event loop is running in debug mode."""
    return self._debug
def set_debug(self, enabled):
    """Enable or disable debug mode (extra checks, poll timing and
    slow-callback logging in _run_once)."""
    self._debug = enabled
| lgpl-3.0 |
sstoma/CellProfiler | cellprofiler/cpmath/tests/test_fastemd.py | 2 | 5347 | """test_fastemd.py test the FastEMD library wrapper
CellProfiler is distributed under the GNU General Public License,
but this file is licensed under the more permissive BSD license.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
import numpy as np
import unittest
from cellprofiler.cpmath.fastemd import *
class TestFastEMD(unittest.TestCase):
    """Exercise the FastEMD wrapper (emd_hat_int32) on small, hand-computed
    earth mover's distance problems, with and without flow output.

    Expected distances are written as sums of (mass * cost) terms so the
    arithmetic can be checked against the cost matrices by eye.
    """

    def check(
        self, p, q, c, expected_result,
        expected_flow = None,
        extra_mass_penalty = None,
        flow_type = EMD_NO_FLOW,
        gd_metric = False):
        """Run emd_hat on (p, q, c) and compare against the expectations.

        p, q -- source/sink histograms; c -- cost matrix;
        expected_flow is only checked when flow_type requests flows.
        """
        # Dispatch on dtype; only int32 is wired up in this test suite.
        if p.dtype == np.int32:
            fn = emd_hat_int32
            equal_test = self.assertEqual
            array_equal_test = np.testing.assert_array_equal
        else:
            self.fail("Unsupported dtype: %s" % repr(p.dtype))
        if flow_type == EMD_NO_FLOW:
            # Distance-only call returns a scalar.
            result = fn(p, q, c,
                        extra_mass_penalty=extra_mass_penalty,
                        flow_type=flow_type,
                        gd_metric=gd_metric)
            equal_test(result, expected_result)
        else:
            # Flow-returning call yields (distance, flow-matrix).
            result, f = fn(p, q, c,
                           extra_mass_penalty=extra_mass_penalty,
                           flow_type=flow_type,
                           gd_metric=gd_metric)
            equal_test(result, expected_result)
            array_equal_test(f, expected_flow)

    def test_01_01_no_flow(self):
        """Distance-only cases: equal/unequal histograms, transposed costs."""
        tests = (
            ([1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1],
             np.zeros((5, 5), np.int32), 0),
            ([1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1],
             2 - np.eye(5, dtype=np.int32), 5),
            ([1, 2, 3, 4, 5],
             [3, 3, 3, 3, 3],
             [[1, 9, 9, 9, 9],
              [2, 9, 9, 9, 9],
              [9, 9, 9, 9, 2],
              [9, 9, 1, 5, 9],
              [9, 3, 4, 9, 9]],
             1*1+2*2+3*2+(3*5 + 1*1)+(3*3+2*4)),
            ([1, 2, 3, 4, 5],
             [5, 10],
             [[1, 9],
              [5, 9],
              [3, 4],
              [9, 5],
              [9, 6]],
             1*1+2*5+(2*3+1*4)+4*5+5*6),
            # Same problem as above with p/q swapped and the cost matrix
            # transposed: EMD must be symmetric under this exchange.
            ([5, 10],
             [1, 2, 3, 4, 5],
             np.array([[1, 9],
                       [5, 9],
                       [3, 4],
                       [9, 5],
                       [9, 6]], np.int32).T,
             1*1+2*5+(2*3+1*4)+4*5+5*6)
        )
        for p, q, c, expected in tests:
            self.check(np.array(p, np.int32),
                       np.array(q, np.int32),
                       np.array(c, np.int32),
                       expected)

    def test_01_02_extra_default(self):
        """Unequal total mass: leftover mass is charged the default penalty
        (the maximum cost in the matrix, here 10)."""
        self.check(
            np.array([1, 2, 3, 4, 5], np.int32),
            np.array([5, 15], np.int32),
            np.array([[ 1, 10],
                      [ 5, 10],
                      [ 2,  3],
                      [10,  4],
                      [10,  6]], np.int32),
            1*1+2*5+(2*2+1*3)+4*4+5*6+5*10)

    def test_01_03_threshold(self):
        """Unequal total mass with an explicit extra_mass_penalty of 6."""
        self.check(
            np.array([1, 2, 3, 4, 5], np.int32),
            np.array([5, 15], np.int32),
            np.array([[ 1, 10],
                      [ 5, 10],
                      [ 2,  3],
                      [10,  4],
                      [10,  6]], np.int32),
            1*1+2*5+(2*2+1*3)+4*4+5*6+5*6,
            extra_mass_penalty=6)

    def test_02_01_flow(self):
        """Distance plus flow-matrix cases; each expected flow states how
        much mass moves from source bin i to sink bin j."""
        tests = (
            ([1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1],
             2 - np.eye(5, dtype=np.int32), 5,
             np.eye(5, dtype=np.int32)),
            ([1, 2, 3, 4, 5],
             [3, 3, 3, 3, 3],
             [[1, 9, 9, 9, 9],
              [2, 9, 9, 9, 9],
              [9, 9, 9, 9, 2],
              [9, 9, 1, 5, 9],
              [9, 3, 4, 9, 9]],
             1*1+2*2+3*2+(3*5 + 1*1)+(3*3+2*4),
             [[1, 0, 0, 0, 0],
              [2, 0, 0, 0, 0],
              [0, 0, 0, 0, 3],
              [0, 0, 1, 3, 0],
              [0, 3, 2, 0, 0]]),
            ([1, 2, 3, 4, 5],
             [5, 10],
             [[1, 9],
              [5, 9],
              [3, 4],
              [9, 5],
              [9, 6]],
             1*1+2*5+(2*3+1*4)+4*5+5*6,
             [[1, 0],
              [2, 0],
              [2, 1],
              [0, 4],
              [0, 5]]),
            # Transposed variant: flows must transpose along with costs.
            ([5, 10],
             [1, 2, 3, 4, 5],
             np.array([[1, 9],
                       [5, 9],
                       [3, 4],
                       [9, 5],
                       [9, 6]], np.int32).T,
             1*1+2*5+(2*3+1*4)+4*5+5*6,
             np.array([[1, 0],
                       [2, 0],
                       [2, 1],
                       [0, 4],
                       [0, 5]]).T)
        )
        for p, q, c, expected, expected_flow in tests:
            self.check(np.array(p, np.int32),
                       np.array(q, np.int32),
                       np.array(c, np.int32),
                       expected,
                       expected_flow = np.array(expected_flow),
                       flow_type=EMD_WITHOUT_EXTRA_MASS_FLOW)
| gpl-2.0 |
NewUnsigned/TeamTalk | win-client/3rdParty/src/json/devtools/batchbuild.py | 132 | 11585 | import collections
import itertools
import json
import os
import os.path
import re
import shutil
import string
import subprocess
import sys
import cgi
class BuildDesc:
    """Describes one CMake build configuration: environment variables to
    prepend, -D cache variables, a build type and a generator.

    Partial descriptions (only some fields set) are combined with
    merged_with() to produce the full build matrix.
    """

    def __init__(self, prepend_envs=None, variables=None, build_type=None, generator=None):
        self.prepend_envs = prepend_envs or [] # [ { "var": "value" } ]
        self.variables = variables or []
        self.build_type = build_type
        self.generator = generator

    def merged_with( self, build_desc ):
        """Returns a new BuildDesc by merging field content.

        Prefer build_desc fields to self fields for single valued field.
        """
        # List fields are concatenated; scalar fields take the other
        # description's value when it is set.
        return BuildDesc( self.prepend_envs + build_desc.prepend_envs,
                          self.variables + build_desc.variables,
                          build_desc.build_type or self.build_type,
                          build_desc.generator or self.generator )

    def env( self ):
        """Return a copy of os.environ with this description's prepend_envs
        applied (values are prepended, os.pathsep-separated, to any
        existing variable of the same upper-cased name)."""
        environ = os.environ.copy()
        for values_by_name in self.prepend_envs:
            for var, value in values_by_name.items():
                var = var.upper()
                # Python 2: values parsed from JSON are unicode; encode them
                # so they can be stored in the byte-string environment.
                if type(value) is unicode:
                    value = value.encode( sys.getdefaultencoding() )
                if var in environ:
                    environ[var] = value + os.pathsep + environ[var]
                else:
                    environ[var] = value
        return environ

    def cmake_args( self ):
        """Return the cmake command-line arguments for this description."""
        args = ["-D%s" % var for var in self.variables]
        # skip build type for Visual Studio solution as it cause warning
        if self.build_type and 'Visual' not in self.generator:
            args.append( "-DCMAKE_BUILD_TYPE=%s" % self.build_type )
        if self.generator:
            args.extend( ['-G', self.generator] )
        return args

    def __repr__( self ):
        return "BuildDesc( %s, build_type=%s )" % (" ".join( self.cmake_args()), self.build_type)
class BuildData:
    """Runs and records one build: cmake generation then the actual build,
    with each step's output logged into a dedicated work directory."""

    def __init__( self, desc, work_dir, source_dir ):
        # desc: the BuildDesc to build; work_dir: per-variant build dir;
        # source_dir: the CMake project root.
        self.desc = desc
        self.work_dir = work_dir
        self.source_dir = source_dir
        self.cmake_log_path = os.path.join( work_dir, 'batchbuild_cmake.log' )
        self.build_log_path = os.path.join( work_dir, 'batchbuild_build.log' )
        self.cmake_succeeded = False
        self.build_succeeded = False

    def execute_build(self):
        """Run the full build: wipe the work dir, generate makefiles,
        then build.  Returns True if the build step succeeded."""
        print 'Build %s' % self.desc
        self._make_new_work_dir( )
        self.cmake_succeeded = self._generate_makefiles( )
        if self.cmake_succeeded:
            self.build_succeeded = self._build_using_makefiles( )
        return self.build_succeeded

    def _generate_makefiles(self):
        # Run the cmake configure/generate step; returns True on success.
        print ' Generating makefiles: ',
        cmd = ['cmake'] + self.desc.cmake_args( ) + [os.path.abspath( self.source_dir )]
        succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.cmake_log_path )
        print 'done' if succeeded else 'FAILED'
        return succeeded

    def _build_using_makefiles(self):
        # Run 'cmake --build'; returns True on success.
        print ' Building:',
        cmd = ['cmake', '--build', self.work_dir]
        if self.desc.build_type:
            cmd += ['--config', self.desc.build_type]
        succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.build_log_path )
        print 'done' if succeeded else 'FAILED'
        return succeeded

    def _execute_build_subprocess(self, cmd, env, log_path):
        # Run cmd in the work dir, capturing stdout+stderr into one stream,
        # and write command line, output and exit code to log_path.
        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
                                    env=env )
        stdout, _ = process.communicate( )
        succeeded = (process.returncode == 0)
        with open( log_path, 'wb' ) as flog:
            log = ' '.join( cmd ) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
            flog.write( fix_eol( log ) )
        return succeeded

    def _make_new_work_dir(self):
        # Start from a clean slate: remove any previous work dir, then
        # (re)create it.
        if os.path.isdir( self.work_dir ):
            print ' Removing work directory', self.work_dir
            shutil.rmtree( self.work_dir, ignore_errors=True )
        if not os.path.isdir( self.work_dir ):
            os.makedirs( self.work_dir )
def fix_eol(stdout):
    """Normalize line endings in *stdout*.

    Collapses any run of carriage returns followed by a line feed (for
    example the '\\r\\r\\n' sequences that cmake --build produces on
    Windows) into the platform's native line separator.
    """
    newline_run = re.compile('\r*\n')
    return newline_run.sub(os.linesep, stdout)
def load_build_variants_from_config(config_path):
    """Parse the JSON file at *config_path* and return a dict mapping each
    axis name to the list of partial BuildDesc it contributes.

    Each entry of 'cmake_variants' describes one axis via exactly one of
    the keys 'generators', 'variables' or 'build_types'.
    """
    with open(config_path, 'rb') as fconfig:
        config = json.load(fconfig)
    variant_axes = config['cmake_variants']

    descs_by_axis = collections.defaultdict(list)
    for axis in variant_axes:
        partial_descs = []
        if "generators" in axis:
            # Each generator entry may carry environment prepends shared
            # by all its generator names.
            for generator_data in axis["generators"]:
                env_prepend = generator_data.get("env_prepend")
                for generator in generator_data["generator"]:
                    partial_descs.append(
                        BuildDesc(generator=generator,
                                  prepend_envs=env_prepend))
        elif "variables" in axis:
            partial_descs = [BuildDesc(variables=variables)
                             for variables in axis["variables"]]
        elif "build_types" in axis:
            partial_descs = [BuildDesc(build_type=build_type)
                             for build_type in axis["build_types"]]
        descs_by_axis[axis["name"]].extend(partial_descs)
    return descs_by_axis
def generate_build_variants(build_descs_by_axis):
    """Returns a list of BuildDesc generated for the partial BuildDesc for each axis.

    Computes the cartesian product of all axes: every returned BuildDesc
    is the merge (via merged_with) of one partial description from each
    axis.  An empty input yields an empty list.
    """
    # Fix: the original bound an unused local
    # ('axis_names = build_descs_by_axis.keys()'); removed.
    build_descs = []
    for axis_name, axis_build_descs in build_descs_by_axis.items():
        if build_descs:
            # For each existing build_desc and each axis build desc,
            # create a new merged build_desc.
            build_descs = [
                prototype_build_desc.merged_with(axis_build_desc)
                for prototype_build_desc, axis_build_desc
                in itertools.product(build_descs, axis_build_descs)]
        else:
            # First axis: its partial descriptions seed the product.
            build_descs = axis_build_descs
    return build_descs
# HTML report skeleton used by generate_html_report(): one table row per
# generator, one column per (variables, build type) combination.  The
# $-placeholders are filled via string.Template.substitute().
HTML_TEMPLATE = string.Template('''<html>
<head>
<title>$title</title>
<style type="text/css">
td.failed {background-color:#f08080;}
td.ok {background-color:#c0eec0;}
</style>
</head>
<body>
<table border="1">
<thead>
<tr>
<th>Variables</th>
$th_vars
</tr>
<tr>
<th>Build type</th>
$th_build_types
</tr>
</thead>
<tbody>
$tr_builds
</tbody>
</table>
</body></html>''')
def generate_html_report( html_report_path, builds ):
    """Write an HTML matrix report of *builds* to *html_report_path*.

    Rows are generators; columns are (variables, build type) pairs.  Each
    cell links to the cmake/build logs and is colored by success/failure.
    """
    report_dir = os.path.dirname( html_report_path )
    # Vertical axis: generator
    # Horizontal: variables, then build_type
    builds_by_generator = collections.defaultdict( list )
    variables = set()
    build_types_by_variable = collections.defaultdict( set )
    build_by_pos_key = {} # { (generator, var_key, build_type): build }
    for build in builds:
        builds_by_generator[build.desc.generator].append( build )
        var_key = tuple(sorted(build.desc.variables))
        variables.add( var_key )
        build_types_by_variable[var_key].add( build.desc.build_type )
        pos_key = (build.desc.generator, var_key, build.desc.build_type)
        build_by_pos_key[pos_key] = build
    variables = sorted( variables )
    # Build the two header rows: variable groups spanning their build
    # types, then one cell per build type.
    th_vars = []
    th_build_types = []
    for variable in variables:
        build_types = sorted( build_types_by_variable[variable] )
        nb_build_type = len(build_types_by_variable[variable])
        th_vars.append( '<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape( ' '.join( variable ) ) ) )
        for build_type in build_types:
            th_build_types.append( '<th>%s</th>' % cgi.escape(build_type) )
    # One <tr> per generator; empty cell where no build exists for a
    # (generator, variables, build type) position.
    tr_builds = []
    for generator in sorted( builds_by_generator ):
        tds = [ '<td>%s</td>\n' % cgi.escape( generator ) ]
        for variable in variables:
            build_types = sorted( build_types_by_variable[variable] )
            for build_type in build_types:
                pos_key = (generator, variable, build_type)
                build = build_by_pos_key.get(pos_key)
                if build:
                    cmake_status = 'ok' if build.cmake_succeeded else 'FAILED'
                    build_status = 'ok' if build.build_succeeded else 'FAILED'
                    cmake_log_url = os.path.relpath( build.cmake_log_path, report_dir )
                    build_log_url = os.path.relpath( build.build_log_path, report_dir )
                    td = '<td class="%s"><a href="%s" class="%s">CMake: %s</a>' % (
                        build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status)
                    # The build link only makes sense if cmake succeeded.
                    if build.cmake_succeeded:
                        td += '<br><a href="%s" class="%s">Build: %s</a>' % (
                            build_log_url, build_status.lower(), build_status)
                    td += '</td>'
                else:
                    td = '<td></td>'
                tds.append( td )
        tr_builds.append( '<tr>%s</tr>' % '\n'.join( tds ) )
    html = HTML_TEMPLATE.substitute(
        title='Batch build report',
        th_vars=' '.join(th_vars),
        th_build_types=' '.join( th_build_types),
        tr_builds='\n'.join( tr_builds ) )
    with open( html_report_path, 'wt' ) as fhtml:
        fhtml.write( html )
    print 'HTML report generated in:', html_report_path
def main():
    """Command-line entry point: parse arguments, expand the build matrix
    from the JSON config(s), run every build and emit the HTML report."""
    usage = r"""%prog WORK_DIR SOURCE_DIR CONFIG_JSON_PATH [CONFIG2_JSON_PATH...]
Build a given CMake based project located in SOURCE_DIR with multiple generators/options.dry_run
as described in CONFIG_JSON_PATH building in WORK_DIR.
Example of call:
python devtools\batchbuild.py e:\buildbots\jsoncpp\build . devtools\agent_vmw7.json
"""
    from optparse import OptionParser
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = True
#    parser.add_option('-v', '--verbose', dest="verbose", action='store_true',
#        help="""Be verbose.""")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()
    if len(args) < 3:
        parser.error( "Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH." )
    work_dir = args[0]
    source_dir = args[1].rstrip('/\\')
    config_paths = args[2:]
    # Validate all config files up front before starting any build.
    for config_path in config_paths:
        if not os.path.isfile( config_path ):
            parser.error( "Can not read: %r" % config_path )

    # generate build variants
    build_descs = []
    for config_path in config_paths:
        build_descs_by_axis = load_build_variants_from_config( config_path )
        build_descs.extend( generate_build_variants( build_descs_by_axis ) )
    print 'Build variants (%d):' % len(build_descs)
    # assign build directory for each variant
    if not os.path.isdir( work_dir ):
        os.makedirs( work_dir )
    builds = []
    # matrix-dir-map.txt records which numbered sub-directory holds which
    # build variant.
    with open( os.path.join( work_dir, 'matrix-dir-map.txt' ), 'wt' ) as fmatrixmap:
        for index, build_desc in enumerate( build_descs ):
            build_desc_work_dir = os.path.join( work_dir, '%03d' % (index+1) )
            builds.append( BuildData( build_desc, build_desc_work_dir, source_dir ) )
            fmatrixmap.write( '%s: %s\n' % (build_desc_work_dir, build_desc) )
    for build in builds:
        build.execute_build()
    html_report_path = os.path.join( work_dir, 'batchbuild-report.html' )
    generate_html_report( html_report_path, builds )
    print 'Done'

if __name__ == '__main__':
    main()
| apache-2.0 |
dongjiaqiang/thrift | test/py/TSimpleJSONProtocolTest.py | 69 | 4068 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import glob
from optparse import OptionParser
# Parse the --genpydir option (location of the Thrift-generated Python
# code) before unittest ever sees the command line, then put both the
# generated code and the built thrift library on sys.path.
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:] # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest.ttypes import *
from thrift.protocol import TJSONProtocol
from thrift.transport import TTransport
import json
import unittest
class SimpleJSONProtocolTest(unittest.TestCase):
    """Tests for TSimpleJSONProtocol.

    The protocol is write-only, so objects are serialized with Thrift and
    the output is validated by parsing it back with the json module.
    """
    protocol_factory = TJSONProtocol.TSimpleJSONProtocolFactory()

    def _assertDictEqual(self, a ,b, msg=None):
        # Dict-equality assertion that also works on Pythons older
        # than 2.7 (which introduced assertDictEqual).
        if hasattr(self, 'assertDictEqual'):
            # assertDictEqual only in Python 2.7. Depends on your machine.
            self.assertDictEqual(a, b, msg)
            return
        # Substitute implementation not as good as unittest library's
        self.assertEquals(len(a), len(b), msg)
        for k, v in a.iteritems():
            self.assertTrue(k in b, msg)
            self.assertEquals(b.get(k), v, msg)

    def _serialize(self, obj):
        # Serialize obj through the protocol under test; returns the raw
        # bytes written to an in-memory transport.
        trans = TTransport.TMemoryBuffer()
        prot = self.protocol_factory.getProtocol(trans)
        obj.write(prot)
        return trans.getvalue()

    def _deserialize(self, objtype, data):
        # Attempt to read data back as an objtype instance (expected to
        # fail for this write-only protocol).
        prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
        ret = objtype()
        ret.read(prot)
        return ret

    def testWriteOnly(self):
        # TSimpleJSONProtocol does not support reading.
        self.assertRaises(NotImplementedError,
                          self._deserialize, VersioningTestV1, '{}')

    def testSimpleMessage(self):
        # A flat struct must round-trip field-for-field through JSON.
        v1obj = VersioningTestV1(
            begin_in_both=12345,
            old_string='aaa',
            end_in_both=54321)
        expected = dict(begin_in_both=v1obj.begin_in_both,
                        old_string=v1obj.old_string,
                        end_in_both=v1obj.end_in_both)
        actual = json.loads(self._serialize(v1obj))
        self._assertDictEqual(expected, actual)

    def testComplicated(self):
        # Exercise every field kind: scalars, nested struct, list, set,
        # map and string.
        v2obj = VersioningTestV2(
            begin_in_both=12345,
            newint=1,
            newbyte=2,
            newshort=3,
            newlong=4,
            newdouble=5.0,
            newstruct=Bonk(message="Hello!", type=123),
            newlist=[7,8,9],
            newset=set([42,1,8]),
            newmap={1:2,2:3},
            newstring="Hola!",
            end_in_both=54321)
        expected = dict(begin_in_both=v2obj.begin_in_both,
                        newint=v2obj.newint,
                        newbyte=v2obj.newbyte,
                        newshort=v2obj.newshort,
                        newlong=v2obj.newlong,
                        newdouble=v2obj.newdouble,
                        newstruct=dict(message=v2obj.newstruct.message,
                                       type=v2obj.newstruct.type),
                        newlist=v2obj.newlist,
                        newset=list(v2obj.newset),
                        newmap=v2obj.newmap,
                        newstring=v2obj.newstring,
                        end_in_both=v2obj.end_in_both)
        # Need to load/dump because map keys get escaped.
        expected = json.loads(json.dumps(expected))
        actual = json.loads(self._serialize(v2obj))
        self._assertDictEqual(expected, actual)

if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
cloxp/cloxp-install | win/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input_test.py | 604 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
  """Tests for DependencyGraphNode.FindCycles().

  Each test wires up a small graph over five nodes ('a'..'e') and checks
  that FindCycles() reports exactly the expected cycle paths.
  """

  def setUp(self):
    self.nodes = {}
    for x in ('a', 'b', 'c', 'd', 'e'):
      self.nodes[x] = gyp.input.DependencyGraphNode(x)

  def _create_dependency(self, dependent, dependency):
    # Record the edge on both endpoints, as gyp.input expects.
    dependent.dependencies.append(dependency)
    dependency.dependents.append(dependent)

  def test_no_cycle_empty_graph(self):
    # No edges at all: no node can be part of a cycle.
    for label, node in self.nodes.iteritems():
      self.assertEquals([], node.FindCycles())

  def test_no_cycle_line(self):
    # a -> b -> c -> d is acyclic.
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])
    self._create_dependency(self.nodes['c'], self.nodes['d'])

    for label, node in self.nodes.iteritems():
      self.assertEquals([], node.FindCycles())

  def test_no_cycle_dag(self):
    # Diamond-ish DAG: shared dependency is not a cycle.
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['a'], self.nodes['c'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])

    for label, node in self.nodes.iteritems():
      self.assertEquals([], node.FindCycles())

  def test_cycle_self_reference(self):
    # A node depending on itself is the smallest possible cycle.
    self._create_dependency(self.nodes['a'], self.nodes['a'])

    self.assertEquals([(self.nodes['a'], self.nodes['a'])],
                      self.nodes['a'].FindCycles())

  def test_cycle_two_nodes(self):
    # a <-> b: each node reports the cycle starting from itself.
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['a'])

    self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])],
                      self.nodes['a'].FindCycles())
    self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])],
                      self.nodes['b'].FindCycles())

  def test_two_cycles(self):
    # Two overlapping two-node cycles sharing node b; order of the
    # reported cycles is not guaranteed, so membership is checked.
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['a'])

    self._create_dependency(self.nodes['b'], self.nodes['c'])
    self._create_dependency(self.nodes['c'], self.nodes['b'])

    cycles = self.nodes['a'].FindCycles()
    self.assertTrue(
       (self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles)
    self.assertTrue(
       (self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles)
    self.assertEquals(2, len(cycles))

  def test_big_cycle(self):
    # Single five-node cycle a -> b -> c -> d -> e -> a.
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])
    self._create_dependency(self.nodes['c'], self.nodes['d'])
    self._create_dependency(self.nodes['d'], self.nodes['e'])
    self._create_dependency(self.nodes['e'], self.nodes['a'])

    self.assertEquals([(self.nodes['a'],
                        self.nodes['b'],
                        self.nodes['c'],
                        self.nodes['d'],
                        self.nodes['e'],
                        self.nodes['a'])],
                      self.nodes['a'].FindCycles())

if __name__ == '__main__':
  unittest.main()
| mit |
fangxingli/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/styles/default.py | 75 | 2532 | # -*- coding: utf-8 -*-
"""
pygments.styles.default
~~~~~~~~~~~~~~~~~~~~~~~
The default highlighting style.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class DefaultStyle(Style):
    """
    The default style (inspired by Emacs 22).
    """

    background_color = "#f8f8f8"
    default_style = ""

    # Maps each token type to a style definition string; tokens not listed
    # inherit from their parent token type.
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "italic #408080",
        Comment.Preproc:           "noitalic #BC7A00",

        #Keyword:                   "bold #AA22FF",
        Keyword:                   "bold #008000",
        Keyword.Pseudo:            "nobold",
        Keyword.Type:              "nobold #B00040",

        Operator:                  "#666666",
        Operator.Word:             "bold #AA22FF",

        Name.Builtin:              "#008000",
        Name.Function:             "#0000FF",
        Name.Class:                "bold #0000FF",
        Name.Namespace:            "bold #0000FF",
        Name.Exception:            "bold #D2413A",
        Name.Variable:             "#19177C",
        Name.Constant:             "#880000",
        Name.Label:                "#A0A000",
        Name.Entity:               "bold #999999",
        Name.Attribute:            "#7D9029",
        Name.Tag:                  "bold #008000",
        Name.Decorator:            "#AA22FF",

        String:                    "#BA2121",
        String.Doc:                "italic",
        String.Interpol:           "bold #BB6688",
        String.Escape:             "bold #BB6622",
        String.Regex:              "#BB6688",
        #String.Symbol:             "#B8860B",
        String.Symbol:             "#19177C",
        String.Other:              "#008000",
        Number:                    "#666666",

        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #000080",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",

        Error:                     "border:#FF0000"
    }
| apache-2.0 |
edureis95/xbmc | tools/EventClients/examples/python/example_button2.py | 228 | 2074 | #!/usr/bin/python
# This is a simple example showing how you can send a key press event
# to XBMC in a non-queued fashion to achieve a button pressed down
# event i.e. a key press that repeats.
# The repeat interval is currently hard coded in XBMC but that might
# change in the future.
# NOTE: Read the comments in 'example_button1.py' for a more detailed
# explanation.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
    """Demonstrate XBMC's EventServer button protocol over UDP: a HELO
    handshake, several press-and-hold (repeating) key events using
    different button addressing schemes, then release and disconnect."""
    import time
    import sys

    host = "localhost"
    port = 9777
    addr = (host, port)

    sock = socket(AF_INET,SOCK_DGRAM)

    # First packet must be HELO and can contain an icon
    packet = PacketHELO("Example Remote", ICON_PNG,
                        "../../icons/bluetooth.png")
    packet.send(sock, addr)

    # wait for notification window to close (in XBMC)
    time.sleep(5)

    # send a up key press using the xbox gamepad map "XG" and button
    # name "dpadup" ( see PacketBUTTON doc for more details)
    packet = PacketBUTTON(map_name="XG", button_name="dpadup")
    packet.send(sock, addr)

    # wait for a few seconds to see its effect
    time.sleep(5)

    # send a down key press using the raw keyboard code
    packet = PacketBUTTON(code=0x28)
    packet.send(sock, addr)

    # wait for a few seconds to see its effect
    time.sleep(5)

    # send a right key press using the keyboard map "KB" and button
    # name "right"
    packet = PacketBUTTON(map_name="KB", button_name="right")
    packet.send(sock, addr)

    # wait for a few seconds to see its effect
    time.sleep(5)

    # that's enough, release the button. During release, button code
    # doesn't matter.
    packet = PacketBUTTON(code=0x28, down=0)
    packet.send(sock, addr)

    # ok we're done, close the connection
    # Note that closing the connection clears any repeat key that is
    # active. So in this example, the actual release button event above
    # need not have been sent.
    packet = PacketBYE()
    packet.send(sock, addr)

if __name__=="__main__":
    main()
h3biomed/ansible-modules-core | cloud/azure/azure_rm_storageaccount_facts.py | 50 | 6444 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: azure_rm_storageaccount_facts
version_added: "2.1"
short_description: Get storage account facts.
description:
- Get facts for one storage account or all storage accounts within a resource group.
options:
name:
description:
- Only show results for a specific account.
required: false
default: null
resource_group:
description:
- Limit results to a resource group. Required when filtering by name.
required: false
default: null
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one account
azure_rm_storageaccount_facts:
resource_group: Testing
name: clh0002
- name: Get facts for all accounts in a resource group
azure_rm_storageaccount_facts:
resource_group: Testing
- name: Get facts for all accounts by tags
azure_rm_storageaccount_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_storageaccounts:
description: List of storage account dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001",
"location": "eastus2",
"name": "testaccount001",
"properties": {
"accountType": "Standard_LRS",
"creationTime": "2016-03-28T02:46:58.290113Z",
"primaryEndpoints": {
"blob": "https://testaccount001.blob.core.windows.net/",
"file": "https://testaccount001.file.core.windows.net/",
"queue": "https://testaccount001.queue.core.windows.net/",
"table": "https://testaccount001.table.core.windows.net/"
},
"primaryLocation": "eastus2",
"provisioningState": "Succeeded",
"statusOfPrimary": "Available"
},
"tags": {},
"type": "Microsoft.Storage/storageAccounts"
}]
'''
AZURE_OBJECT_CLASS = 'StorageAccount'
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
class AzureRMStorageAccountFacts(AzureRMModuleBase):
    """Gather facts for Azure storage accounts.

    Three modes, selected by the supplied parameters:
      - single account: name + resource_group;
      - all accounts in a resource group: resource_group only;
      - all accounts in the subscription: no filters.
    Results can additionally be filtered by tags.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list'),
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_storageaccounts=[])
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMStorageAccountFacts, self).__init__(self.module_arg_spec,
                                                         supports_tags=False,
                                                         facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: validate parameters and dispatch to the right lister."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # Filtering by name only makes sense within a resource group.
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_facts']['azure_storageaccounts'] = self.get_account()
        elif self.resource_group:
            self.results['ansible_facts']['azure_storageaccounts'] = self.list_resource_group()
        else:
            self.results['ansible_facts']['azure_storageaccounts'] = self.list_all()

        return self.results

    def get_account(self):
        """Return a single-element list of facts for self.name, or []."""
        self.log('Get properties for account {0}'.format(self.name))
        account = None
        result = []
        try:
            account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
        except CloudError:
            # A missing account is not an error for a facts module.
            pass

        if account and self.has_tags(account.tags, self.tags):
            result = [self.serialize_obj(account, AZURE_OBJECT_CLASS)]
        return result

    def list_resource_group(self):
        """Return facts for every matching account in self.resource_group."""
        self.log('List items')
        try:
            response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group)
        except Exception as exc:
            self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
        return results

    def list_all(self):
        """Return facts for every matching account in the subscription."""
        self.log('List all items')
        try:
            # Bug fix: this path is only reached when no resource group was
            # given (self.resource_group is None), so the subscription-wide
            # list() must be used instead of list_by_resource_group(None).
            response = self.storage_client.storage_accounts.list()
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
        return results
def main():
    """Module entry point: instantiating the facts class runs the module."""
    AzureRMStorageAccountFacts()
if __name__ == '__main__':
    main()
| gpl-3.0 |
wasade/picrust | picrust/ace.py | 1 | 3860 | #!/usr/bin/env python
# Author: Morgan Langille (morgan.g.i.langille@gmail.com)
# count_wagner.py
""" Application controller for the `ace' function within the R package `ape`.
File created on Feb 2012.
"""
from __future__ import division
from cogent.util.table import Table
from os.path import split, splitext
from os import remove, environ
from glob import glob
from cogent.app.util import CommandLineApplication, ResultPath, get_tmp_filename
from cogent.app.parameters import ValuedParameter, FilePath
from cogent import LoadTree
from cogent import LoadTable
from picrust.util import get_picrust_project_dir
from os.path import join
# Standard PICRUSt project/package metadata.
__author__ = "Morgan Langille"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Morgan Langille", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.0.0-dev"
__maintainer__ = "Morgan Langille"
__email__ = "morgan.g.i.langille@gmail.com"
__status__ = "Development"
class Ace(CommandLineApplication):
    """Application controller for the 'ace' function within the 'ape' R package."""
    # Path to the bundled R wrapper script that actually calls ape::ace.
    ace_script_fp = join(get_picrust_project_dir(),'picrust','support_files','R','ace.R')
    _command = ace_script_fp
    _input_handler = '_input_as_string'
    _suppress_stdout = False
    _suppress_stderr = False
    # Overridden to call script with R rather than directly - this is useful
    # because permisssions on the script are set to 644 when PICRUSt is installed
    # with setup.py. This is fine if we're executing it with R, but not if we're
    # trying to execute it directly.
    def _get_base_command(self):
        """ Returns the full command string

            input_arg: the argument to the command which represents the input
            to the program, this will be a string, either
            representing input or a filename to get input from
        """
        command_parts = []
        # Append a change directory to the beginning of the command to change
        # to self.WorkingDir before running the command
        # WorkingDir should be in quotes -- filenames might contain spaces
        cd_command = ''.join(['cd ',str(self.WorkingDir),';'])
        if self._command is None:
            raise ApplicationError, '_command has not been set.'
        command = self._command
        parameters = self.Parameters
        command_parts.append(cd_command)
        # Build "R -f <script> --args <param values>" so the script is run
        # through the interpreter instead of being executed directly.
        command_parts.append("R")
        command_parts.append("-f")
        command_parts.append(command)
        command_parts.append("--args")
        command_parts.append(self._command_delimiter.join(filter(\
            None,(map(str,parameters.values())))))
        return self._command_delimiter.join(command_parts).strip()
    BaseCommand = property(_get_base_command)
def ace_for_picrust(tree_path,trait_table_path,method='pic',HALT_EXEC=False):
    '''Runs the Ace application controller given path of tree and trait table and returns a Table

    tree_path        -- path to the Newick tree file
    trait_table_path -- path to the tab-separated trait table
    method           -- ancestral state reconstruction method passed to ape::ace
    HALT_EXEC        -- debugging flag forwarded to the app controller

    Returns a (counts Table, probabilities Table) pair.
    '''
    #initialize Ace app controller
    ace=Ace(HALT_EXEC=HALT_EXEC)
    tmp_output_count_path=get_tmp_filename()
    tmp_output_prob_path=get_tmp_filename()
    #quote file names (paths may contain spaces)
    tree_path='"{0}"'.format(tree_path)
    trait_table_path='"{0}"'.format(trait_table_path)
    # Positional argument string consumed by the R script via --args.
    as_string = " ".join([tree_path,trait_table_path,method,tmp_output_count_path,tmp_output_prob_path])
    #Run ace here
    result = ace(data=as_string)
    #Load the output into Table objects
    try:
        asr_table=LoadTable(filename=tmp_output_count_path,header=True,sep='\t')
    except IOError:
        # Missing output means R failed; surface its stderr to the caller.
        raise RuntimeError,\
         ("R reported an error on stderr:"
          " %s" % "\n".join(result["StdErr"].readlines()))
    asr_prob_table=LoadTable(filename=tmp_output_prob_path,header=True,sep='\t')
    #Remove tmp files
    remove(tmp_output_count_path)
    remove(tmp_output_prob_path)
    return asr_table,asr_prob_table
| gpl-3.0 |
frederick623/HTI | fa_util_py/HTI_ExportSIToMSS.py | 1 | 16771 | import ael
import acm
import time
import datetime
import os
import stat
import smtplib
import shutil
import string
#import HTI_DB_Functions
from datetime import date
from datetime import datetime
import shlex
#import HTI_MailFunction
import os
# Column positions of the rows returned by the main ASQL query in ael_main.
SEQNBR = 0
TRDNBR = 1
SETTTYPE = 2
VALUEDAY = 3
TEXT1 = 4
PRFID = 5
AMOUNT = 6
ISIN = 7
INSTYPE = 8
UI_ISIN = 9
'''
select s.seqnbr, t.trdnbr, s.type, s.value_day, t.text1
from settlement s, trade t, instrument i
where s.trdnbr = t.trdnbr
and t.insaddr = i.insaddr
and s.status = 'Released'
and s.updat_time >= Today and s.updat_time < Today + 1
and s.type in ('Security Nominal', 'End Security')
'''
# ODBC DSN name.  NOTE(review): appears unused in this file -- confirm before removing.
dsn = "HTIConnString"
# Front Arena trading book -> MSS settlement account number.
msse_fa_acc_mapping = {'Trading Book 5': '02-0238771-22',
                       'Trading Book 6': '02-0228640-30',
                       'Trading Book 7': '02-0228640-30',
                       'Trading Book 8': '02-0228640-30',
                       'Trading Book 13': '02-0263880-22',
                       'Trading Book 14': '02-0228640-30',
                       'Trading Book 17': '02-0238771-22'}
# NOTE(review): pfs also appears unused in this file -- confirm.
pfs = acm.FPhysicalPortfolio.Select('')
def get_dates():
    """Choices for the dialog's Date field: the TODAY keyword plus one fixed date."""
    choices = ["TODAY"]
    choices.append(ael.date('2015-05-28'))
    return choices
def get_all_setttypes():
    """Return the settlement types excluded from the cash aggregation query.

    Bug fix: the original built the list but never returned it, so callers
    (e.g. the ael_variables dialog definition) received None instead of the
    expected list of type names.
    """
    settType = []
    settType.append('Coupon')
    settType.append('Coupon Transfer')
    return settType
def get_all_instypes():
    """Return the sorted list of Front Arena instrument type names shown in the dialog."""
    return sorted([
        'BasketRepo/Reverse', 'BasketSecurityLoan', 'Bill', 'Bond',
        'BondIndex', 'BuySellback', 'CallAccount', 'Cap',
        'CashCollateral', 'CD', 'Certificate', 'CFD',
        'CLN', 'Collar', 'Collateral', 'Combination',
        'Commodity', 'Commodity Index', 'Commodity Variant', 'Convertible',
        'Credit Balance', 'CreditDefaultSwap', 'CreditIndex', 'Curr',
        'CurrSwap', 'Deposit', 'Depositary Receipt', 'Dividend Point Index',
        'DualCurrBond', 'EquityIndex', 'EquitySwap', 'ETF',
        'Flexi Bond', 'Floor', 'FRA', 'FreeDefCF',
        'FRN', 'Fund', 'Future/Forward', 'Fx Rate',
        'FXOptionDatedFwd', 'FxSwap', 'IndexLinkedBond', 'IndexLinkedSwap',
        'LEPO', 'MBS/ABS', 'MultiAsset', 'MultiOption',
        'None', 'Option', 'Portfolio Swap', 'PriceIndex',
        'PriceSwap', 'PromisLoan', 'RateIndex', 'Repo/Reverse',
        'SecurityLoan', 'Stock', 'StockRight', 'Swap',
        'TotalReturnSwap', 'UnKnown', 'VarianceSwap', 'VolatilitySwap',
        'Warrant', 'Zero',
    ])
def get_all_portfolios():
    """Return the display ids of every portfolio, sorted alphabetically."""
    return sorted(port.display_id() for port in ael.Portfolio.select())
def get_all_acquirers():
    """Return the display ids of all internal-department parties, sorted."""
    return sorted(acq.display_id() for acq in ael.Party.select("type = 'Intern Dept'"))
def get_all_fileMsgType():
    """Return the supported export file message types (currently only 'SI', Sec In/Out)."""
    return ['SI']
def disable_variables(variables, enable=0):
    """Set the enabled flag (index 9) on each named ael_variables row.

    variables -- iterable of dialog variable names (row[0] values) to update
    enable    -- new flag value; 0 disables the field, 1 enables it
    """
    wanted = set(variables)
    for row in ael_variables:
        if row[0] in wanted:
            row[9] = enable
def get_all_status():
    """Return the settlement status choices, sorted alphabetically."""
    return sorted(['Released', 'Pending Closure', 'Closed'])
# Dialog definition consumed by Front Arena before ael_main runs.  Each row:
# [name, label, type, candidates, default, mandatory, multi-select,
#  description, callback, enabled] -- index 9 is what disable_variables toggles.
ael_variables = [['acquirers', 'Acquirers', 'string', get_all_acquirers(), 'HTIFP', 1, 1, 'Acquirers', None, 1], \
                ['sett_status', 'Settlement Status', 'string', get_all_status(), 'Released', 1, 1, 'Settlement Status', None, 1], \
                ['instypes', 'Instrument Types', 'string', get_all_instypes(), 'Bond', 1, 1, 'Instrument Types', None, 1], \
                ['not_setttypes', 'Not Settlement Types', 'string', get_all_setttypes(), 'Coupon,Coupon Transfer', 1, 1, 'Not Settlement Types', None, 1], \
                ['pf', 'Portfolio', 'string', get_all_portfolios(), None, 1, 1, 'Portfolio', None, 1], \
                ['filePath', 'File Path', 'string', None, 'c:\\temp', 1, 0, 'File Name', None, 1], \
                ['fileName', 'File Name', 'string', None, '<FileMsgType>_<YYYYMMDDhhmmss>.csv', 1, 0, 'File Name', None, 0], \
                ['participant_id', 'Participant Id', 'string', None, 'B01143', 1, 0, 'Haitong Participant Id', None, 1], \
                ['asofdate', 'Date', 'string', get_dates(), "TODAY", 1, 0, 'Date', None, 1], \
                ['fileMsgType', 'File Message Type', 'string', get_all_fileMsgType(), 'SI', 1, 0, 'File Message Type', None, 0]]
def EmailNotify(subject, messg, RECIPIENTS):
session = smtplib.SMTP(smtpserver)
BODY = string.join((
"From: %s" % SENDER,
"To: %s" % RECIPIENTS,
"Subject: %s" % subject,
"",
messg
), "\r\n")
#print BODY
if AUTHREQUIRED:
session.login(smtpuser, smtppass)
smtpresult = session.sendmail(SENDER, RECIPIENTS, BODY)
if smtpresult:
errstr = ''
for recip in smtpresult.keys():
errstr = 'Could not delivery mail to: %s Server said: %s %s %s' % (recip, smtpresult[recip][0], smtpresult[recip][1])
raise smtplib.SMTPException, errstr
session.quit()
def ValidPortfolio(array_pf, portfolio):
    """Return True if portfolio is one of the ids in array_pf, else False.

    Idiom: a membership test replaces the original manual scan loop.
    """
    return portfolio in array_pf
def getExecBroker(ptyid):
    """Return the trimmed 'Broker Ref' additional-info value for party ptyid,
    or '' when the party has no such field."""
    p = ael.Party[ptyid]
    for ai in p.additional_infos():
        if ai.addinf_specnbr.field_name == 'Broker Ref':
            return ai.value.strip()
    return ''
def ConvertDateToYYYYMMDD(dt):
    """Return dt formatted as a YYYYMMDD string (month and day zero-padded)."""
    year, month, day = ael.date(dt).to_ymd()
    return "%d%02d%02d" % (year, month, day)
def getChildPortfolio(pPf, pfarr):
    """Recursively collect the display ids of all leaf (non-compound)
    portfolios under pPf into pfarr, and return pfarr."""
    if (pPf == None):
        return pfarr
    for child in pPf.children():
        pfid = child.display_id()
        cPf = ael.Portfolio[pfid]
        if cPf != None:
            # Compound portfolios are containers: recurse into them;
            # plain portfolios are the leaves we collect.
            if cPf.compound == True:
                pfarr = getChildPortfolio(cPf, pfarr)
            else:
                pfarr.append(pfid)
    return pfarr
def ael_main(dict):
    """Entry point called by Front Arena with the dialog values in `dict`.

    Builds SQL IN-list filters from the multi-select dialog fields, queries
    today's released security settlements, and writes one CSV instruction
    line per settlement to the requested output file (SI message format).
    """
    # Build a quoted, comma-separated SQL IN-list for each multi-select field.
    # Acquirers
    acq_array_list = dict['acquirers']
    acq_list = ''
    for acq in acq_array_list:
        if acq_list == '':
            acq_list = "'" + acq + "'"
        else:
            acq_list = acq_list + ",'" + acq + "'"
    # instypes
    instype_array_list = dict['instypes']
    instype_list = ''
    for instype in instype_array_list:
        if instype_list == '':
            instype_list = "'" + instype + "'"
        else:
            instype_list = instype_list + ",'" + instype + "'"
    # settlement status
    sett_status_array_list = dict['sett_status']
    sett_status_list = ''
    for sett_status in sett_status_array_list:
        if sett_status_list == '':
            sett_status_list = "'" + sett_status + "'"
        else:
            sett_status_list = sett_status_list + ",'" + sett_status + "'"
    # Portfolios
    pf_array_list = dict['pf']
    pf_list = ''
    for pf in pf_array_list:
        if pf_list == '':
            pf_list = "'" + pf + "'"
        else:
            pf_list = pf_list + ",'" + pf + "'"
    # sett_types
    not_setttype_array_list = dict['not_setttypes']
    not_setttype_list = ''
    for setttype in not_setttype_array_list:
        if not_setttype_list == '':
            not_setttype_list = "'" + setttype + "'"
        else:
            not_setttype_list = not_setttype_list + ",'" + setttype + "'"
    participant_id = dict['participant_id']
    print 'pf_list', pf_list
    print 'acq_list', acq_list
    print 'sett_status_list', sett_status_list
    print 'not_setttype_list', not_setttype_list
    print 'instype_list', instype_list
    # File Message Type
    fileMsgType = dict['fileMsgType']
    # Asof Date: resolve to [asofdate, asofdate+1) as YYYY-MM-DD strings.
    asofdate = dict['asofdate']
    if asofdate == 'TODAY':
        d = ael.date_today().to_ymd()
        d1 = ael.date_today().add_days(1).to_ymd()
    else:
        d = ael.date(asofdate).to_ymd()
        d1 = ael.date(asofdate).add_days(1).to_ymd()
    yy = str(d[0])
    mm = str(d[1])
    mm = "%02d" % int(mm)
    dd = str(d[2])
    dd = "%02d" % int(dd)
    asofdate = yy+'-'+mm+'-'+dd
    yy = str(d1[0])
    mm = str(d1[1])
    mm = "%02d" % int(mm)
    dd = str(d1[2])
    dd = "%02d" % int(dd)
    d1_date = yy+'-'+mm+'-'+dd
    # File Name: expand the timestamp / message-type placeholders.
    filePath = dict['filePath']
    fileName = dict['fileName']
    fileName = filePath + '\\' + fileName
    genDate = ael.date_today()
    timeStamp = time.strftime("%Y%m%d%H%M%S")
    fileName = fileName.replace("<YYYYMMDDhhmmss>", timeStamp)
    fileName = fileName.replace("<FileMsgType>", fileMsgType)
    errMsg = ''
    print fileName
    f = open(fileName, "w")
    # trade details
    if fileMsgType == 'SI':
        # Header
        headerLine = "settleDate,instructionType,settleMethod,haitongParticipantId,market,stockCode,shares,payment,ccassClientAccountNo,haitongClientAccountNo"
        headerLine = str(headerLine) + '\n'
        print headerLine
        f.write(headerLine)
        # Security legs updated today for the selected filters.
        strSql = """select s.seqnbr, t.trdnbr, s.type, s.value_day, t.text1, pf.prfid, s.amount, i.isin, i.instype, ui.isin
                from settlement s, trade t, instrument i, party acq, portfolio pf, instrument ui
                where s.trdnbr = t.trdnbr
                and t.insaddr = i.insaddr
                and t.acquirer_ptynbr = acq.ptynbr
                and t.prfnbr = pf.prfnbr
                and acq.ptyid in (%s)
                and s.status in (%s)
                and s.updat_time >= '%s' and s.updat_time < '%s'
                and i.instype in (%s)
                and t.category ~= 'Collateral'
                and pf.prfid in (%s)
                and i.und_insaddr *= ui.insaddr
                and s.type in ('Security Nominal', 'End Security')""" % (acq_list, sett_status_list, asofdate, d1_date, instype_list, pf_list)
        print strSql
        recCnt = 0
        rs = ael.asql(strSql)
        columns, buf = rs
        for table in buf:
            for row in table:
                print row
                seqnbr = str(row[SEQNBR]).strip()
                trdnbr = str(row[TRDNBR]).strip()
                setttype = str(row[SETTTYPE]).strip()
                valueday = str(row[VALUEDAY]).strip()
                text1 = str(row[TEXT1]).strip()
                sec_amount = str(row[AMOUNT]).strip()
                instype = str(row[INSTYPE]).strip()
                print 'louis1'
                # Repos settle in the underlying: take the underlying ISIN and
                # allow text1 to override the book id for account mapping.
                if instype == 'Repo/Reverse':
                    if text1 == '':
                        prfid = str(row[PRFID]).strip()
                    else:
                        prfid = text1
                    isin = str(row[UI_ISIN]).strip()
                else:
                    prfid = str(row[PRFID]).strip()
                    isin = str(row[ISIN]).strip()
                accountId = ''
                try:
                    accountId = msse_fa_acc_mapping[prfid]
                except:
                    print 'cannot get accountId'
                settledt = ael.date(valueday).to_string("%Y-%m-%d")
                # Sign of the security amount decides the instruction side.
                if float(sec_amount) >= 0:
                    instructionType = 'DELIVER'
                else:
                    instructionType = 'RECEIVE'
                settlemethod = 'FOP'
                marketcode = 'OTC'
                payment = '0.00'
                sec_amount = str(abs(float(sec_amount)))
                # Matching cash legs for the same trade/value day; a non-empty
                # result turns the instruction into a DVP with that payment.
                payment_strSql = """select sum(s.amount) 'amount'
                            from settlement s, trade t, instrument i, party acq, portfolio pf, instrument ui
                            where s.trdnbr = t.trdnbr
                            and t.insaddr = i.insaddr
                            and t.acquirer_ptynbr = acq.ptynbr
                            and t.prfnbr = pf.prfnbr
                            and acq.ptyid in (%s)
                            and s.status in (%s)
                            and i.instype in (%s)
                            and t.category ~= 'Collateral'
                            and pf.prfid in (%s)
                            and i.und_insaddr *= ui.insaddr
                            and s.type not in ('Security Nominal', 'End Security')
                            and s.type not in (%s)
                            and s.value_day = '%s'
                            and t.trdnbr = %s""" % (acq_list, sett_status_list, instype_list, pf_list, not_setttype_list, settledt, int(trdnbr))
                print payment_strSql
                payment_rs = ael.asql(payment_strSql)
                payment_columns, payment_buf = payment_rs
                for payment_table in payment_buf:
                    for payment_row in payment_table:
                        payment = str(abs(float(str(payment_row[0]).strip())))
                        settlemethod = 'DVP'
                print 'payment', payment
                detailLine = settledt + ',' + instructionType + ',' + settlemethod + ',' + participant_id + ',' + marketcode + ',' + isin + ',' + sec_amount + ',' + payment + ',' + '' + ',' + accountId
                detailLine = str(detailLine) + '\n'
                recCnt = recCnt + 1
                print detailLine
                f.write(detailLine)
    else:
        recCnt = 0
    f.close()
    # Pop a confirmation dialog in the Front Arena client if available.
    mb = acm.GetFunction("msgBox", 3)
    if mb != None:
        mb("Message", "File has been generated successfully at " + fileName, 0)
        mb = None
    return
| apache-2.0 |
zhoulingjun/zipline | zipline/assets/assets.py | 8 | 34670 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
import numpy as np
import sqlite3
from sqlite3 import Row
import warnings
from logbook import Logger
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
from zipline.errors import (
ConsumeAssetMetaDataError,
InvalidAssetType,
MultipleSymbolsFound,
RootSymbolNotFound,
SidAssignmentError,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets._assets import (
Asset, Equity, Future
)
log = Logger('assets.py')
# Superset of metadata keys recognized when consuming asset metadata;
# keys outside this list are dropped during insertion.
ASSET_FIELDS = [
    'sid',
    'asset_type',
    'symbol',
    'root_symbol',
    'asset_name',
    'start_date',
    'end_date',
    'first_traded',
    'exchange',
    'notice_date',
    'expiration_date',
    'contract_multiplier',
    # The following fields are for compatibility with other systems
    'file_name', # Used as symbol
    'company_name', # Used as asset_name
    'start_date_nano', # Used as start_date
    'end_date_nano', # Used as end_date
]
# Columns common to both the equities and futures tables.
ASSET_TABLE_FIELDS = [
    'sid',
    'symbol',
    'asset_name',
    'start_date',
    'end_date',
    'first_traded',
    'exchange',
]
# Columns of the futures table (common columns plus futures-specific ones).
FUTURE_TABLE_FIELDS = ASSET_TABLE_FIELDS + [
    'root_symbol',
    'notice_date',
    'expiration_date',
    'contract_multiplier',
]
# Columns of the equities table.
EQUITY_TABLE_FIELDS = ASSET_TABLE_FIELDS
# Create the query once from the fields, so that the join is not done
# repeatedly.
FUTURE_BY_SID_QUERY = 'select {0} from futures where sid=?'.format(
    ", ".join(FUTURE_TABLE_FIELDS))
EQUITY_BY_SID_QUERY = 'select {0} from equities where sid=?'.format(
    ", ".join(EQUITY_TABLE_FIELDS))
class AssetFinder(object):
    def __init__(self,
                 metadata=None,
                 allow_sid_assignment=True,
                 fuzzy_char=None,
                 db_path=':memory:',
                 create_table=True):
        """Build an AssetFinder backed by a sqlite database.

        metadata -- optional initial asset metadata, consumed at construction
            when create_table is True
        allow_sid_assignment -- when True, metadata lacking a sid is assigned
            one; when False such metadata raises SidAssignmentError
        fuzzy_char -- character stripped out of symbols for fuzzy matching
        db_path -- sqlite database path; defaults to a fresh in-memory db
        create_table -- when True, create the schema before consuming metadata
        """
        self.fuzzy_char = fuzzy_char
        # This flag controls if the AssetFinder is allowed to generate its own
        # sids. If False, metadata that does not contain a sid will raise an
        # exception when building assets.
        self.allow_sid_assignment = allow_sid_assignment
        if allow_sid_assignment:
            self.end_date_to_assign = normalize_date(
                pd.Timestamp('now', tz='UTC'))
        self.conn = sqlite3.connect(db_path)
        self.conn.text_factory = str
        self.cursor = self.conn.cursor()
        # The AssetFinder also holds a nested-dict of all metadata for
        # reference when building Assets
        self.metadata_cache = {}
        # Create table and read in metadata.
        # Should we use flags like 'r', 'w', instead?
        # What we need to support is:
        # - A 'throwaway' mode where the metadata is read each run.
        # - A 'write' mode where the data is written to the provided db_path
        # - A 'read' mode where the asset finder uses a prexisting db.
        if create_table:
            self.create_db_tables()
            if metadata is not None:
                self.consume_metadata(metadata)
        # Cache for lookup of assets by sid, the objects in the asset lookp may
        # be shared with the results from equity and future lookup caches.
        #
        # The top level cache exists to minimize lookups on the asset type
        # routing.
        #
        # The caches are read through, i.e. accessing an asset through
        # retrieve_asset, _retrieve_equity etc. will populate the cache on
        # first retrieval.
        self._asset_cache = {}
        self._equity_cache = {}
        self._future_cache = {}
        self._asset_type_cache = {}
        # Populated on first call to `lifetimes`.
        self._asset_lifetimes = None
def create_db_tables(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE equities(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
fuzzy text
)""")
c.execute('CREATE INDEX equities_sid on equities(sid)')
c.execute('CREATE INDEX equities_symbol on equities(symbol)')
c.execute('CREATE INDEX equities_fuzzy on equities(fuzzy)')
c.execute("""
CREATE TABLE futures(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
root_symbol text,
notice_date integer,
expiration_date integer,
contract_multiplier real
)""")
c.execute('CREATE INDEX futures_sid on futures(sid)')
c.execute('CREATE INDEX futures_root_symbol on equities(symbol)')
c.execute("""
CREATE TABLE asset_router
(sid integer,
asset_type text)
""")
c.execute('CREATE INDEX asset_router_sid on asset_router(sid)')
self.conn.commit()
def asset_type_by_sid(self, sid):
try:
return self._asset_type_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
# Python 3 compatibility required forcing to int for sid = 0.
t = (int(sid),)
query = 'select asset_type from asset_router where sid=:sid'
c.execute(query, t)
data = c.fetchone()
if data is None:
return
asset_type = data[0]
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
def retrieve_all(self, sids, default_none=False):
return [self.retrieve_asset(sid) for sid in sids]
    def _retrieve_equity(self, sid):
        """Fetch an Equity by sid from the equities table, memoized in
        self._equity_cache.  Caches and returns None when the row is falsy.

        NOTE(review): if no row exists, fetchone() returns None and dict()
        raises TypeError before the `if data` guard -- callers appear to
        pass only routed sids; confirm.
        """
        try:
            return self._equity_cache[sid]
        except KeyError:
            pass
        c = self.conn.cursor()
        # Row factory gives named access so the row can become kwargs.
        c.row_factory = Row
        t = (int(sid),)
        c.execute(EQUITY_BY_SID_QUERY, t)
        data = dict(c.fetchone())
        if data:
            # Integer nanos stored in the DB become tz-aware Timestamps.
            if data['start_date']:
                data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
            if data['end_date']:
                data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
            if data['first_traded']:
                data['first_traded'] = pd.Timestamp(
                    data['first_traded'], tz='UTC')
            equity = Equity(**data)
        else:
            equity = None
        self._equity_cache[sid] = equity
        return equity
    def _retrieve_futures_contract(self, sid):
        """Fetch a Future by sid from the futures table, memoized in
        self._future_cache.  Caches and returns None when the row is falsy.

        NOTE(review): as with _retrieve_equity, a missing row would make
        dict(c.fetchone()) raise TypeError -- confirm callers only pass
        routed sids.
        """
        try:
            return self._future_cache[sid]
        except KeyError:
            pass
        c = self.conn.cursor()
        t = (int(sid),)
        c.row_factory = Row
        c.execute(FUTURE_BY_SID_QUERY, t)
        data = dict(c.fetchone())
        if data:
            # Integer nanos stored in the DB become tz-aware Timestamps.
            if data['start_date']:
                data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
            if data['end_date']:
                data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
            if data['first_traded']:
                data['first_traded'] = pd.Timestamp(
                    data['first_traded'], tz='UTC')
            if data['notice_date']:
                data['notice_date'] = pd.Timestamp(
                    data['notice_date'], tz='UTC')
            if data['expiration_date']:
                data['expiration_date'] = pd.Timestamp(
                    data['expiration_date'], tz='UTC')
            future = Future(**data)
        else:
            future = None
        self._future_cache[sid] = future
        return future
    def lookup_symbol_resolve_multiple(self, symbol, as_of_date=None):
        """
        Return matching Asset of name symbol in database.

        If multiple Assets are found and as_of_date is not set,
        raises MultipleSymbolsFound.

        If no Asset was active at as_of_date, raises SymbolNotFound.

        NOTE(review): the original docstring mentioned an allow_expired
        flag, but no such parameter exists here.
        """
        if as_of_date is not None:
            as_of_date = pd.Timestamp(normalize_date(as_of_date))
        c = self.conn.cursor()
        if as_of_date:
            # If one SID exists for symbol, return that symbol
            t = (symbol, as_of_date.value, as_of_date.value)
            query = ("select sid from equities "
                     "where symbol=? "
                     "and start_date<=? "
                     "and end_date>=?")
            c.execute(query, t)
            candidates = c.fetchall()
            if len(candidates) == 1:
                return self._retrieve_equity(candidates[0][0])
            # If no SID exists for symbol, return SID with the
            # highest-but-not-over end_date
            if len(candidates) == 0:
                t = (symbol, as_of_date.value)
                query = ("select sid from equities "
                         "where symbol=? "
                         "and start_date<=? "
                         "order by end_date desc "
                         "limit 1")
                c.execute(query, t)
                data = c.fetchone()
                if data:
                    return self._retrieve_equity(data[0])
            # If multiple SIDs exist for symbol, return latest start_date with
            # end_date as a tie-breaker
            if len(candidates) > 1:
                t = (symbol, as_of_date.value)
                query = ("select sid from equities "
                         "where symbol=? " +
                         "and start_date<=? " +
                         "order by start_date desc, end_date desc " +
                         "limit 1")
                c.execute(query, t)
                data = c.fetchone()
                if data:
                    return self._retrieve_equity(data[0])
            raise SymbolNotFound(symbol=symbol)
        else:
            # No as_of_date: the symbol must be unambiguous across history.
            t = (symbol,)
            query = ("select sid from equities where symbol=?")
            c.execute(query, t)
            data = c.fetchall()
            if len(data) == 1:
                return self._retrieve_equity(data[0][0])
            elif not data:
                raise SymbolNotFound(symbol=symbol)
            else:
                options = []
                for row in data:
                    sid = row[0]
                    asset = self._retrieve_equity(sid)
                    options.append(asset)
                raise MultipleSymbolsFound(symbol=symbol,
                                           options=options)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
If a fuzzy string is provided, then we try various symbols based on
the provided symbol. This is to facilitate mapping from a broker's
symbol to ours in cases where mapping to the broker's symbol loses
information. For example, if we have CMCS_A, but a broker has CMCSA,
when the broker provides CMCSA, it can also provide fuzzy='_',
so we can find a match by inserting an underscore.
"""
symbol = symbol.upper()
as_of_date = normalize_date(as_of_date)
if not fuzzy:
try:
return self.lookup_symbol_resolve_multiple(symbol, as_of_date)
except SymbolNotFound:
return None
else:
c = self.conn.cursor()
fuzzy = symbol.replace(self.fuzzy_char, '')
t = (fuzzy, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where fuzzy=? " +
"and start_date<=? " +
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
# If one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc" +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
    def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
        """ Return the futures chain for a given root symbol.

        Parameters
        ----------
        root_symbol : str
            Root symbol of the desired future.
        as_of_date : pd.Timestamp or pd.NaT
            Date at which the chain determination is rooted. I.e. the
            existing contract whose notice date is first after this
            date is the primary contract, etc. If NaT is given, the
            chain is unbounded, and all contracts for this root symbol
            are returned.
        knowledge_date : pd.Timestamp or pd.NaT
            Date for determining which contracts exist for inclusion in
            this chain. Contracts exist only if they have a start_date
            on or before this date. If NaT is given and as_of_date is
            not NaT, the value of as_of_date is used for
            knowledge_date.

        Returns
        -------
        list
            A list of Future objects, the chain for the given
            parameters.

        Raises
        ------
        RootSymbolNotFound
            Raised when a future chain could not be found for the given
            root symbol.
        """
        c = self.conn.cursor()
        if as_of_date is pd.NaT:
            # If the as_of_date is NaT, get all contracts for this
            # root symbol.
            t = {'root_symbol': root_symbol}
            c.execute("""
            select sid from futures
            where root_symbol=:root_symbol
            order by notice_date asc
            """, t)
        else:
            if knowledge_date is pd.NaT:
                # If knowledge_date is NaT, default to using as_of_date
                t = {'root_symbol': root_symbol,
                     'as_of_date': as_of_date.value,
                     'knowledge_date': as_of_date.value}
            else:
                t = {'root_symbol': root_symbol,
                     'as_of_date': as_of_date.value,
                     'knowledge_date': knowledge_date.value}
            c.execute("""
            select sid from futures
            where root_symbol=:root_symbol
            and :as_of_date < notice_date
            and start_date <= :knowledge_date
            order by notice_date asc
            """, t)
        sids = [r[0] for r in c.fetchall()]
        if not sids:
            # Check if root symbol exists.
            c.execute("""
            select count(sid) from futures where root_symbol=:root_symbol
            """, t)
            count = c.fetchone()[0]
            if count == 0:
                raise RootSymbolNotFound(root_symbol=root_symbol)
            else:
                # If symbol exists, return empty future chain.
                return []
        return [self._retrieve_futures_contract(sid) for sid in sids]
@property
def sids(self):
c = self.conn.cursor()
query = 'select sid from asset_router'
c.execute(query)
return [r[0] for r in c.fetchall()]
    def _lookup_generic_scalar(self,
                               asset_convertible,
                               as_of_date,
                               matches,
                               missing):
        """
        Convert asset_convertible to an asset.

        On success, append to matches.
        On failure, append to missing.

        NOTE(review): AssetConvertible and NotAssetConvertible are not among
        this file's visible imports -- presumably defined elsewhere in the
        module; confirm.
        """
        if isinstance(asset_convertible, Asset):
            matches.append(asset_convertible)
        elif isinstance(asset_convertible, Integral):
            # Integers are treated as sids.
            try:
                result = self.retrieve_asset(int(asset_convertible))
            except SidNotFound:
                missing.append(asset_convertible)
                return None
            matches.append(result)
        elif isinstance(asset_convertible, string_types):
            # Strings are treated as symbols.
            try:
                matches.append(
                    self.lookup_symbol_resolve_multiple(
                        asset_convertible,
                        as_of_date,
                    )
                )
            except SymbolNotFound:
                missing.append(asset_convertible)
                return None
        else:
            raise NotAssetConvertible(
                "Input was %s, not AssetConvertible."
                % asset_convertible
            )
    def lookup_generic(self,
                       asset_convertible_or_iterable,
                       as_of_date):
        """
        Convert a AssetConvertible or iterable of AssetConvertibles into
        a list of Asset objects.

        This method exists primarily as a convenience for implementing
        user-facing APIs that can handle multiple kinds of input.  It should
        not be used for internal code where we already know the expected types
        of our inputs.

        Returns a pair of objects, the first of which is the result of the
        conversion, and the second of which is a list containing any values
        that couldn't be resolved.
        """
        matches = []
        missing = []
        # Interpret input as scalar.
        if isinstance(asset_convertible_or_iterable, AssetConvertible):
            self._lookup_generic_scalar(
                asset_convertible=asset_convertible_or_iterable,
                as_of_date=as_of_date,
                matches=matches,
                missing=missing,
            )
            try:
                return matches[0], missing
            except IndexError:
                # Nothing matched: re-raise as the appropriate not-found error.
                if hasattr(asset_convertible_or_iterable, '__int__'):
                    raise SidNotFound(sid=asset_convertible_or_iterable)
                else:
                    raise SymbolNotFound(symbol=asset_convertible_or_iterable)
        # Interpret input as iterable.
        try:
            iterator = iter(asset_convertible_or_iterable)
        except TypeError:
            raise NotAssetConvertible(
                "Input was not a AssetConvertible "
                "or iterable of AssetConvertible."
            )
        for obj in iterator:
            self._lookup_generic_scalar(obj, as_of_date, matches, missing)
        return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
__________
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
_______
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# If symbols or Assets are provided, construction and mapping is
# necessary
self.consume_identifiers(index)
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: " + missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
    def _insert_metadata(self, identifier, **kwargs):
        """
        Inserts the given metadata kwargs to the entry for the given
        identifier. Matching fields in the existing entry will be overwritten.

        Cleans and normalizes the incoming fields, assigns a sid and symbol
        if necessary, converts dates to UTC Timestamps, writes the asset to
        the appropriate SQLite table (equities or futures) plus the
        asset_router table, and finally records the entry in
        ``self.metadata_cache``. Does NOT commit; callers commit.

        :param identifier: The identifier for which to insert metadata
        :param kwargs: The keyed metadata to insert
        """
        if identifier in self.metadata_cache:
            # Multiple pass insertion no longer supported.
            # This could and probably should raise an Exception, but is
            # currently just a short-circuit for compatibility with existing
            # testing structure in the test_algorithm module which creates
            # multiple sources which all insert redundant metadata.
            return

        entry = {}

        for key, value in kwargs.items():
            # Do not accept invalid fields
            if key not in ASSET_FIELDS:
                continue
            # Do not accept Nones
            if value is None:
                continue
            # Do not accept empty strings
            if value == '':
                continue
            # Do not accept nans from dataframes
            if isinstance(value, float) and np.isnan(value):
                continue
            entry[key] = value

        # Check if the sid is declared
        try:
            entry['sid']
        except KeyError:
            # If the identifier is not a sid, assign one
            if hasattr(identifier, '__int__'):
                entry['sid'] = identifier.__int__()
            else:
                if self.allow_sid_assignment:
                    # Assign the sid the value of its insertion order.
                    # This assumes that we are assigning values to all assets.
                    entry['sid'] = len(self.metadata_cache)
                else:
                    raise SidAssignmentError(identifier=identifier)

        # If the file_name is in the kwargs, it will be used as the symbol
        try:
            entry['symbol'] = entry.pop('file_name')
        except KeyError:
            pass

        # If the identifier coming in was a string and there is no defined
        # symbol yet, set the symbol to the incoming identifier
        try:
            entry['symbol']
            pass
        except KeyError:
            if isinstance(identifier, string_types):
                entry['symbol'] = identifier

        # If the company_name is in the kwargs, it may be the asset_name
        try:
            company_name = entry.pop('company_name')
            try:
                entry['asset_name']
            except KeyError:
                entry['asset_name'] = company_name
        except KeyError:
            pass

        # If dates are given as nanos, pop them
        try:
            entry['start_date'] = entry.pop('start_date_nano')
        except KeyError:
            pass
        try:
            entry['end_date'] = entry.pop('end_date_nano')
        except KeyError:
            pass
        try:
            entry['notice_date'] = entry.pop('notice_date_nano')
        except KeyError:
            pass
        try:
            entry['expiration_date'] = entry.pop('expiration_date_nano')
        except KeyError:
            pass

        # Process dates to Timestamps
        try:
            entry['start_date'] = pd.Timestamp(entry['start_date'], tz='UTC')
        except KeyError:
            # Set a default start_date of the EPOCH, so that all date queries
            # work when a start date is not provided.
            entry['start_date'] = pd.Timestamp(0, tz='UTC')
        try:
            # Set a default end_date of 'now', so that all date queries
            # work when a end date is not provided.
            entry['end_date'] = pd.Timestamp(entry['end_date'], tz='UTC')
        except KeyError:
            entry['end_date'] = self.end_date_to_assign
        try:
            entry['notice_date'] = pd.Timestamp(entry['notice_date'],
                                                tz='UTC')
        except KeyError:
            pass
        try:
            entry['expiration_date'] = pd.Timestamp(entry['expiration_date'],
                                                    tz='UTC')
        except KeyError:
            pass

        # Build an Asset of the appropriate type, default to Equity
        asset_type = entry.pop('asset_type', 'equity')
        if asset_type.lower() == 'equity':
            # ``fuzzy`` strips the fuzzy match character out of the symbol so
            # fuzzy symbol lookups can match against it later.
            try:
                fuzzy = entry['symbol'].replace(self.fuzzy_char, '') \
                    if self.fuzzy_char else None
            except KeyError:
                fuzzy = None
            asset = Equity(**entry)
            c = self.conn.cursor()
            # Timestamps are stored as their integer nanosecond .value.
            t = (asset.sid,
                 asset.symbol,
                 asset.asset_name,
                 asset.start_date.value if asset.start_date else None,
                 asset.end_date.value if asset.end_date else None,
                 asset.first_traded.value if asset.first_traded else None,
                 asset.exchange,
                 fuzzy)
            c.execute("""INSERT INTO equities(
            sid,
            symbol,
            asset_name,
            start_date,
            end_date,
            first_traded,
            exchange,
            fuzzy)
            VALUES(?, ?, ?, ?, ?, ?, ?, ?)""", t)

            t = (asset.sid,
                 'equity')
            c.execute("""INSERT INTO asset_router(sid, asset_type)
            VALUES(?, ?)""", t)

        elif asset_type.lower() == 'future':
            asset = Future(**entry)
            c = self.conn.cursor()
            t = (asset.sid,
                 asset.symbol,
                 asset.asset_name,
                 asset.start_date.value if asset.start_date else None,
                 asset.end_date.value if asset.end_date else None,
                 asset.first_traded.value if asset.first_traded else None,
                 asset.exchange,
                 asset.root_symbol,
                 asset.notice_date.value if asset.notice_date else None,
                 asset.expiration_date.value
                 if asset.expiration_date else None,
                 asset.contract_multiplier)
            c.execute("""INSERT INTO futures(
            sid,
            symbol,
            asset_name,
            start_date,
            end_date,
            first_traded,
            exchange,
            root_symbol,
            notice_date,
            expiration_date,
            contract_multiplier)
            VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", t)

            t = (asset.sid,
                 'future')
            c.execute("""INSERT INTO asset_router(sid, asset_type)
            VALUES(?, ?)""", t)
        else:
            raise InvalidAssetType(asset_type=asset_type)

        self.metadata_cache[identifier] = entry
def consume_identifiers(self, identifiers):
"""
Consumes the given identifiers in to the metadata cache of this
AssetFinder.
"""
for identifier in identifiers:
# Handle case where full Assets are passed in
# For example, in the creation of a DataFrameSource, the source's
# 'sid' args may be full Assets
if isinstance(identifier, Asset):
sid = identifier.sid
metadata = identifier.to_dict()
metadata['asset_type'] = identifier.__class__.__name__
self.insert_metadata(identifier=sid, **metadata)
else:
self.insert_metadata(identifier)
def consume_metadata(self, metadata):
"""
Consumes the provided metadata in to the metadata cache. The
existing values in the cache will be overwritten when there
is a conflict.
:param metadata: The metadata to be consumed
"""
# Handle dicts
if isinstance(metadata, dict):
self._insert_metadata_dict(metadata)
# Handle DataFrames
elif isinstance(metadata, pd.DataFrame):
self._insert_metadata_dataframe(metadata)
# Handle readables
elif hasattr(metadata, 'read'):
self._insert_metadata_readable(metadata)
else:
raise ConsumeAssetMetaDataError(obj=metadata)
    def clear_metadata(self):
        """
        Used for testing.

        Drops the metadata cache and replaces the backing SQLite database
        with a fresh in-memory one containing empty tables.
        """
        self.metadata_cache = {}
        self.conn = sqlite3.connect(':memory:')
        self.create_db_tables()
    def insert_metadata(self, identifier, **kwargs):
        # Public wrapper around _insert_metadata that commits the SQLite
        # transaction after the single insert.
        self._insert_metadata(identifier, **kwargs)
        self.conn.commit()
    def _insert_metadata_dataframe(self, dataframe):
        # Each row is keyed by its index value (the identifier); one commit
        # covers the whole frame.
        for identifier, row in dataframe.iterrows():
            self._insert_metadata(identifier, **row)
        self.conn.commit()
    def _insert_metadata_dict(self, dict):
        # NOTE(review): the parameter name shadows the builtin ``dict``;
        # renaming would change the signature, so it is only flagged here.
        # One commit covers all entries.
        for identifier, entry in dict.items():
            self._insert_metadata(identifier, **entry)
        self.conn.commit()
    def _insert_metadata_readable(self, readable):
        # Consume rows from a file-like object exposing read(); one commit
        # covers all rows.
        for row in readable.read():
            # Parse out the row of the readable object
            metadata_dict = {}
            for field in ASSET_FIELDS:
                try:
                    row_value = row[field]
                    # Avoid passing placeholders
                    if row_value and (row_value != 'None'):
                        metadata_dict[field] = row[field]
                except KeyError:
                    continue
                except IndexError:
                    continue
            # Locate the identifier, fail if not found
            if 'sid' in metadata_dict:
                identifier = metadata_dict['sid']
            elif 'symbol' in metadata_dict:
                identifier = metadata_dict['symbol']
            else:
                raise ConsumeAssetMetaDataError(obj=row)
            self._insert_metadata(identifier, **metadata_dict)
        self.conn.commit()
    def _compute_asset_lifetimes(self):
        """
        Compute and cache a recarray of asset lifetimes.

        FUTURE OPTIMIZATION: We're looping over a big array, which means this
        probably should be in C/Cython.
        """
        with self.conn as transaction:
            results = transaction.execute(
                'SELECT sid, start_date, end_date from equities'
            ).fetchall()

        lifetimes = np.recarray(
            shape=(len(results),),
            dtype=[('sid', 'i8'), ('start', 'i8'), ('end', 'i8')],
        )

        # TODO: This is **WAY** slower than it could be because we have to
        # check for None everywhere. If we represented "no start date" as
        # 0, and "no end date" as MAX_INT in our metadata, this would be
        # significantly faster.
        NO_START = 0
        NO_END = np.iinfo(int).max

        for idx, (sid, start, end) in enumerate(results):
            # Sentinel values let later vectorized range comparisons work
            # without per-element None checks.
            lifetimes[idx] = (
                sid,
                start if start is not None else NO_START,
                end if end is not None else NO_END,
            )

        return lifetimes
    def lifetimes(self, dates):
        """
        Compute a DataFrame representing asset lifetimes for the specified date
        range.

        Parameters
        ----------
        dates : pd.DatetimeIndex
            The dates for which to compute lifetimes.

        Returns
        -------
        lifetimes : pd.DataFrame
            A frame of dtype bool with `dates` as index and an Int64Index of
            assets as columns. The value at `lifetimes.loc[date, asset]` will
            be True iff `asset` existed on `date`.

        See Also
        --------
        numpy.putmask
        """
        # This is a less than ideal place to do this, because if someone adds
        # assets to the finder after we've touched lifetimes we won't have
        # those new assets available. Mutability is not my favorite
        # programming feature.
        if self._asset_lifetimes is None:
            self._asset_lifetimes = self._compute_asset_lifetimes()
        lifetimes = self._asset_lifetimes

        # Broadcast the (n_dates, 1) column of int64 dates against the
        # (n_assets,) start/end vectors to get an (n_dates, n_assets) mask.
        raw_dates = dates.asi8[:, None]
        mask = (lifetimes.start <= raw_dates) & (raw_dates <= lifetimes.end)

        return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
    """
    ABC for types that are convertible to integer-representations of
    Assets.

    Includes Asset, six.string_types, and Integral
    """
    pass

# Register the concrete convertible types as virtual subclasses.
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
    AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
    """Raised when an input is not AssetConvertible (int, str, or Asset)."""
    pass
| apache-2.0 |
idrogeno/FusionOE | tools/host_tools/FormatConverter/lamedb.py | 78 | 2262 | from datasource import datasource
class lamedb(datasource):
def __init__(self, filename = "lamedb"):
datasource.__init__(self)
self.setFilename(filename)
def setFilename(self, filename):
self.filename = filename
def getName(self):
return "lamedb"
def getCapabilities(self):
return [("read file", self.read), ("print all", self.printAll)]
def read(self):
inputfile = open(self.filename, "r")
lines = inputfile.readlines()
inputfile.close()
versionstring = lines[0].split('/')
version = int(versionstring[1])
if 3 > version or 4 < version:
print "unsupported lamedb version"
return
transpondersreading = False
sats = {}
transponders = {}
for line in lines:
if line.strip() == "transponders":
transpondersreading = True
continue
if line.strip() == "services":
transpondersreading = False
continue
if transpondersreading:
if ord(line[0]) == 9:
transponder = line.strip().split(' ')[1].split(':')
sat = transponder[4]
if not sats.has_key(sat):
sats[sat] = []
sats[sat].append((transponder, tsid, onid))
tsid = None
onid = None
elif line.strip() != "/" and line.strip() != "end":
data = line.strip().split(":")
tsid = str(int(data[1], 16))
onid = str(int(data[2], 16))
satlist = sats.keys()
satlist.sort()
for sat in satlist:
print sat
self.addSat(sat, sat)
transponders = sats[sat]
transponders.sort(key = lambda a: a[0])
for transpondertuple in transponders:
transponder = transpondertuple[0]
tsid = transpondertuple[1]
onid = transpondertuple[2]
print transponder, tsid, onid
tmp_transponder = {"frequency": transponder[0], "symbol_rate": transponder[1], "polarization": transponder[2], "fec": transponder[3]}
if version == 3:
if len(transponder) > 6:
tmp_transponder["system"] = transponder[6]
tmp_transponder["modulation"] = transponder[7]
elif version == 4:
if len(transponder) > 7:
tmp_transponder["system"] = transponder[7]
tmp_transponder["modulation"] = transponder[8]
if tsid != "1" or onid != "1":
tmp_transponder["tsid"] = transponder[0]
tmp_transponder["onid"] = transponder[0]
self.addTransponder(sat, tmp_transponder)
| gpl-2.0 |
jamielennox/keystone | keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py | 14 | 1536 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
    """Create the federation tables: identity_provider and
    federation_protocol."""
    metadata = sql.MetaData()
    metadata.bind = migrate_engine

    identity_provider = sql.Table(
        'identity_provider',
        metadata,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('enabled', sql.Boolean, nullable=False),
        sql.Column('description', sql.Text(), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    identity_provider.create(migrate_engine, checkfirst=True)

    # Protocols are keyed by (id, idp_id) and cascade-delete with their IdP.
    protocol = sql.Table(
        'federation_protocol',
        metadata,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('idp_id', sql.String(64),
                   sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
                   primary_key=True),
        sql.Column('mapping_id', sql.String(64), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    protocol.create(migrate_engine, checkfirst=True)
| apache-2.0 |
lightmare/mapnik | scons/scons-local-4.1.0/SCons/Tool/suncxx.py | 4 | 4879 | """SCons.Tool.sunc++
Tool-specific initialization for C++ on SunOS / Solaris.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons
import os
import re
import subprocess
import SCons.Tool.cxx
cplusplus = SCons.Tool.cxx
# cplusplus = __import__('c++', globals(), locals(), [])
package_info = {}
def get_package_info(package_name, pkginfo, pkgchk):
    """Return (pathname, version) for a Solaris package, caching results.

    Looks up the compiler's install directory from the SVR4 package
    database and its version from pkginfo output; either element may be
    None if it could not be determined.
    """
    try:
        # Serve from the module-level cache when possible.
        return package_info[package_name]
    except KeyError:
        version = None
        pathname = None
        from subprocess import DEVNULL
        try:
            # The contents file lists every installed file with its package.
            with open('/var/sadm/install/contents', 'r', encoding='UTF-8') as f:
                sadm_contents = f.read()
        except EnvironmentError:
            pass
        else:
            sadm_re = re.compile(r'^(\S*/bin/CC)(=\S*)? %s$' % package_name, re.M)
            sadm_match = sadm_re.search(sadm_contents)
            if sadm_match:
                pathname = os.path.dirname(sadm_match.group(1))
        try:
            p = subprocess.Popen([pkginfo, '-l', package_name],
                                 universal_newlines=True,
                                 stdout=subprocess.PIPE,
                                 stderr=DEVNULL)
        except EnvironmentError:
            # pkginfo tool not available; leave version as None.
            pass
        else:
            pkginfo_contents = p.communicate()[0]
            version_re = re.compile(r'^ *VERSION:\s*(.*)$', re.M)
            version_match = version_re.search(pkginfo_contents)
            if version_match:
                version = version_match.group(1)
        if pathname is None:
            # Fall back to pkgchk when the contents file yielded nothing.
            try:
                p = subprocess.Popen([pkgchk, '-l', package_name],
                                     universal_newlines=True,
                                     stdout=subprocess.PIPE,
                                     stderr=DEVNULL)
            except EnvironmentError:
                pass
            else:
                pkgchk_contents = p.communicate()[0]
                pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M)
                pathname_match = pathname_re.search(pkgchk_contents)
                if pathname_match:
                    pathname = os.path.dirname(pathname_match.group(1))
        package_info[package_name] = (pathname, version)
        return package_info[package_name]
# use the package installer tool "pkg" to figure out where cppc and what
# version of it is installed
def get_cppc(env):
    """Locate the SunPRO C++ compiler for *env*.

    Returns a tuple (directory, compiler name, shared compiler name,
    version); directory and version may be None.
    """
    cxx = env.subst('$CXX')
    found_path = os.path.dirname(cxx) if cxx else None
    found_version = None

    pkginfo_tool = env.subst('$PKGINFO')
    pkgchk_tool = env.subst('$PKGCHK')

    # Prefer the location reported by the package database, if available.
    for package_name in ['SPROcpl']:
        pkg_path, pkg_version = get_package_info(package_name,
                                                 pkginfo_tool, pkgchk_tool)
        if pkg_path and pkg_version:
            found_path, found_version = pkg_path, pkg_version
            break

    return (found_path, 'CC', 'CC', found_version)
def generate(env):
    """Add Builders and construction variables for SunPRO C++."""
    compiler_dir, cxx, shcxx, version = get_cppc(env)
    if compiler_dir:
        # Use the fully-qualified compiler paths when a directory was found.
        cxx = os.path.join(compiler_dir, cxx)
        shcxx = os.path.join(compiler_dir, shcxx)

    # Pull in the generic C++ settings first, then override for SunPRO.
    cplusplus.generate(env)

    env['CXX'] = cxx
    env['SHCXX'] = shcxx
    env['CXXVERSION'] = version
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC')
    env['SHOBJPREFIX'] = 'so_'
    env['SHOBJSUFFIX'] = '.o'
def exists(env):
    """Return the path to the SunPRO C++ compiler if present, else None."""
    path, cxx, shcxx, version = get_cppc(env)
    if path and cxx:
        candidate = os.path.join(path, cxx)
        if os.path.exists(candidate):
            return candidate
    return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
blink1073/pexpect | examples/monitor.py | 20 | 7437 | #!/usr/bin/env python
''' This runs a sequence of commands on a remote host using SSH. It runs a
simple system checks such as uptime and free to monitor the state of the remote
host.
./monitor.py [-s server_hostname] [-u username] [-p password]
-s : hostname of the remote server to login to.
-u : username to user for login.
-p : Password to user for login.
Example:
This will print information about the given host:
./monitor.py -s www.example.com -u mylogin -p mypassword
It works like this:
Login via SSH (This is the hardest part).
Run and parse 'uptime'.
Run 'iostat'.
Run 'vmstat'.
Run 'netstat'
Run 'free'.
Exit the remote host.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from __future__ import print_function
from __future__ import absolute_import
import os, sys, re, getopt, getpass
import pexpect
try:
raw_input
except NameError:
raw_input = input
#
# Some constants.
#
COMMAND_PROMPT = '[#$] ' ### This is way too simple for industrial use -- we will change is ASAP.
TERMINAL_PROMPT = '(?i)terminal type\?'
TERMINAL_TYPE = 'vt100'
# This is the prompt we get if SSH does not have the remote host's public key stored in the cache.
SSH_NEWKEY = '(?i)are you sure you want to continue connecting'
def exit_with_usage():
    """Print the module usage string and terminate the process immediately."""
    print(__doc__)
    os._exit(1)
def main():
    """Log in to a remote host over SSH and print a series of basic
    system-health reports (uname, uptime, iostat, vmstat, free, df, lsof)."""

    global COMMAND_PROMPT, TERMINAL_PROMPT, TERMINAL_TYPE, SSH_NEWKEY

    ######################################################################
    ## Parse the options, arguments, get ready, etc.
    ######################################################################
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'h?s:u:p:', ['help','h','?'])
    except Exception as e:
        print(str(e))
        exit_with_usage()
    options = dict(optlist)
    if len(args) > 1:
        exit_with_usage()

    if [elem for elem in options if elem in ['-h','--h','-?','--?','--help']]:
        print("Help:")
        exit_with_usage()

    # Missing credentials are prompted for interactively.
    if '-s' in options:
        host = options['-s']
    else:
        host = raw_input('hostname: ')
    if '-u' in options:
        user = options['-u']
    else:
        user = raw_input('username: ')
    if '-p' in options:
        password = options['-p']
    else:
        password = getpass.getpass('password: ')

    #
    # Login via SSH
    #
    child = pexpect.spawn('ssh -l %s %s'%(user, host))
    i = child.expect([pexpect.TIMEOUT, SSH_NEWKEY, COMMAND_PROMPT, '(?i)password'])
    if i == 0: # Timeout
        print('ERROR! could not login with SSH. Here is what SSH said:')
        print(child.before, child.after)
        print(str(child))
        sys.exit (1)
    if i == 1: # In this case SSH does not have the public key cached.
        child.sendline ('yes')
        child.expect ('(?i)password')
    if i == 2:
        # This may happen if a public key was setup to automatically login.
        # But beware, the COMMAND_PROMPT at this point is very trivial and
        # could be fooled by some output in the MOTD or login message.
        pass
    if i == 3:
        child.sendline(password)
        # Now we are either at the command prompt or
        # the login process is asking for our terminal type.
        i = child.expect ([COMMAND_PROMPT, TERMINAL_PROMPT])
        if i == 1:
            child.sendline (TERMINAL_TYPE)
            child.expect (COMMAND_PROMPT)
    #
    # Set command prompt to something more unique.
    #
    COMMAND_PROMPT = "\[PEXPECT\]\$ "
    child.sendline ("PS1='[PEXPECT]\$ '") # In case of sh-style
    i = child.expect ([pexpect.TIMEOUT, COMMAND_PROMPT], timeout=10)
    if i == 0:
        print("# Couldn't set sh-style prompt -- trying csh-style.")
        child.sendline ("set prompt='[PEXPECT]\$ '")
        i = child.expect ([pexpect.TIMEOUT, COMMAND_PROMPT], timeout=10)
        if i == 0:
            print("Failed to set command prompt using sh or csh style.")
            print("Response was:")
            print(child.before)
            sys.exit (1)

    # Now we should be at the command prompt and ready to run some commands.
    print('---------------------------------------')
    print('Report of commands run on remote host.')
    print('---------------------------------------')

    # Run uname.
    child.sendline ('uname -a')
    child.expect (COMMAND_PROMPT)
    print(child.before)
    if 'linux' in child.before.lower():
        LINUX_MODE = 1
    else:
        LINUX_MODE = 0

    # Run and parse 'uptime'.
    child.sendline ('uptime')
    child.expect('up\s+(.*?),\s+([0-9]+) users?,\s+load averages?: ([0-9]+\.[0-9][0-9]),?\s+([0-9]+\.[0-9][0-9]),?\s+([0-9]+\.[0-9][0-9])')
    duration, users, av1, av5, av15 = child.match.groups()

    # The duration format varies: "N days", "HH:MM", or "N min".
    days = '0'
    hours = '0'
    mins = '0'
    if 'day' in duration:
        child.match = re.search('([0-9]+)\s+day',duration)
        days = str(int(child.match.group(1)))
    if ':' in duration:
        child.match = re.search('([0-9]+):([0-9]+)',duration)
        hours = str(int(child.match.group(1)))
        mins = str(int(child.match.group(2)))
    if 'min' in duration:
        child.match = re.search('([0-9]+)\s+min',duration)
        mins = str(int(child.match.group(1)))
    print()
    print('Uptime: %s days, %s users, %s (1 min), %s (5 min), %s (15 min)' % (
        duration, users, av1, av5, av15))
    child.expect (COMMAND_PROMPT)

    # Run iostat.
    child.sendline ('iostat')
    child.expect (COMMAND_PROMPT)
    print(child.before)

    # Run vmstat.
    child.sendline ('vmstat')
    child.expect (COMMAND_PROMPT)
    print(child.before)

    # Run free.
    if LINUX_MODE:
        child.sendline ('free') # Linux systems only.
        child.expect (COMMAND_PROMPT)
        print(child.before)

    # Run df.
    child.sendline ('df')
    child.expect (COMMAND_PROMPT)
    print(child.before)

    # Run lsof.
    child.sendline ('lsof')
    child.expect (COMMAND_PROMPT)
    print(child.before)

    # Now exit the remote host.
    child.sendline ('exit')
    index = child.expect([pexpect.EOF, "(?i)there are stopped jobs"])
    if index==1:
        child.sendline("exit")
        # BUG FIX: the original referenced the bare name ``EOF``, which is
        # undefined in this module (NameError); the sentinel lives on the
        # pexpect module.
        child.expect(pexpect.EOF)

if __name__ == "__main__":
    main()
| isc |
edx/edx-platform | openedx/features/lti_course_tab/tests.py | 5 | 1590 | """
Tests for LTI Course tabs.
"""
from unittest.mock import Mock, patch
from lms.djangoapps.courseware.tests.test_tabs import TabTestCase
from openedx.features.lti_course_tab.tab import DiscussionLtiCourseTab
class DiscussionLtiCourseTabTestCase(TabTestCase):
    """Test cases for LTI Discussion Tab."""

    def check_discussion_tab(self):
        """Helper function for verifying the LTI discussion tab."""
        return self.check_tab(
            tab_class=DiscussionLtiCourseTab,
            dict_tab={'type': DiscussionLtiCourseTab.type, 'name': 'same'},
            expected_link=self.reverse('course_tab_view', args=[str(self.course.id), DiscussionLtiCourseTab.type]),
            expected_tab_id=DiscussionLtiCourseTab.type,
            invalid_dict_tab=None,
        )

    @patch('openedx.features.lti_course_tab.tab.DiscussionsConfiguration.get')
    @patch('common.djangoapps.student.models.CourseEnrollment.is_enrolled')
    def test_discussion_lti_tab(self, is_enrolled, discussion_config_get):
        """The tab is shown only when the discussion config is enabled."""
        # Simulate an enrolled user and a config whose LTI tab is disabled.
        is_enrolled.return_value = True
        mock_config = Mock()
        mock_config.lti_configuration = {}
        mock_config.enabled = False
        discussion_config_get.return_value = mock_config
        # Disabled config: tab must not be displayed, even to enrolled users.
        tab = self.check_discussion_tab()
        self.check_can_display_results(
            tab, for_staff_only=True, for_enrolled_users_only=True, expected_value=False
        )
        # Enabled config: tab becomes visible again.
        mock_config.enabled = True
        self.check_discussion_tab()
        self.check_can_display_results(
            tab, for_staff_only=True, for_enrolled_users_only=True
        )
| agpl-3.0 |
Endika/brotli | python/bro.py | 88 | 4909 | #! /usr/bin/env python
"""bro %s -- compression/decompression utility using the Brotli algorithm."""
from __future__ import print_function
import argparse
import sys
import os
import brotli
import platform
# default values of encoder parameters
DEFAULT_PARAMS = {
'mode': brotli.MODE_GENERIC,
'quality': 11,
'lgwin': 22,
'lgblock': 0,
}
def get_binary_stdio(stream):
    """ Return the specified standard input, output or errors stream as a
    'raw' buffer object suitable for reading/writing binary data from/to it.
    """
    assert stream in ['stdin', 'stdout', 'stderr'], "invalid stream name"
    stdio = getattr(sys, stream)
    if sys.version_info[0] >= 3:
        # Python 3: text streams expose their binary layer as `.buffer`.
        # Fall back to the pristine original stream when the current one
        # has been replaced by an object without a buffer.
        if hasattr(stdio, 'buffer'):
            return stdio.buffer
        return getattr(sys, "__%s__" % stream).buffer
    # Python 2 on Windows needs the stream switched to binary mode.
    if sys.platform == 'win32':
        if platform.python_implementation() == "PyPy":
            # The msvcrt trick doesn't work in PyPy, so reopen via fdopen.
            stdio = os.fdopen(stdio.fileno(),
                              "rb" if stream == "stdin" else "wb", 0)
        else:
            # This works with CPython -- untested on other implementations.
            import msvcrt
            msvcrt.setmode(stdio.fileno(), os.O_BINARY)
    return stdio
def main():
    """Parse command-line options, then Brotli-compress or -decompress
    from a file or stdin to a file or stdout."""
    parser = argparse.ArgumentParser(
        prog='bro.py',
        description="Compression/decompression utility using the Brotli algorithm.")
    parser.add_argument('--version', action='version', version=brotli.__version__)
    parser.add_argument('-i', '--input', metavar='FILE', type=str, dest='infile',
                        help='Input file', default=None)
    parser.add_argument('-o', '--output', metavar='FILE', type=str, dest='outfile',
                        help='Output file', default=None)
    parser.add_argument('-f', '--force', action='store_true',
                        help='Overwrite existing output file', default=False)
    parser.add_argument('-d', '--decompress', action='store_true',
                        help='Decompress input file', default=False)
    params = parser.add_argument_group('optional encoder parameters')
    # BUG FIX: the help text documents mode 2 (WOFF 2.0 font data,
    # brotli.MODE_FONT) but choices=[0, 1] rejected it; allow all three.
    params.add_argument('-m', '--mode', metavar="MODE", type=int,
                        choices=[0, 1, 2],
                        help='The compression mode can be 0 for generic input, '
                        '1 for UTF-8 encoded text, or 2 for WOFF 2.0 font data.'
                        'Defaults to 0.')
    params.add_argument('-q', '--quality', metavar="QUALITY", type=int,
                        choices=list(range(0, 12)),
                        help='Controls the compression-speed vs compression-density '
                        'tradeoff. The higher the quality, the slower the '
                        'compression. Range is 0 to 11. Defaults to 11.')
    params.add_argument('--lgwin', metavar="LGWIN", type=int,
                        choices=list(range(16, 25)),
                        help='Base 2 logarithm of the sliding window size. Range is '
                        '16 to 24. Defaults to 22.')
    params.add_argument('--lgblock', metavar="LGBLOCK", type=int,
                        choices=[0] + list(range(16, 25)),
                        help='Base 2 logarithm of the maximum input block size. '
                        'Range is 16 to 24. If set to 0, the value will be set based '
                        'on the quality. Defaults to 0.')
    # set default values using global DEFAULT_PARAMS dictionary
    parser.set_defaults(**DEFAULT_PARAMS)

    options = parser.parse_args()

    if options.infile:
        if not os.path.isfile(options.infile):
            parser.error('file "%s" not found' % options.infile)
        with open(options.infile, "rb") as infile:
            data = infile.read()
    else:
        if sys.stdin.isatty():
            # interactive console, just quit
            parser.error('no input')
        infile = get_binary_stdio('stdin')
        data = infile.read()

    if options.outfile:
        if os.path.isfile(options.outfile) and not options.force:
            parser.error('output file exists')
        outfile = open(options.outfile, "wb")
    else:
        outfile = get_binary_stdio('stdout')

    try:
        if options.decompress:
            data = brotli.decompress(data)
        else:
            data = brotli.compress(
                data, mode=options.mode, quality=options.quality,
                lgwin=options.lgwin, lgblock=options.lgblock)
    except brotli.error as e:
        parser.exit(1,'bro: error: %s: %s' % (e, options.infile or 'sys.stdin'))

    outfile.write(data)
    outfile.close()

if __name__ == '__main__':
    main()
| apache-2.0 |
blaze/partd | partd/dict.py | 5 | 1701 | from .core import Interface
from threading import Lock
class Dict(Interface):
    """In-memory partd: each key maps to a list of appended byte chunks,
    guarded by a single lock."""

    def __init__(self):
        self.lock = Lock()
        self.data = dict()
        Interface.__init__(self)

    def __getstate__(self):
        # The lock is deliberately not pickled; __setstate__ rebuilds it
        # by re-running __init__.
        return {'data': self.data}

    def __setstate__(self, state):
        Interface.__setstate__(self, state)
        Dict.__init__(self)
        self.data = state['data']

    def append(self, data, lock=True, **kwargs):
        """Append each value in ``data`` (a mapping) to its key's chunk list."""
        if lock:
            self.lock.acquire()
        try:
            for key, chunk in data.items():
                self.data.setdefault(key, []).append(chunk)
        finally:
            if lock:
                self.lock.release()

    def _get(self, keys, lock=True, **kwargs):
        """Return the concatenated bytes stored under each of ``keys``."""
        assert isinstance(keys, (list, tuple, set))
        if lock:
            self.lock.acquire()
        try:
            gathered = []
            for key in keys:
                gathered.append(b''.join(self.data.get(key, [])))
        finally:
            if lock:
                self.lock.release()
        return gathered

    def _iset(self, key, value, lock=True):
        """ Idempotent set """
        if lock:
            self.lock.acquire()
        try:
            self.data[key] = [value]
        finally:
            if lock:
                self.lock.release()

    def _delete(self, keys, lock=True):
        """Remove the given keys; missing keys are ignored."""
        if lock:
            self.lock.acquire()
        try:
            for key in keys:
                self.data.pop(key, None)
        finally:
            if lock:
                self.lock.release()

    def drop(self):
        """Discard all stored data and the idempotent-set bookkeeping."""
        self._iset_seen.clear()
        self.data.clear()

    def __exit__(self, *args):
        self.drop()
| bsd-3-clause |
graphite-project/graphite-web | webapp/graphite/composer/views.py | 4 | 2742 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
from graphite.user_util import getProfile
from graphite.logger import log
from graphite.account.models import MyGraph
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
def composer(request):
    """Render the main graph composer page (``composer.html``) with a
    context built from the query string and the user's profile."""
    profile = getProfile(request)
    context = {
        'queryString' : request.GET.urlencode().replace('+','%20'),
        'showTarget' : request.GET.get('showTarget',''),
        'user' : request.user,
        'profile' : profile,
        # Hide "My Graphs" for the shared default account.
        'showMyGraphs' : int( profile.user.username != 'default' ),
        # Search is only offered when the index file is readable.
        'searchEnabled' : int( os.access(settings.INDEX_FILE, os.R_OK) ),
        'refreshInterval': settings.AUTO_REFRESH_INTERVAL,
        'debug' : settings.DEBUG,
        'jsdebug' : settings.DEBUG,
    }
    return render(request, "composer.html", context)
def mygraph(request):
    """Save or delete a user's named graph.

    Expects GET parameters ``action`` ('save' or 'delete'), ``graphName``
    and, for saves, ``url``. Responds with a short plain-text status string.
    """
    profile = getProfile(request, allowDefault=False)

    if not profile:
        return HttpResponse("You are not logged in!")

    action = request.GET['action']
    graphName = request.GET['graphName']

    if not graphName:
        return HttpResponse("You must type in a graph name.")

    if action == 'save':
        url = request.GET['url']
        try:
            # Update an existing graph of the same name in place.
            existingGraph = profile.mygraph_set.get(name=graphName)
            existingGraph.url = url
            existingGraph.save()
        except ObjectDoesNotExist:
            try:
                newGraph = MyGraph(profile=profile,name=graphName,url=url)
                newGraph.save()
            except Exception:
                log.exception("Failed to create new MyGraph in /composer/mygraph/, graphName=%s" % graphName)
                return HttpResponse("Failed to save graph %s" % graphName)
        return HttpResponse("SAVED")
    elif action == 'delete':
        try:
            existingGraph = profile.mygraph_set.get(name=graphName)
            existingGraph.delete()
        except ObjectDoesNotExist:
            return HttpResponse("No such graph '%s'" % graphName)
        return HttpResponse("DELETED")
    else:
        return HttpResponse("Invalid operation '%s'" % action)
| apache-2.0 |
awesto/django-shop | shop/views/auth.py | 1 | 8484 | from django.contrib.auth import logout, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_auth.views import LoginView as OriginalLoginView, PasswordChangeView as OriginalPasswordChangeView
from shop.models.cart import CartModel
from shop.models.customer import CustomerModel
from shop.rest.renderers import CMSPageRenderer
from shop.serializers.auth import PasswordResetRequestSerializer, PasswordResetConfirmSerializer
from shop.signals import email_queued
class AuthFormsView(GenericAPIView):
    """
    Generic view to handle authentication related forms such as user registration
    """
    serializer_class = None
    # Django form class; posted data is expected under its ``scope_prefix`` key.
    form_class = None

    def post(self, request, *args, **kwargs):
        # Visitors get a persistent Customer created on the fly so the form
        # can be bound to a concrete model instance.
        if request.customer.is_visitor:
            customer = CustomerModel.objects.get_or_create_from_request(request)
        else:
            customer = request.customer
        form_data = request.data.get(self.form_class.scope_prefix, {})
        form = self.form_class(data=form_data, instance=customer)
        if form.is_valid():
            form.save(request=request)
            response_data = {form.form_name: {
                'success_message': _("Successfully registered yourself."),
            }}
            return Response(response_data, status=status.HTTP_200_OK)
        errors = dict(form.errors)
        if 'email' in errors:
            # Surface e-mail errors as non-field errors so the client renders
            # them at form level rather than next to a (possibly hidden) field.
            errors.update({NON_FIELD_ERRORS: errors.pop('email')})
        return Response({form.form_name: errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
class LoginView(OriginalLoginView):
    form_name = 'login_form'

    def login(self):
        """
        Logs in as the given user, and moves the items from the current to the new cart.
        """
        # Remember the cart attached to the pre-login (anonymous) session,
        # if any, so it can be merged after authentication.
        try:
            anonymous_cart = CartModel.objects.get_from_request(self.request)
        except CartModel.DoesNotExist:
            anonymous_cart = None
        if self.request.customer.user.is_anonymous or self.request.customer.is_registered:
            previous_user = None
        else:
            # Unrecognized (guest) customer entity backing the old session.
            previous_user = self.request.customer.user
        super().login()  # this rotates the session_key
        if not self.serializer.data.get('stay_logged_in'):
            self.request.session.set_expiry(0)  # log out when the browser is closed
        authenticated_cart = CartModel.objects.get_from_request(self.request)
        if anonymous_cart:
            # an anonymous customer logged in, now merge his current cart with a cart,
            # which previously might have been created under his account.
            authenticated_cart.merge_with(anonymous_cart)
        if previous_user and previous_user.is_active is False and previous_user != self.request.user:
            # keep the database clean and remove this anonymous entity
            if previous_user.customer.orders.count() == 0:
                previous_user.delete()

    def post(self, request, *args, **kwargs):
        self.request = request
        if request.user.is_anonymous:
            form_data = request.data.get('form_data', {})
            self.serializer = self.get_serializer(data=form_data)
            if self.serializer.is_valid():
                self.login()
                return self.get_response()
            exc = ValidationError({self.form_name: self.serializer.errors})
        else:
            # Refuse to authenticate on top of an existing login session.
            message = ErrorDetail("Please log out before signing in again.")
            exc = ValidationError({self.form_name: {api_settings.NON_FIELD_ERRORS_KEY: [message]}})
        response = self.handle_exception(exc)
        self.response = self.finalize_response(request, response, *args, **kwargs)
        return self.response
class LogoutView(APIView):
    """
    Calls Django logout method and delete the auth Token assigned to the current User object.
    """
    permission_classes = (AllowAny,)
    form_name = 'logout_form'

    def post(self, request):
        """Log the user out and discard any DRF auth token bound to him."""
        try:
            request.user.auth_token.delete()
        except Exception:
            # Not every user owns a token (session auth, anonymous users).
            # A bare ``except:`` here would also have swallowed SystemExit
            # and KeyboardInterrupt, so catch Exception instead.
            pass
        logout(request)
        # Replace the user on the request so downstream middleware sees the
        # logged-out state immediately.
        request.user = AnonymousUser()
        response_data = {self.form_name: {'success_message': _("Successfully logged out.")}}
        return Response(response_data)
class PasswordChangeView(OriginalPasswordChangeView):
    """Change the password of the currently authenticated user."""
    form_name = 'password_change_form'

    def post(self, request, *args, **kwargs):
        payload = request.data.get('form_data', {})
        serializer = self.get_serializer(data=payload)
        # Guard clause: reject invalid submissions straight away.
        if not serializer.is_valid():
            return Response({self.form_name: serializer.errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        serializer.save()
        success = {'success_message': _("Password has been changed successfully.")}
        return Response({self.form_name: success})
class PasswordResetRequestView(GenericAPIView):
    """
    Calls Django Auth PasswordResetRequestForm save method.

    Accepts the following POST parameters: email
    Returns the success/fail message.
    """
    serializer_class = PasswordResetRequestSerializer
    permission_classes = (AllowAny,)
    form_name = 'password_reset_request_form'

    def post(self, request, *args, **kwargs):
        form_data = request.data.get('form_data', {})
        serializer = self.get_serializer(data=form_data)
        if not serializer.is_valid():
            return Response({self.form_name: serializer.errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        # send email containing a reset link
        serializer.save()
        # trigger async email queue
        email_queued()
        # Return the success message with OK HTTP status
        msg = _("Instructions on how to reset the password have been sent to '{email}'.")
        response_data = {self.form_name: {
            'success_message': msg.format(**serializer.data),
        }}
        return Response(response_data)
class PasswordResetConfirmView(GenericAPIView):
    """
    Password reset e-mail link points onto a CMS page with the Page ID = 'password-reset-confirm'.
    This page then shall render the CMS plugin as provided by the **ShopAuthenticationPlugin** using
    the form "Confirm Password Reset".
    """
    renderer_classes = (CMSPageRenderer, JSONRenderer, BrowsableAPIRenderer)
    serializer_class = PasswordResetConfirmSerializer
    permission_classes = (AllowAny,)
    token_generator = default_token_generator
    form_name = 'password_reset_confirm_form'

    def get(self, request, uidb64=None, token=None):
        # Probe the uid/token pair for validity without changing anything:
        # a random throw-away password is filled in only to satisfy the
        # serializer's required fields.
        data = {'uid': uidb64, 'token': token}
        serializer_class = self.get_serializer_class()
        password = get_user_model().objects.make_random_password()
        data.update(new_password1=password, new_password2=password)
        serializer = serializer_class(data=data, context=self.get_serializer_context())
        if not serializer.is_valid():
            return Response({'validlink': False})
        return Response({
            'validlink': True,
            'user_name': force_str(serializer.user),
            'form_name': 'password_reset_form',
        })

    def post(self, request, uidb64=None, token=None):
        # Perform the actual password reset with the posted form data.
        try:
            data = dict(request.data['form_data'], uid=uidb64, token=token)
        except (KeyError, TypeError, ValueError):
            errors = {'non_field_errors': [_("Invalid POST data.")]}
        else:
            serializer = self.get_serializer(data=data)
            if serializer.is_valid():
                serializer.save()
                response_data = {self.form_name: {
                    'success_message': _("Password has been reset with the new password."),
                }}
                return Response(response_data)
            else:
                errors = serializer.errors
        return Response({self.form_name: errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
| bsd-3-clause |
kawamuray/ganeti | lib/uidpool.py | 6 | 11942 | #
#
# Copyright (C) 2010, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""User-id pool related functions.
The user-id pool is cluster-wide configuration option.
It is stored as a list of user-id ranges.
This module contains functions used for manipulating the
user-id pool parameter and for requesting/returning user-ids
from the pool.
"""
import errno
import logging
import os
import random
from ganeti import errors
from ganeti import constants
from ganeti import utils
from ganeti import pathutils
def ParseUidPool(value, separator=None):
  """Parse a user-id pool definition.

  @param value: string representation of the user-id pool.
      The accepted input format is a list of integer ranges.
      The boundaries are inclusive.
      Example: '1000-5000,8000,9000-9010'.
  @param separator: the separator character between the uids/uid-ranges.
      Defaults to a comma.
  @return: a list of integer pairs (lower, higher range boundaries)

  """
  if separator is None:
    separator = ","
  ranges = []
  for range_def in value.split(separator):
    if not range_def:
      # Skip empty strings
      continue
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.OpPrereqError(
          "Invalid user-id range definition. Only one hyphen allowed: %s"
          % boundaries, errors.ECODE_INVAL)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError), err:
      raise errors.OpPrereqError("Invalid user-id value for lower boundary of"
                                 " user-id range: %s"
                                 % str(err), errors.ECODE_INVAL)
    try:
      # For a single uid like "8000", n_elements is 1 and this reuses
      # boundaries[0], yielding a degenerate (uid, uid) range.
      higher = int(boundaries[n_elements - 1])
    except (ValueError, TypeError), err:
      raise errors.OpPrereqError("Invalid user-id value for higher boundary of"
                                 " user-id range: %s"
                                 % str(err), errors.ECODE_INVAL)
    ranges.append((lower, higher))
  ranges.sort()
  return ranges
def AddToUidPool(uid_pool, add_uids):
  """Add a list of user-ids/user-id ranges to a user-id pool.

  @param uid_pool: a user-id pool (list of integer tuples)
  @param add_uids: user-id ranges to be added to the pool
      (list of integer tuples)

  """
  for candidate in add_uids:
    # Skip ranges already present (including duplicates within add_uids,
    # since the pool grows as we iterate).
    if candidate in uid_pool:
      continue
    uid_pool.append(candidate)
  uid_pool.sort()
def RemoveFromUidPool(uid_pool, remove_uids):
  """Remove a list of user-ids/user-id ranges from a user-id pool.

  @param uid_pool: a user-id pool (list of integer tuples)
  @param remove_uids: user-id ranges to be removed from the pool
      (list of integer tuples)

  """
  for doomed in remove_uids:
    try:
      uid_pool.remove(doomed)
    except ValueError:
      # list.remove raises ValueError when the range is absent.
      raise errors.OpPrereqError(
          "User-id range to be removed is not found in the current"
          " user-id pool: %s" % str(doomed), errors.ECODE_INVAL)
def _FormatUidRange(lower, higher):
"""Convert a user-id range definition into a string.
"""
if lower == higher:
return str(lower)
return "%s-%s" % (lower, higher)
def FormatUidPool(uid_pool, separator=None):
  """Convert the internal representation of the user-id pool into a string.

  The output format is also accepted by ParseUidPool()

  @param uid_pool: a list of integer pairs representing UID ranges
  @param separator: the separator character between the uids/uid-ranges.
      Defaults to ", ".
  @return: a string with the formatted results

  """
  if separator is None:
    separator = ", "
  pieces = []
  for (lo, hi) in uid_pool:
    # A degenerate one-uid range prints as the bare number.
    pieces.append(str(lo) if lo == hi else "%s-%s" % (lo, hi))
  return separator.join(pieces)
def CheckUidPool(uid_pool):
  """Sanity check user-id pool range definition values.

  The first offending range aborts the check with L{errors.OpPrereqError}.

  @param uid_pool: a list of integer pairs (lower, higher range boundaries)

  """
  for (lower, higher) in uid_pool:
    problem = None
    if lower > higher:
      problem = ("Lower user-id range boundary value (%s)"
                 " is larger than higher boundary value (%s)" %
                 (lower, higher))
    elif lower < constants.UIDPOOL_UID_MIN:
      problem = ("Lower user-id range boundary value (%s)"
                 " is smaller than UIDPOOL_UID_MIN (%s)." %
                 (lower, constants.UIDPOOL_UID_MIN))
    elif higher > constants.UIDPOOL_UID_MAX:
      problem = ("Higher user-id boundary value (%s)"
                 " is larger than UIDPOOL_UID_MAX (%s)." %
                 (higher, constants.UIDPOOL_UID_MAX))
    if problem is not None:
      raise errors.OpPrereqError(problem, errors.ECODE_INVAL)
def ExpandUidPool(uid_pool):
  """Expands a uid-pool definition to a list of uids.

  @param uid_pool: a list of integer pairs (lower, higher range boundaries)
  @return: a list of integers

  """
  expanded = set()
  for (low, high) in uid_pool:
    # Boundaries are inclusive, hence the +1 on the upper end; the set
    # collapses overlapping ranges.
    expanded |= set(range(low, high + 1))
  return list(expanded)
def _IsUidUsed(uid):
  """Check if there is any process in the system running with the given user-id

  @type uid: integer
  @param uid: the user-id to be checked.

  """
  # pgrep -u exits 0 when at least one matching process exists and 1 when
  # none does; any other exit code means pgrep itself failed.
  pgrep_command = [constants.PGREP, "-u", uid]
  result = utils.RunCmd(pgrep_command)
  if result.exit_code == 0:
    return True
  elif result.exit_code == 1:
    return False
  else:
    raise errors.CommandError("Running pgrep failed. exit code: %s"
                              % result.exit_code)
class LockedUid(object):
  """Class representing a locked user-id in the uid-pool.

  This binds together a userid and a lock.

  """
  def __init__(self, uid, lock):
    """Constructor

    @param uid: a user-id
    @param lock: a utils.FileLock object

    """
    self._user_id = uid
    self._flock = lock

  def Unlock(self):
    """Release the exclusive lock and close its file descriptor."""
    self._flock.Close()

  def GetUid(self):
    """Return the wrapped numeric user-id."""
    return self._user_id

  def AsStr(self):
    """Return the user-id rendered as a string."""
    return str(self._user_id)
def RequestUnusedUid(all_uids):
  """Tries to find an unused uid from the uid-pool, locks it and returns it.

  Usage pattern
  =============

  1. When starting a process::

      from ganeti import ssconf
      from ganeti import uidpool

      # Get list of all user-ids in the uid-pool from ssconf
      ss = ssconf.SimpleStore()
      uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\\n")
      all_uids = set(uidpool.ExpandUidPool(uid_pool))

      uid = uidpool.RequestUnusedUid(all_uids)
      try:
        <start a process with the UID>
        # Once the process is started, we can release the file lock
        uid.Unlock()
      except ..., err:
        # Return the UID to the pool
        uidpool.ReleaseUid(uid)

  2. Stopping a process::

      from ganeti import uidpool

      uid = <get the UID the process is running under>
      <stop the process>
      uidpool.ReleaseUid(uid)

  @type all_uids: set of integers
  @param all_uids: a set containing all the user-ids in the user-id pool
  @return: a LockedUid object representing the unused uid. It's the caller's
      responsibility to unlock the uid once an instance is started with
      this uid.

  """
  # Create the lock dir if it's not yet present
  try:
    utils.EnsureDirs([(pathutils.UIDPOOL_LOCKDIR, 0755)])
  except errors.GenericError, err:
    raise errors.LockError("Failed to create user-id pool lock dir: %s" % err)

  # Get list of currently used uids from the filesystem: each lockfile in
  # the lock dir is named after the uid it protects.
  try:
    taken_uids = set()
    for taken_uid in os.listdir(pathutils.UIDPOOL_LOCKDIR):
      try:
        taken_uid = int(taken_uid)
      except ValueError, err:
        # Skip directory entries that can't be converted into an integer
        continue
      taken_uids.add(taken_uid)
  except OSError, err:
    raise errors.LockError("Failed to get list of used user-ids: %s" % err)

  # Filter out spurious entries from the directory listing
  taken_uids = all_uids.intersection(taken_uids)

  # Remove the list of used uids from the list of all uids
  unused_uids = list(all_uids - taken_uids)
  if not unused_uids:
    logging.info("All user-ids in the uid-pool are marked 'taken'")

  # Randomize the order of the unused user-id list
  random.shuffle(unused_uids)

  # Shuffle the taken uids as well: they are retried after the unused ones,
  # in case a lockfile is stale and the uid is actually reusable.
  taken_uids = list(taken_uids)
  random.shuffle(taken_uids)

  for uid in (unused_uids + taken_uids):
    try:
      # Create the lock file
      # Note: we don't care if it exists. Only the fact that we can
      # (or can't) lock it later is what matters.
      uid_path = utils.PathJoin(pathutils.UIDPOOL_LOCKDIR, str(uid))
      lock = utils.FileLock.Open(uid_path)
    except OSError, err:
      raise errors.LockError("Failed to create lockfile for user-id %s: %s"
                             % (uid, err))
    try:
      # Try acquiring an exclusive lock on the lock file
      lock.Exclusive()

      # Check if there is any process running with this user-id
      if _IsUidUsed(uid):
        logging.debug("There is already a process running under"
                      " user-id %s", uid)
        lock.Unlock()
        continue

      return LockedUid(uid, lock)
    except IOError, err:
      if err.errno == errno.EAGAIN:
        # The file is already locked, let's skip it and try another unused uid
        logging.debug("Lockfile for user-id is already locked %s: %s", uid, err)
        continue
    except errors.LockError, err:
      # There was an unexpected error while trying to lock the file
      logging.error("Failed to lock the lockfile for user-id %s: %s", uid, err)
      raise

  raise errors.LockError("Failed to find an unused user-id")
def ReleaseUid(uid):
  """This should be called when the given user-id is no longer in use.

  Removes the uid's lockfile from the lock directory, returning the uid to
  the pool.

  @type uid: LockedUid or integer
  @param uid: the uid to release back to the pool

  """
  if isinstance(uid, LockedUid):
    # Make sure we release the exclusive lock, if there is any
    uid.Unlock()
    uid_filename = uid.AsStr()
  else:
    uid_filename = str(uid)

  try:
    uid_path = utils.PathJoin(pathutils.UIDPOOL_LOCKDIR, uid_filename)
    os.remove(uid_path)
  except OSError, err:
    raise errors.LockError("Failed to remove user-id lockfile"
                           " for user-id %s: %s" % (uid_filename, err))
def ExecWithUnusedUid(fn, all_uids, *args, **kwargs):
  """Execute a callable and provide an unused user-id in its kwargs.

  This wrapper function provides a simple way to handle the requesting,
  unlocking and releasing a user-id.
  "fn" is called by passing a "uid" keyword argument that
  contains an unused user-id (as an integer) selected from the set of user-ids
  passed in all_uids.
  If there is an error while executing "fn", the user-id is returned
  to the pool.

  @param fn: a callable that accepts a keyword argument called "uid"
  @type all_uids: a set of integers
  @param all_uids: a set containing all user-ids in the user-id pool

  """
  uid = RequestUnusedUid(all_uids)
  kwargs["uid"] = uid.GetUid()
  try:
    return_value = fn(*args, **kwargs)
  except:
    # Deliberately bare: whatever aborted the callable, the uid must go back
    # into the pool before the exception is re-raised.
    ReleaseUid(uid)
    raise
  uid.Unlock()
  return return_value
| gpl-2.0 |
frappe/erpnext | erpnext/tests/utils.py | 2 | 2180 | # Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import copy
from contextlib import contextmanager
import frappe
def create_test_contact_and_address():
	"""Reset contact/address tables and create fresh fixtures linked to '_Test Customer'."""
	# Wipe every contact- and address-related table (child tables included)
	# so the fixtures created below are the only rows left.
	frappe.db.sql('delete from tabContact')
	frappe.db.sql('delete from `tabContact Email`')
	frappe.db.sql('delete from `tabContact Phone`')
	frappe.db.sql('delete from tabAddress')
	frappe.db.sql('delete from `tabDynamic Link`')

	# Office address dynamically linked to the test customer.
	frappe.get_doc({
		"doctype": "Address",
		"address_title": "_Test Address for Customer",
		"address_type": "Office",
		"address_line1": "Station Road",
		"city": "_Test City",
		"state": "Test State",
		"country": "India",
		"links": [
			{
				"link_doctype": "Customer",
				"link_name": "_Test Customer"
			}
		]
	}).insert()

	# Contact with primary e-mail and phone, linked to the same customer.
	contact = frappe.get_doc({
		"doctype": 'Contact',
		"first_name": "_Test Contact for _Test Customer",
		"links": [
			{
				"link_doctype": "Customer",
				"link_name": "_Test Customer"
			}
		]
	})
	contact.add_email("test_contact_customer@example.com", is_primary=True)
	contact.add_phone("+91 0000000000", is_primary_phone=True)
	contact.insert()
@contextmanager
def change_settings(doctype, settings_dict):
	""" A context manager to ensure that settings are changed before running
	function and restored after running it regardless of exceptions occured.
	This is useful in tests where you want to make changes in a function but
	don't retain those changes.
	import and use as decorator to cover full function or using `with` statement.

	example:
	@change_settings("Stock Settings", {"item_naming_by": "Naming Series"})
	def test_case(self):
		...
	"""
	try:
		settings = frappe.get_doc(doctype)
		# remember setting (deepcopy gives a dict with the right keys; each
		# value is then replaced by the currently stored one)
		previous_settings = copy.deepcopy(settings_dict)
		for key in previous_settings:
			previous_settings[key] = getattr(settings, key)
		# change setting
		for key, value in settings_dict.items():
			setattr(settings, key, value)
		settings.save()
		yield # yield control to calling function
	finally:
		# restore settings
		# NOTE(review): if frappe.get_doc() above raised before
		# previous_settings was bound, this block hits a NameError — confirm.
		settings = frappe.get_doc(doctype)
		for key, value in previous_settings.items():
			setattr(settings, key, value)
		settings.save()
| gpl-3.0 |
thinkgen/thirdparty | plugin.video.GOtv/default.py | 1 | 171848 | # -*- coding: utf-8 -*-
'''
GOtv XBMC Addon
Copyright (C) 2014 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,re,os,threading,datetime,time,base64,xbmc,xbmcplugin,xbmcgui,xbmcaddon,xbmcvfs
from operator import itemgetter
try: import json
except: import simplejson as json
try: import CommonFunctions
except: import commonfunctionsdummy as CommonFunctions
try: import StorageServer
except: import storageserverdummy as StorageServer
from metahandler import metahandlers
from metahandler import metacontainers
# Module-level state and add-on wide constants.
action = None
common = CommonFunctions
metaget = metahandlers.MetaData(preparezip=False)
# Shortcuts into this add-on's own settings/info.
language = xbmcaddon.Addon().getLocalizedString
setSetting = xbmcaddon.Addon().setSetting
getSetting = xbmcaddon.Addon().getSetting
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
addonFullId = addonName + addonVersion
addonDesc = language(30450).encode("utf-8")
# Cached-function wrappers with increasing lifetimes (1 / 24 / 720;
# presumably hours — verify against StorageServer's API).
cache = StorageServer.StorageServer(addonFullId,1).cacheFunction
cache2 = StorageServer.StorageServer(addonFullId,24).cacheFunction
cache3 = StorageServer.StorageServer(addonFullId,720).cacheFunction
# Artwork bundled with the add-on.
addonIcon = os.path.join(addonPath,'icon.png')
addonFanart = os.path.join(addonPath,'fanart.jpg')
addonArt = os.path.join(addonPath,'resources/art')
addonPoster = os.path.join(addonPath,'resources/art/Poster.png')
addonDownloads = os.path.join(addonPath,'resources/art/Downloads.png')
addonGenres = os.path.join(addonPath,'resources/art/Genres.png')
addonCalendar = os.path.join(addonPath,'resources/art/Calendar.png')
addonLists = os.path.join(addonPath,'resources/art/Lists.png')
addonNext = os.path.join(addonPath,'resources/art/Next.png')
# Per-profile data files kept in the add-on's userdata directory.
dataPath = xbmc.translatePath('special://profile/addon_data/%s' % (addonId))
viewData = os.path.join(dataPath,'views.cfg')
offData = os.path.join(dataPath,'offset.cfg')
favData = os.path.join(dataPath,'favourites.cfg')
subData = os.path.join(dataPath,'subscriptions.cfg')
class main:
    # Plugin entry point: parses the plugin:// query string from sys.argv[2]
    # and dispatches to the matching handler, then finalizes the directory.
    def __init__(self):
        global action
        index().container_data()
        index().settings_reset()
        # sys.argv[2] carries the "?key=value&..." query string of this call.
        params = {}
        splitparams = sys.argv[2][sys.argv[2].find('?') + 1:].split('&')
        for param in splitparams:
            if (len(param) > 0):
                splitparam = param.split('=')
                key = splitparam[0]
                try: value = splitparam[1].encode("utf-8")
                except: value = splitparam[1]
                params[key] = value
        # Every known parameter defaults to None when absent.
        try: action = urllib.unquote_plus(params["action"])
        except: action = None
        try: name = urllib.unquote_plus(params["name"])
        except: name = None
        try: url = urllib.unquote_plus(params["url"])
        except: url = None
        try: image = urllib.unquote_plus(params["image"])
        except: image = None
        try: query = urllib.unquote_plus(params["query"])
        except: query = None
        try: title = urllib.unquote_plus(params["title"])
        except: title = None
        try: year = urllib.unquote_plus(params["year"])
        except: year = None
        try: imdb = urllib.unquote_plus(params["imdb"])
        except: imdb = None
        try: tvdb = urllib.unquote_plus(params["tvdb"])
        except: tvdb = None
        try: genre = urllib.unquote_plus(params["genre"])
        except: genre = None
        try: plot = urllib.unquote_plus(params["plot"])
        except: plot = None
        try: show = urllib.unquote_plus(params["show"])
        except: show = None
        try: show_alt = urllib.unquote_plus(params["show_alt"])
        except: show_alt = None
        try: season = urllib.unquote_plus(params["season"])
        except: season = None
        try: episode = urllib.unquote_plus(params["episode"])
        except: episode = None
        # Dispatch table: map the "action" parameter onto the handler classes
        # defined elsewhere in this add-on.
        if action == None: root().get()
        elif action == 'root_search': root().search()
        elif action == 'item_play': contextMenu().item_play()
        elif action == 'item_random_play': contextMenu().item_random_play()
        elif action == 'item_queue': contextMenu().item_queue()
        elif action == 'item_play_from_here': contextMenu().item_play_from_here(url)
        elif action == 'favourite_add': contextMenu().favourite_add(favData, name, url, image, imdb, year)
        elif action == 'favourite_from_search': contextMenu().favourite_from_search(favData, name, url, image, imdb, year)
        elif action == 'favourite_delete': contextMenu().favourite_delete(favData, name, url)
        elif action == 'favourite_moveUp': contextMenu().favourite_moveUp(favData, name, url)
        elif action == 'favourite_moveDown': contextMenu().favourite_moveDown(favData, name, url)
        elif action == 'subscription_add': contextMenu().subscription_add(name, url, image, imdb, year)
        elif action == 'subscription_from_search': contextMenu().subscription_from_search(name, url, image, imdb, year)
        elif action == 'subscription_delete': contextMenu().subscription_delete(name, url)
        elif action == 'subscriptions_update': contextMenu().subscriptions_update()
        elif action == 'subscriptions_service': contextMenu().subscriptions_update(silent=True)
        elif action == 'playlist_open': contextMenu().playlist_open()
        elif action == 'settings_open': contextMenu().settings_open()
        elif action == 'addon_home': contextMenu().addon_home()
        elif action == 'view_tvshows': contextMenu().view('tvshows')
        elif action == 'view_seasons': contextMenu().view('seasons')
        elif action == 'view_episodes': contextMenu().view('episodes')
        elif action == 'metadata_tvshows': contextMenu().metadata('tvshow', imdb, '', '')
        elif action == 'metadata_tvshows2': contextMenu().metadata('tvshow', imdb, '', '')
        elif action == 'metadata_seasons': contextMenu().metadata('season', imdb, season, '')
        elif action == 'metadata_episodes': contextMenu().metadata('episode', imdb, season, episode)
        elif action == 'playcount_tvshows': contextMenu().playcount('tvshow', imdb, '', '')
        elif action == 'playcount_seasons': contextMenu().playcount('season', imdb, season, '')
        elif action == 'playcount_episodes': contextMenu().playcount('episode', imdb, season, episode)
        elif action == 'subscriptions_batch': contextMenu().subscriptions_batch(url)
        elif action == 'library': contextMenu().library(name, url, imdb, year)
        elif action == 'download': contextMenu().download(name, title, imdb, tvdb, year, season, episode, show, show_alt)
        elif action == 'sources': contextMenu().sources(name, title, imdb, tvdb, year, season, episode, show, show_alt)
        elif action == 'autoplay': contextMenu().autoplay(name, title, imdb, tvdb, year, season, episode, show, show_alt)
        elif action == 'shows_favourites': favourites().shows()
        elif action == 'shows_subscriptions': subscriptions().shows()
        elif action == 'episodes_subscriptions': subscriptions().episodes()
        elif action == 'shows': shows().get(url)
        elif action == 'shows_userlists': shows().get(url)
        elif action == 'shows_popular': shows().popular()
        elif action == 'shows_rating': shows().rating()
        elif action == 'shows_views': shows().views()
        elif action == 'shows_active': shows().active()
        elif action == 'shows_trending': shows().trending()
        elif action == 'shows_search': shows().search(query)
        elif action == 'actors_search': actors().search(query)
        elif action == 'genres_shows': genres().get()
        elif action == 'calendar_episodes': calendar().get()
        elif action == 'userlists_trakt': userlists().trakt()
        elif action == 'userlists_imdb': userlists().imdb()
        elif action == 'seasons': seasons().get(url, image, year, imdb, genre, plot, show)
        elif action == 'episodes': episodes().get(name, url, image, year, imdb, tvdb, genre, plot, show, show_alt)
        elif action == 'episodes_calendar': episodes().calendar(url)
        elif action == 'play': resolver().run(name, title, imdb, tvdb, year, season, episode, show, show_alt, url)
        # Pick the content type / skin view matching the listing just built.
        if action is None:
            pass
        elif action.startswith('shows'):
            xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
            index().container_view('tvshows', {'skin.confluence' : 500})
        elif action.startswith('seasons'):
            xbmcplugin.setContent(int(sys.argv[1]), 'seasons')
            index().container_view('seasons', {'skin.confluence' : 500})
        elif action.startswith('episodes'):
            xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
            index().container_view('episodes', {'skin.confluence' : 504})
        xbmcplugin.setPluginFanart(int(sys.argv[1]), addonFanart)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
        return
class getUrl(object):
    """Fetch a URL (Python 2 / urllib2); the outcome is exposed as ``self.result``.

    Depending on ``output`` the result is the response body (default), the
    final URL after redirects ('geturl') or the Set-Cookie response header
    ('cookie').
    """
    def __init__(self, url, close=True, proxy=None, post=None, mobile=False, referer=None, cookie=None, output='', timeout='10'):
        if not proxy is None:
            # Route plain-http traffic through the given proxy.
            proxy_handler = urllib2.ProxyHandler({'http':'%s' % (proxy)})
            opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
            opener = urllib2.install_opener(opener)
        if output == 'cookie' or not close == True:
            # A cookie jar is needed either to report cookies back or to keep
            # the session usable when the caller asked not to close it.
            import cookielib
            cookie_handler = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())
            opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
            opener = urllib2.install_opener(opener)
        if not post is None:
            # Presence of a payload turns the request into a POST.
            request = urllib2.Request(url, post)
        else:
            request = urllib2.Request(url,None)
        if mobile == True:
            # Impersonate an iPhone browser; some sites serve simpler pages.
            request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
        else:
            request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0')
        if not referer is None:
            request.add_header('Referer', referer)
        if not cookie is None:
            request.add_header('cookie', cookie)
        response = urllib2.urlopen(request, timeout=int(timeout))
        if output == 'cookie':
            result = str(response.headers.get('Set-Cookie'))
        elif output == 'geturl':
            result = response.geturl()
        else:
            result = response.read()
        if close == True:
            response.close()
        self.result = result
class uniqueList(object):
    """Order-preserving de-duplication: ``uniqueList(seq).list`` holds the
    items of ``seq`` with every repeat after the first occurrence dropped."""

    def __init__(self, list):
        seen = set()
        deduped = []
        for item in list:
            # The set gives O(1) membership checks while the list keeps order.
            if item in seen:
                continue
            seen.add(item)
            deduped.append(item)
        self.list = deduped
class Thread(threading.Thread):
    """A tiny Thread wrapper that runs ``target(*args)`` on the worker thread.

    The base-class constructor must run *before* the callable is stored: on
    Python 3, ``threading.Thread.__init__`` assigns ``self._target`` and
    ``self._args`` itself (to ``None``/``()``), so assigning them first would
    get clobbered and ``run`` would crash.  On Python 2 the base class uses
    name-mangled attributes, so this ordering is safe there as well.
    """
    def __init__(self, target, *args):
        threading.Thread.__init__(self)
        self._target = target  # callable executed by run()
        self._args = args      # positional arguments for the callable

    def run(self):
        # Executed on the new thread after start().
        self._target(*self._args)
class player(xbmc.Player):
def __init__ (self):
self.folderPath = xbmc.getInfoLabel('Container.FolderPath')
self.PseudoTVRunning = index().getProperty('PseudoTVRunning')
self.loadingStarting = time.time()
xbmc.Player.__init__(self)
def run(self, name, url, imdb='0'):
self.video_info(name, imdb)
if self.folderPath.startswith(sys.argv[0]) or self.PseudoTVRunning == 'True':
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
try:
file = self.name + '.strm'
file = file.translate(None, '\/:*?"<>|')
meta = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["title", "plot", "votes", "rating", "writer", "firstaired", "playcount", "runtime", "director", "productioncode", "season", "episode", "originaltitle", "showtitle", "lastplayed", "fanart", "thumbnail", "file", "resume", "tvshowid", "dateadded", "uniqueid"]}, "id": 1}' % (self.season, self.episode))
meta = unicode(meta, 'utf-8', errors='ignore')
meta = json.loads(meta)
meta = meta['result']['episodes']
self.meta = [i for i in meta if i['file'].endswith(file)][0]
meta = {'title': self.meta['title'], 'tvshowtitle': self.meta['showtitle'], 'season': self.meta['season'], 'episode': self.meta['episode'], 'writer': str(self.meta['writer']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'director': str(self.meta['director']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'rating': self.meta['rating'], 'duration': self.meta['runtime'], 'premiered': self.meta['firstaired'], 'plot': self.meta['plot']}
poster = self.meta['thumbnail']
except:
meta = {'label': self.name, 'title': self.name}
poster = ''
item = xbmcgui.ListItem(path=url, iconImage="DefaultVideo.png", thumbnailImage=poster)
item.setInfo( type="Video", infoLabels= meta )
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
for i in range(0, 250):
try: self.totalTime = self.getTotalTime()
except: self.totalTime = 0
if not self.totalTime == 0: continue
xbmc.sleep(1000)
if self.totalTime == 0: return
while True:
try: self.currentTime = self.getTime()
except: break
xbmc.sleep(1000)
def video_info(self, name, imdb):
self.name = name
self.content = 'episode'
self.show = self.name.rsplit(' ', 1)[0]
if imdb == '0': imdb = metaget.get_meta('tvshow', self.show)['imdb_id']
self.imdb = re.sub('[^0-9]', '', imdb)
self.season = '%01d' % int(name.rsplit(' ', 1)[-1].split('S')[-1].split('E')[0])
self.episode = '%01d' % int(name.rsplit(' ', 1)[-1].split('E')[-1])
self.subtitle = subtitles().get(self.name, self.imdb, self.season, self.episode)
def container_refresh(self):
try:
params = {}
query = self.folderPath[self.folderPath.find('?') + 1:].split('&')
for i in query: params[i.split('=')[0]] = i.split('=')[1]
if not params["action"].endswith('_search'): index().container_refresh()
except:
pass
def offset_add(self):
try:
file = xbmcvfs.File(offData)
read = file.read()
file.close()
write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
write.append('"%s"|"%s"|"%s"' % (self.name, self.imdb, self.currentTime))
write = '\r\n'.join(write)
file = xbmcvfs.File(offData, 'w')
file.write(str(write))
file.close()
except:
return
def offset_delete(self):
try:
file = xbmcvfs.File(offData)
read = file.read()
file.close()
write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
write = [i for i in write if not '"%s"|"%s"|"' % (self.name, self.imdb) in i]
write = '\r\n'.join(write)
file = xbmcvfs.File(offData, 'w')
file.write(str(write))
file.close()
except:
return
def offset_read(self):
try:
self.offset = '0'
file = xbmcvfs.File(offData)
read = file.read()
file.close()
read = [i for i in read.splitlines(True) if '"%s"|"%s"|"' % (self.name, self.imdb) in i][0]
self.offset = re.compile('".+?"[|]".+?"[|]"(.+?)"').findall(read)[0]
except:
return
def change_watched(self):
try:
xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.meta['episodeid']))
except:
metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='', watched=7)
def resume_playback(self):
offset = float(self.offset)
if not offset > 0: return
minutes, seconds = divmod(offset, 60)
hours, minutes = divmod(minutes, 60)
offset_time = '%02d:%02d:%02d' % (hours, minutes, seconds)
yes = index().yesnoDialog('%s %s' % (language(30350).encode("utf-8"), offset_time), '', self.name, language(30351).encode("utf-8"), language(30352).encode("utf-8"))
if yes: self.seekTime(offset)
def onPlayBackStarted(self):
try: self.setSubtitles(self.subtitle)
except: pass
if self.PseudoTVRunning == 'True': return
if getSetting("playback_info") == 'true':
elapsedTime = '%s %.2f seconds' % (language(30319).encode("utf-8"), (time.time() - self.loadingStarting))
index().infoDialog(elapsedTime, header=self.name)
if getSetting("resume_playback") == 'true':
self.offset_read()
self.resume_playback()
def onPlayBackEnded(self):
if self.PseudoTVRunning == 'True': return
self.change_watched()
self.offset_delete()
self.container_refresh()
def onPlayBackStopped(self):
if self.PseudoTVRunning == 'True': return
if self.currentTime / self.totalTime >= .9:
self.change_watched()
self.offset_delete()
self.offset_add()
self.container_refresh()
class subtitles:
	"""Fetches a subtitle file from OpenSubtitles via its XML-RPC API."""
	def get(self, name, imdb, season, episode):
		"""Download the best subtitle for an episode and return its temp-file path.

		Returns None when subtitles are disabled, the search fails, or the
		download fails (an info dialog is shown in the last case).
		"""
		if not getSetting("subtitles") == 'true': return
		# Preferred release tags, best quality first — used to rank results.
		quality = ['bluray', 'hdrip', 'brrip', 'bdrip', 'dvdrip', 'webrip', 'hdtv']
		langDict = {'Afrikaans': 'afr', 'Albanian': 'alb', 'Arabic': 'ara', 'Armenian': 'arm', 'Basque': 'baq', 'Bengali': 'ben', 'Bosnian': 'bos', 'Breton': 'bre', 'Bulgarian': 'bul', 'Burmese': 'bur', 'Catalan': 'cat', 'Chinese': 'chi', 'Croatian': 'hrv', 'Czech': 'cze', 'Danish': 'dan', 'Dutch': 'dut', 'English': 'eng', 'Esperanto': 'epo', 'Estonian': 'est', 'Finnish': 'fin', 'French': 'fre', 'Galician': 'glg', 'Georgian': 'geo', 'German': 'ger', 'Greek': 'ell', 'Hebrew': 'heb', 'Hindi': 'hin', 'Hungarian': 'hun', 'Icelandic': 'ice', 'Indonesian': 'ind', 'Italian': 'ita', 'Japanese': 'jpn', 'Kazakh': 'kaz', 'Khmer': 'khm', 'Korean': 'kor', 'Latvian': 'lav', 'Lithuanian': 'lit', 'Luxembourgish': 'ltz', 'Macedonian': 'mac', 'Malay': 'may', 'Malayalam': 'mal', 'Manipuri': 'mni', 'Mongolian': 'mon', 'Montenegrin': 'mne', 'Norwegian': 'nor', 'Occitan': 'oci', 'Persian': 'per', 'Polish': 'pol', 'Portuguese': 'por,pob', 'Portuguese(Brazil)': 'pob,por', 'Romanian': 'rum', 'Russian': 'rus', 'Serbian': 'scc', 'Sinhalese': 'sin', 'Slovak': 'slo', 'Slovenian': 'slv', 'Spanish': 'spa', 'Swahili': 'swa', 'Swedish': 'swe', 'Syriac': 'syr', 'Tagalog': 'tgl', 'Tamil': 'tam', 'Telugu': 'tel', 'Thai': 'tha', 'Turkish': 'tur', 'Ukrainian': 'ukr', 'Urdu': 'urd'}
		# Collect the user's (up to two) preferred subtitle languages.
		langs = []
		try: langs.append(langDict[getSetting("sublang1")])
		except: pass
		try: langs.append(langDict[getSetting("sublang2")])
		except: pass
		langs = ','.join(langs)
		try:
			import xmlrpclib
			server = xmlrpclib.Server('http://api.opensubtitles.org/xml-rpc', verbose=0)
			# Anonymous login; search by imdb id + season/episode.
			token = server.LogIn('', '', 'en', 'XBMC_Subtitles_v1')['token']
			result = server.SearchSubtitles(token, [{'sublanguageid': langs, 'imdbid': imdb, 'season': season, 'episode': episode}])['data']
			# Keep single-CD subtitles only.
			result = [i for i in result if i['SubSumCD'] == '1']
		except:
			return
		subtitles = []
		# Take the first preferred language that has any hits; rank its hits
		# by release-quality tag, then append the untagged ones.
		for lang in langs.split(','):
			filter = [i for i in result if lang == i['SubLanguageID']]
			if filter == []: continue
			for q in quality: subtitles += [i for i in filter if q in i['MovieReleaseName'].lower()]
			subtitles += [i for i in filter if not any(x in i['MovieReleaseName'].lower() for x in quality)]
			try: lang = xbmc.convertLanguage(lang, xbmc.ISO_639_1)
			except: pass
			break
		try:
			import zlib, base64
			# Downloads arrive base64-encoded and gzip-compressed.
			content = [subtitles[0]["IDSubtitleFile"],]
			content = server.DownloadSubtitles(token, content)
			content = base64.b64decode(content['data'][0]['data'])
			content = zlib.decompressobj(16+zlib.MAX_WBITS).decompress(content)
			subtitle = xbmc.translatePath('special://temp/')
			subtitle = os.path.join(subtitle, 'TemporarySubs.%s.srt' % lang)
			file = open(subtitle, 'wb')
			file.write(content)
			file.close()
			return subtitle
		except:
			index().infoDialog(language(30317).encode("utf-8"), name)
			return
class index:
def infoDialog(self, str, header=addonName):
try: xbmcgui.Dialog().notification(header, str, addonIcon, 3000, sound=False)
except: xbmc.executebuiltin("Notification(%s,%s, 3000, %s)" % (header, str, addonIcon))
def okDialog(self, str1, str2, header=addonName):
xbmcgui.Dialog().ok(header, str1, str2)
def selectDialog(self, list, header=addonName):
select = xbmcgui.Dialog().select(header, list)
return select
def yesnoDialog(self, str1, str2, header=addonName, str3='', str4=''):
answer = xbmcgui.Dialog().yesno(header, str1, str2, '', str4, str3)
return answer
def getProperty(self, str):
property = xbmcgui.Window(10000).getProperty(str)
return property
def setProperty(self, str1, str2):
xbmcgui.Window(10000).setProperty(str1, str2)
def clearProperty(self, str):
xbmcgui.Window(10000).clearProperty(str)
def addon_status(self, id):
check = xbmcaddon.Addon(id=id).getAddonInfo("name")
if not check == addonName: return True
	def container_refresh(self):
		"""Ask Kodi to refresh the current directory listing."""
		xbmc.executebuiltin("Container.Refresh")
def container_data(self):
if not xbmcvfs.exists(dataPath):
xbmcvfs.mkdir(dataPath)
if not xbmcvfs.exists(favData):
file = xbmcvfs.File(favData, 'w')
file.write('')
file.close()
if not xbmcvfs.exists(subData):
file = xbmcvfs.File(subData, 'w')
file.write('')
file.close()
if not xbmcvfs.exists(viewData):
file = xbmcvfs.File(viewData, 'w')
file.write('')
file.close()
if not xbmcvfs.exists(offData):
file = xbmcvfs.File(offData, 'w')
file.write('')
file.close()
	def settings_reset(self):
		"""One-shot migration: reset host/autoplay settings to their XML defaults.

		Runs only until 'settings_version' reads 2.4.0; parses the default
		values straight out of resources/settings.xml. Best-effort: any
		failure silently aborts the reset.
		"""
		try:
			# Already migrated — nothing to do.
			if getSetting("settings_version") == '2.4.0': return
			settings = os.path.join(addonPath,'resources/settings.xml')
			file = xbmcvfs.File(settings)
			read = file.read()
			file.close()
			# Restore each host slot to the default declared in the XML.
			for i in range (1,7): setSetting('hosthd' + str(i), common.parseDOM(read, "setting", ret="default", attrs = {"id": 'hosthd' + str(i)})[0])
			for i in range (1,16): setSetting('host' + str(i), common.parseDOM(read, "setting", ret="default", attrs = {"id": 'host' + str(i)})[0])
			setSetting('autoplay_library', common.parseDOM(read, "setting", ret="default", attrs = {"id": 'autoplay_library'})[0])
			setSetting('autoplay', common.parseDOM(read, "setting", ret="default", attrs = {"id": 'autoplay'})[0])
			# Stamp the version so this migration never runs again.
			setSetting('settings_version', '2.4.0')
		except:
			return
def container_view(self, content, viewDict):
try:
skin = xbmc.getSkinDir()
file = xbmcvfs.File(viewData)
read = file.read().replace('\n','')
file.close()
view = re.compile('"%s"[|]"%s"[|]"(.+?)"' % (skin, content)).findall(read)[0]
xbmc.executebuiltin('Container.SetViewMode(%s)' % str(view))
except:
try:
id = str(viewDict[skin])
xbmc.executebuiltin('Container.SetViewMode(%s)' % id)
except:
pass
	def rootList(self, rootList):
		"""Build the addon's root menu from a list of {name, image, action} dicts.

		'name' is a localization id; each entry becomes a folder item that
		dispatches back into this plugin with its 'action'.
		"""
		total = len(rootList)
		for i in rootList:
			try:
				name = language(i['name']).encode("utf-8")
				image = '%s/%s' % (addonArt, i['image'])
				action = i['action']
				u = '%s?action=%s' % (sys.argv[0], action)
				cm = []
				# Context-menu extras for the trending / subscriptions entries.
				if action == 'shows_trending':
					cm.append((language(30423).encode("utf-8"), 'RunPlugin(%s?action=subscriptions_batch&url=%s)' % (sys.argv[0], action)))
				elif action.endswith('_subscriptions'):
					cm.append((language(30425).encode("utf-8"), 'RunPlugin(%s?action=subscriptions_update)' % (sys.argv[0])))
				item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
				item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
				item.setProperty("Fanart_Image", addonFanart)
				item.addContextMenuItems(cm, replaceItems=False)
				xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
			except:
				pass
def pageList(self, pageList):
if pageList == None: return
total = len(pageList)
for i in pageList:
try:
name, url, image = i['name'], i['url'], i['image']
sysname, sysurl, sysimage = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image)
u = '%s?action=shows&url=%s' % (sys.argv[0], sysurl)
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
except:
pass
def pageList2(self, pageList):
if pageList == None: return
total = len(pageList)
for i in pageList:
try:
name, url, image = i['name'], i['url'], i['image']
sysname, sysurl, sysimage = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image)
u = '%s?action=episodes_calendar&url=%s' % (sys.argv[0], sysurl)
cm = []
cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems(cm, replaceItems=True)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
except:
pass
def userList(self, userList):
if userList == None: return
total = len(userList)
for i in userList:
try:
name, url, image = i['name'], i['url'], i['image']
sysname, sysurl, sysimage = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image)
u = '%s?action=shows_userlists&url=%s' % (sys.argv[0], sysurl)
cm = []
cm.append((language(30423).encode("utf-8"), 'RunPlugin(%s?action=subscriptions_batch&url=%s)' % (sys.argv[0], sysurl)))
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems(cm, replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
except:
pass
def nextList(self, nextList):
try: next = nextList[0]['next']
except: return
if next == '': return
name, url, image = language(30361).encode("utf-8"), next, addonNext
sysurl = urllib.quote_plus(url)
u = '%s?action=shows&url=%s' % (sys.argv[0], sysurl)
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=True)
def downloadList(self):
u = getSetting("downloads")
if u == '': return
name, image = language(30363).encode("utf-8"), addonDownloads
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=True)
	def showList(self, showList):
		"""Build directory items for a list of TV shows.

		Optionally enriches each item with metahandler metadata and builds
		an action-specific context menu (favourites / subscriptions /
		search variants). Per-item failures are silently skipped.
		"""
		if showList == None: return
		getmeta = getSetting("meta")
		# Search results skip the metadata lookup to stay responsive.
		if action == 'shows_search': getmeta = ''
		# Favourites/subscriptions contents decide which cm entries to show.
		file = xbmcvfs.File(favData)
		favRead = file.read()
		file.close()
		file = xbmcvfs.File(subData)
		subRead = file.read()
		file.close()
		total = len(showList)
		for i in showList:
			try:
				name, url, image, year, imdb, genre, plot = i['name'], i['url'], i['image'], i['year'], i['imdb'], i['genre'], i['plot']
				if plot == '': plot = addonDesc
				if genre == '': genre = ' '
				title = name
				sysname, sysurl, sysimage, sysyear, sysimdb, sysgenre, sysplot = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image), urllib.quote_plus(year), urllib.quote_plus(imdb), urllib.quote_plus(genre), urllib.quote_plus(plot)
				u = '%s?action=seasons&url=%s&image=%s&year=%s&imdb=%s&genre=%s&plot=%s&show=%s' % (sys.argv[0], sysurl, sysimage, sysyear, sysimdb, sysgenre, sysplot, sysname)
				if getmeta == 'true':
					# Pull show metadata and pick poster/banner artwork.
					meta = metaget.get_meta('tvshow', title, imdb_id=imdb)
					meta.update({'playcount': 0, 'overlay': 0})
					playcountMenu = language(30407).encode("utf-8")
					if meta['overlay'] == 6: playcountMenu = language(30408).encode("utf-8")
					metaimdb = urllib.quote_plus(re.sub('[^0-9]', '', meta['imdb_id']))
					poster, banner = meta['cover_url'], meta['banner_url']
					if banner == '': banner = poster
					if banner == '': banner = image
					if poster == '': poster = image
				else:
					meta = {'label': title, 'title': title, 'tvshowtitle': title, 'year' : year, 'imdb_id' : imdb, 'genre' : genre, 'plot': plot}
					poster, banner = image, image
				if getmeta == 'true' and getSetting("fanart") == 'true':
					fanart = meta['backdrop_url']
					if fanart == '': fanart = addonFanart
				else:
					fanart = addonFanart
				meta.update({'art(banner)': banner, 'art(poster)': poster})
				# Context menu varies by the listing action that produced us.
				cm = []
				cm.append((language(30401).encode("utf-8"), 'RunPlugin(%s?action=item_play)' % (sys.argv[0])))
				cm.append((language(30413).encode("utf-8"), 'Action(Info)'))
				if action == 'shows_favourites':
					if getmeta == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_tvshows&imdb=%s)' % (sys.argv[0], metaimdb)))
					#if getmeta == 'true': cm.append((playcountMenu, 'RunPlugin(%s?action=playcount_tvshows&imdb=%s)' % (sys.argv[0], metaimdb)))
					if not '"%s"' % url in subRead: cm.append((language(30423).encode("utf-8"), 'RunPlugin(%s?action=subscription_add&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					else: cm.append((language(30424).encode("utf-8"), 'RunPlugin(%s?action=subscription_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					cm.append((language(30429).encode("utf-8"), 'RunPlugin(%s?action=view_tvshows)' % (sys.argv[0])))
					if getSetting("fav_sort") == '2': cm.append((language(30419).encode("utf-8"), 'RunPlugin(%s?action=favourite_moveUp&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					if getSetting("fav_sort") == '2': cm.append((language(30420).encode("utf-8"), 'RunPlugin(%s?action=favourite_moveDown&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					cm.append((language(30421).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
				elif action == 'shows_subscriptions':
					if getmeta == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_tvshows&imdb=%s)' % (sys.argv[0], metaimdb)))
					#if getmeta == 'true': cm.append((playcountMenu, 'RunPlugin(%s?action=playcount_tvshows&imdb=%s)' % (sys.argv[0], metaimdb)))
					if not '"%s"' % url in subRead: cm.append((language(30423).encode("utf-8"), 'RunPlugin(%s?action=subscription_add&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					else: cm.append((language(30424).encode("utf-8"), 'RunPlugin(%s?action=subscription_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					cm.append((language(30425).encode("utf-8"), 'RunPlugin(%s?action=subscriptions_update)' % (sys.argv[0])))
					if not '"%s"' % url in favRead: cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_add&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					else: cm.append((language(30418).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					cm.append((language(30429).encode("utf-8"), 'RunPlugin(%s?action=view_tvshows)' % (sys.argv[0])))
					cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
				elif action.startswith('shows_search'):
					cm.append((language(30423).encode("utf-8"), 'RunPlugin(%s?action=subscription_from_search&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_from_search&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					cm.append((language(30429).encode("utf-8"), 'RunPlugin(%s?action=view_tvshows)' % (sys.argv[0])))
					cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
					cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
					cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
				else:
					if getmeta == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_tvshows2&imdb=%s)' % (sys.argv[0], metaimdb)))
					if not '"%s"' % url in subRead: cm.append((language(30423).encode("utf-8"), 'RunPlugin(%s?action=subscription_add&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					else: cm.append((language(30424).encode("utf-8"), 'RunPlugin(%s?action=subscription_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					if not '"%s"' % url in favRead: cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_add&name=%s&imdb=%s&url=%s&image=%s&year=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage, sysyear)))
					else: cm.append((language(30418).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
					cm.append((language(30429).encode("utf-8"), 'RunPlugin(%s?action=view_tvshows)' % (sys.argv[0])))
					cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
					cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
					cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
				if action == 'shows_search':
					# Tag search hits already stored as favourite/subscription.
					if ('"%s"' % url in favRead and '"%s"' % url in subRead): suffix = '|F|S| '
					elif '"%s"' % url in favRead: suffix = '|F| '
					elif '"%s"' % url in subRead: suffix = '|S| '
					else: suffix = ''
					name = suffix + name
				item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=poster)
				item.setInfo( type="Video", infoLabels = meta )
				item.setProperty("IsPlayable", "true")
				item.setProperty("Video", "true")
				item.setProperty("Fanart_Image", fanart)
				item.addContextMenuItems(cm, replaceItems=True)
				xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
			except:
				pass
def seasonList(self, seasonList):
if seasonList == None: return
try:
year, imdb, tvdb, genre, plot, show, show_alt = seasonList[0]['year'], seasonList[0]['imdb'], seasonList[0]['tvdb'], seasonList[0]['genre'], seasonList[0]['plot'], seasonList[0]['show'], seasonList[0]['show_alt']
if plot == '': plot = addonDesc
if genre == '': genre = ' '
if getSetting("meta") == 'true':
seasons = []
for i in seasonList: seasons.append(i['season'])
season_meta = metaget.get_seasons(show, imdb, seasons)
meta = metaget.get_meta('tvshow', show, imdb_id=imdb)
banner = meta['banner_url']
else:
meta = {'tvshowtitle': show, 'imdb_id' : imdb, 'genre' : genre, 'plot': plot}
banner = ''
if getSetting("meta") == 'true' and getSetting("fanart") == 'true':
fanart = meta['backdrop_url']
if fanart == '': fanart = addonFanart
else:
fanart = addonFanart
except:
return
total = len(seasonList)
for i in range(0, int(total)):
try:
name, url, image = seasonList[i]['name'], seasonList[i]['url'], seasonList[i]['image']
sysname, sysurl, sysimage, sysyear, sysimdb, systvdb, sysgenre, sysplot, sysshow, sysshow_alt = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image), urllib.quote_plus(year), urllib.quote_plus(imdb), urllib.quote_plus(tvdb), urllib.quote_plus(genre), urllib.quote_plus(plot), urllib.quote_plus(show), urllib.quote_plus(show_alt)
u = '%s?action=episodes&name=%s&url=%s&image=%s&year=%s&imdb=%s&tvdb=%s&genre=%s&plot=%s&show=%s&show_alt=%s' % (sys.argv[0], sysname, sysurl, sysimage, sysyear, sysimdb, systvdb, sysgenre, sysplot, sysshow, sysshow_alt)
if getSetting("meta") == 'true':
meta.update({'playcount': 0, 'overlay': 0})
#meta.update({'playcount': season_meta[i]['playcount'], 'overlay': season_meta[i]['overlay']})
poster = season_meta[i]['cover_url']
playcountMenu = language(30407).encode("utf-8")
if season_meta[i]['overlay'] == 6: playcountMenu = language(30408).encode("utf-8")
metaimdb, metaseason = urllib.quote_plus(re.sub('[^0-9]', '', str(season_meta[i]['imdb_id']))), urllib.quote_plus(str(season_meta[i]['season']))
if poster == '': poster = image
if banner == '': banner = poster
if banner == '': banner = image
else:
poster, banner = image, image
meta.update({'label': name, 'title': name, 'art(season.banner)': banner, 'art(season.poster': poster})
cm = []
cm.append((language(30401).encode("utf-8"), 'RunPlugin(%s?action=item_play)' % (sys.argv[0])))
cm.append((language(30404).encode("utf-8"), 'RunPlugin(%s?action=item_queue)' % (sys.argv[0])))
cm.append((language(30413).encode("utf-8"), 'Action(Info)'))
if getSetting("meta") == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_seasons&imdb=%s&season=%s)' % (sys.argv[0], metaimdb, metaseason)))
#if getSetting("meta") == 'true': cm.append((playcountMenu, 'RunPlugin(%s?action=playcount_seasons&imdb=%s&season=%s)' % (sys.argv[0], metaimdb, metaseason)))
cm.append((language(30430).encode("utf-8"), 'RunPlugin(%s?action=view_seasons)' % (sys.argv[0])))
cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=poster)
item.setInfo( type="Video", infoLabels = meta )
item.setProperty("IsPlayable", "true")
item.setProperty("Video", "true")
item.setProperty("Fanart_Image", fanart)
item.addContextMenuItems(cm, replaceItems=True)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
except:
pass
	def episodeList(self, episodeList):
		"""Build playable items for a list of episodes.

		Optionally enriches each episode with metahandler metadata; labels
		are '<season>x<episode> . <title>' (prefixed with the show name for
		subscription/calendar listings). Per-item failures are skipped.
		"""
		if episodeList == None: return
		getmeta = getSetting("meta")
		# Calendar listings skip the metadata lookup to stay responsive.
		if action == 'episodes_calendar': getmeta = ''
		total = len(episodeList)
		for i in episodeList:
			try:
				name, url, image, date, year, imdb, tvdb, genre, plot, title, show, show_alt, season, episode = i['name'], i['url'], i['image'], i['date'], i['year'], i['imdb'], i['tvdb'], i['genre'], i['plot'], i['title'], i['show'], i['show_alt'], i['season'], i['episode']
				if plot == '': plot = addonDesc
				if genre == '': genre = ' '
				sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl = urllib.quote_plus(name), urllib.quote_plus(title), urllib.quote_plus(imdb), urllib.quote_plus(tvdb), urllib.quote_plus(year), urllib.quote_plus(season), urllib.quote_plus(episode), urllib.quote_plus(show), urllib.quote_plus(show_alt), urllib.quote_plus(url)
				# The '&t=' timestamp makes each play url unique so Kodi never caches it.
				u = '%s?action=play&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=%s&t=%s' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl, datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))
				if getmeta == 'true':
					# Fetch episode metadata, backfilling blanks from scraped values.
					imdb = re.sub('[^0-9]', '', imdb)
					meta = metaget.get_episode_meta(title, imdb, season, episode)
					meta.update({'tvshowtitle': show})
					if meta['title'] == '': meta.update({'title': title})
					if meta['episode'] == '': meta.update({'episode': episode})
					if meta['premiered'] == '': meta.update({'premiered': date})
					if meta['plot'] == '': meta.update({'plot': plot})
					playcountMenu = language(30407).encode("utf-8")
					if meta['overlay'] == 6: playcountMenu = language(30408).encode("utf-8")
					metaimdb, metaseason, metaepisode = urllib.quote_plus(re.sub('[^0-9]', '', str(meta['imdb_id']))), urllib.quote_plus(str(meta['season'])), urllib.quote_plus(str(meta['episode']))
					label = str(meta['season']) + 'x' + '%02d' % int(meta['episode']) + ' . ' + meta['title']
					if action == 'episodes_subscriptions' or action == 'episodes_calendar': label = show + ' - ' + label
					poster = meta['cover_url']
					if poster == '': poster = image
				else:
					meta = {'label': title, 'title': title, 'tvshowtitle': show, 'season': season, 'episode': episode, 'imdb_id' : imdb, 'year' : year, 'premiered' : date, 'genre' : genre, 'plot': plot}
					label = season + 'x' + '%02d' % int(episode) + ' . ' + title
					if action == 'episodes_subscriptions' or action == 'episodes_calendar': label = show + ' - ' + label
					poster = image
				if getmeta == 'true' and getSetting("fanart") == 'true':
					fanart = meta['backdrop_url']
					if fanart == '': fanart = addonFanart
				else:
					fanart = addonFanart
				# Context menu: toggle autoplay/source-pick, queue, download, info, etc.
				cm = []
				if getSetting("autoplay") == 'true': cm.append((language(30432).encode("utf-8"), 'RunPlugin(%s?action=sources&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=%s)' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl)))
				else: cm.append((language(30433).encode("utf-8"), 'RunPlugin(%s?action=autoplay&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=%s)' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl)))
				cm.append((language(30405).encode("utf-8"), 'RunPlugin(%s?action=item_queue)' % (sys.argv[0])))
				cm.append((language(30406).encode("utf-8"), 'RunPlugin(%s?action=download&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=%s)' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl)))
				cm.append((language(30403).encode("utf-8"), 'RunPlugin(%s?action=item_play_from_here&url=%s)' % (sys.argv[0], sysurl)))
				cm.append((language(30414).encode("utf-8"), 'Action(Info)'))
				if getmeta == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_episodes&imdb=%s&season=%s&episode=%s)' % (sys.argv[0], metaimdb, metaseason, metaepisode)))
				if getmeta == 'true': cm.append((playcountMenu, 'RunPlugin(%s?action=playcount_episodes&imdb=%s&season=%s&episode=%s)' % (sys.argv[0], metaimdb, metaseason, metaepisode)))
				cm.append((language(30431).encode("utf-8"), 'RunPlugin(%s?action=view_episodes)' % (sys.argv[0])))
				cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
				cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
				item = xbmcgui.ListItem(label, iconImage="DefaultVideo.png", thumbnailImage=poster)
				item.setInfo( type="Video", infoLabels = meta )
				item.setProperty("IsPlayable", "true")
				item.setProperty("Video", "true")
				item.setProperty("Fanart_Image", fanart)
				item.addContextMenuItems(cm, replaceItems=True)
				xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=False)
			except:
				pass
class contextMenu:
def item_play(self):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin('Action(Queue)')
playlist.unshuffle()
xbmc.Player().play(playlist)
def item_random_play(self):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin('Action(Queue)')
playlist.shuffle()
xbmc.Player().play(playlist)
	def item_queue(self):
		"""Queue the focused list item onto the current video playlist."""
		xbmc.executebuiltin('Action(Queue)')
def item_play_from_here(self, url):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
playlist.unshuffle()
total = xbmc.getInfoLabel('Container.NumItems')
for i in range(0, int(total)):
i = str(i)
label = xbmc.getInfoLabel('ListItemNoWrap(%s).Label' % i)
if label == '': break
params = {}
path = xbmc.getInfoLabel('ListItemNoWrap(%s).FileNameAndPath' % i)
path = urllib.quote_plus(path).replace('+%26+', '+&+')
query = path.split('%3F', 1)[-1].split('%26')
for i in query: params[urllib.unquote_plus(i).split('=')[0]] = urllib.unquote_plus(i).split('=')[1]
sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl = urllib.quote_plus(params["name"]), urllib.quote_plus(params["title"]), urllib.quote_plus(params["imdb"]), urllib.quote_plus(params["tvdb"]), urllib.quote_plus(params["year"]), urllib.quote_plus(params["season"]), urllib.quote_plus(params["episode"]), urllib.quote_plus(params["show"]), urllib.quote_plus(params["show_alt"]), urllib.quote_plus(params["url"])
u = '%s?action=play&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=%s' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysurl)
meta = {'title': xbmc.getInfoLabel('ListItemNoWrap(%s).title' % i), 'tvshowtitle': xbmc.getInfoLabel('ListItemNoWrap(%s).tvshowtitle' % i), 'season': xbmc.getInfoLabel('ListItemNoWrap(%s).season' % i), 'episode': xbmc.getInfoLabel('ListItemNoWrap(%s).episode' % i), 'writer': xbmc.getInfoLabel('ListItemNoWrap(%s).writer' % i), 'director': xbmc.getInfoLabel('ListItemNoWrap(%s).director' % i), 'rating': xbmc.getInfoLabel('ListItemNoWrap(%s).rating' % i), 'duration': xbmc.getInfoLabel('ListItemNoWrap(%s).duration' % i), 'premiered': xbmc.getInfoLabel('ListItemNoWrap(%s).premiered' % i), 'plot': xbmc.getInfoLabel('ListItemNoWrap(%s).plot' % i)}
poster, fanart = xbmc.getInfoLabel('ListItemNoWrap(%s).icon' % i), xbmc.getInfoLabel('ListItemNoWrap(%s).Property(Fanart_Image)' % i)
item = xbmcgui.ListItem(label, iconImage="DefaultVideo.png", thumbnailImage=poster)
item.setInfo( type="Video", infoLabels= meta )
item.setProperty("IsPlayable", "true")
item.setProperty("Video", "true")
item.setProperty("Fanart_Image", fanart)
playlist.add(u, item)
xbmc.Player().play(playlist)
	def playlist_open(self):
		"""Open Kodi's video playlist window."""
		xbmc.executebuiltin('ActivateWindow(VideoPlaylist)')
	def settings_open(self):
		"""Open this addon's settings dialog."""
		xbmc.executebuiltin('Addon.OpenSettings(%s)' % (addonId))
	def addon_home(self):
		"""Navigate back to the addon's root listing (replacing history)."""
		xbmc.executebuiltin('Container.Update(plugin://%s/,replace)' % (addonId))
def view(self, content):
    """Persist the skin's currently active view mode for *content*.

    Parses the skin's addon.xml to find its resolution folder, then
    MyVideoNav.xml for the candidate view ids, probes which view is live
    via Control.GetLabel, and rewrites viewData so exactly one
    "skin"|"content"|"view" record exists per skin/content pair.
    Silently no-ops on any parse or I/O failure.
    """
    try:
        skin = xbmc.getSkinDir()
        skinPath = xbmc.translatePath('special://skin/')
        xml = os.path.join(skinPath,'addon.xml')
        file = xbmcvfs.File(xml)
        read = file.read().replace('\n','')
        file.close()
        # Older skins declare defaultresolution="..", newer ones <res folder="..">.
        try: src = re.compile('defaultresolution="(.+?)"').findall(read)[0]
        except: src = re.compile('<res.+?folder="(.+?)"').findall(read)[0]
        src = os.path.join(skinPath, src)
        src = os.path.join(src, 'MyVideoNav.xml')
        file = xbmcvfs.File(src)
        read = file.read().replace('\n','')
        file.close()
        views = re.compile('<views>(.+?)</views>').findall(read)[0]
        views = [int(x) for x in views.split(',')]
        # The active view is the first candidate whose control has a label.
        for view in views:
            label = xbmc.getInfoLabel('Control.GetLabel(%s)' % (view))
            if not (label == '' or label is None): break
        file = xbmcvfs.File(viewData)
        read = file.read()
        file.close()
        write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
        # Drop any previous record for this skin/content pair, then append.
        write = [i for i in write if not '"%s"|"%s"|"' % (skin, content) in i]
        write.append('"%s"|"%s"|"%s"' % (skin, content, str(view)))
        write = '\r\n'.join(write)
        file = xbmcvfs.File(viewData, 'w')
        file.write(str(write))
        file.close()
        viewName = xbmc.getInfoLabel('Container.Viewmode')
        index().infoDialog('%s%s%s' % (language(30301).encode("utf-8"), viewName, language(30302).encode("utf-8")))
    except:
        return
def favourite_add(self, data, name, url, image, imdb, year):
    """Append one show record to the favourites file *data* and toast."""
    try:
        index().container_refresh()
        fh = xbmcvfs.File(data)
        content = fh.read()
        fh.close()
        records = []
        for line in content.splitlines(True):
            if line.strip('\r\n'):
                records.append(line.strip('\n').strip('\r'))
        records.append('"%s"|"%s"|"%s"|"%s"|"%s"' % (name, year, imdb, url, image))
        out = xbmcvfs.File(data, 'w')
        out.write(str('\r\n'.join(records)))
        out.close()
        index().infoDialog(language(30303).encode("utf-8"), name)
    except:
        return
def favourite_from_search(self, data, name, url, image, imdb, year):
    """Like favourite_add, but guards against duplicates (search results
    may already be favourites) and issues no container refresh."""
    try:
        file = xbmcvfs.File(data)
        read = file.read()
        file.close()
        # Already a favourite: tell the user and bail out.
        if '"%s"' % url in read:
            index().infoDialog(language(30307).encode("utf-8"), name)
            return
        write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
        write.append('"%s"|"%s"|"%s"|"%s"|"%s"' % (name, year, imdb, url, image))
        write = '\r\n'.join(write)
        file = xbmcvfs.File(data, 'w')
        file.write(str(write))
        file.close()
        index().infoDialog(language(30303).encode("utf-8"), name)
    except:
        return
def favourite_delete(self, data, name, url):
    """Remove every record matching *url* from the favourites file *data*."""
    try:
        index().container_refresh()
        fh = xbmcvfs.File(data)
        content = fh.read()
        fh.close()
        kept = [ln.strip('\n').strip('\r') for ln in content.splitlines(True) if ln.strip('\r\n')]
        kept = [ln for ln in kept if '"%s"' % url not in ln]
        out = xbmcvfs.File(data, 'w')
        out.write(str('\r\n'.join(kept)))
        out.close()
        index().infoDialog(language(30304).encode("utf-8"), name)
    except:
        return
def favourite_moveUp(self, data, name, url):
    """Swap the record matching *url* with the one above it in *data*."""
    try:
        index().container_refresh()
        file = xbmcvfs.File(data)
        read = file.read()
        file.close()
        write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
        # IndexError here (url not found) is absorbed by the bare except.
        i = write.index([i for i in write if '"%s"' % url in i][0])
        if i == 0 : return  # already at the top
        write[i], write[i-1] = write[i-1], write[i]
        write = '\r\n'.join(write)
        file = xbmcvfs.File(data, 'w')
        file.write(str(write))
        file.close()
        index().infoDialog(language(30305).encode("utf-8"), name)
    except:
        return
def favourite_moveDown(self, data, name, url):
    """Swap the record matching *url* with the one below it in *data*."""
    try:
        index().container_refresh()
        file = xbmcvfs.File(data)
        read = file.read()
        file.close()
        write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
        # IndexError here (url not found) is absorbed by the bare except.
        i = write.index([i for i in write if '"%s"' % url in i][0])
        if i+1 == len(write): return  # already at the bottom
        write[i], write[i+1] = write[i+1], write[i]
        write = '\r\n'.join(write)
        file = xbmcvfs.File(data, 'w')
        file.write(str(write))
        file.close()
        index().infoDialog(language(30306).encode("utf-8"), name)
    except:
        return
def subscription_add(self, name, url, image, imdb, year, update=True, silent=False):
    """Subscribe to a show: ensure its .strm files exist in the local
    library, append the record to subData and (optionally) trigger a
    Kodi video-library update."""
    try:
        file = xbmcvfs.File(subData)
        read = file.read()
        file.close()
        # library(check=True) builds the show's .strm files when absent
        # and returns False when the show already exists in Kodi's library.
        lib = self.library(name, url, imdb, year, check=True, silent=True)
        if (silent == False and lib == False):
            # Show already present: ask before rebuilding its files.
            yes = index().yesnoDialog(language(30348).encode("utf-8"), language(30349).encode("utf-8"), name)
            if yes:
                self.library(name, url, imdb, year, silent=True)
            else:
                return
        elif lib == False:
            return
        write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
        write.append('"%s"|"%s"|"%s"|"%s"|"%s"' % (name, year, imdb, url, image))
        write = '\r\n'.join(write)
        file = xbmcvfs.File(subData, 'w')
        file.write(str(write))
        file.close()
        if silent == False:
            index().container_refresh()
            index().infoDialog(language(30312).encode("utf-8"), name)
        if update == True:
            xbmc.executebuiltin('UpdateLibrary(video)')
    except:
        return
def subscription_from_search(self, name, url, image, imdb, year, update=True, silent=False):
    """Like subscription_add, but guards against duplicates since a
    search result may already be subscribed."""
    try:
        file = xbmcvfs.File(subData)
        read = file.read()
        file.close()
        # Already subscribed: tell the user and bail out.
        if '"%s"' % url in read:
            index().infoDialog(language(30316).encode("utf-8"), name)
            return
        # library(check=True) builds the show's .strm files when absent
        # and returns False when the show already exists in Kodi's library.
        lib = self.library(name, url, imdb, year, check=True, silent=True)
        if (silent == False and lib == False):
            yes = index().yesnoDialog(language(30348).encode("utf-8"), language(30349).encode("utf-8"), name)
            if yes:
                self.library(name, url, imdb, year, silent=True)
            else:
                return
        elif lib == False:
            return
        write = [i.strip('\n').strip('\r') for i in read.splitlines(True) if i.strip('\r\n')]
        write.append('"%s"|"%s"|"%s"|"%s"|"%s"' % (name, year, imdb, url, image))
        write = '\r\n'.join(write)
        file = xbmcvfs.File(subData, 'w')
        file.write(str(write))
        file.close()
        if silent == False:
            index().container_refresh()
            index().infoDialog(language(30312).encode("utf-8"), name)
        if update == True:
            xbmc.executebuiltin('UpdateLibrary(video)')
    except:
        return
def subscription_delete(self, name, url, silent=False):
    """Drop every record matching *url* from subData; refresh + toast
    unless *silent*."""
    try:
        fh = xbmcvfs.File(subData)
        content = fh.read()
        fh.close()
        kept = [ln.strip('\n').strip('\r') for ln in content.splitlines(True) if ln.strip('\r\n')]
        kept = [ln for ln in kept if '"%s"' % url not in ln]
        out = xbmcvfs.File(subData, 'w')
        out.write(str('\r\n'.join(kept)))
        out.close()
        if silent == False:
            index().container_refresh()
            index().infoDialog(language(30313).encode("utf-8"), name)
    except:
        return
def subscriptions_update(self, silent=False):
    """Refresh all subscriptions, importing from the user's trakt
    collection when enabled and credentials are present."""
    site = link()
    url = site.trakt_collection % (site.trakt_key, site.trakt_user)
    have_account = not (site.trakt_user == '' or site.trakt_password == '')
    if getSetting("subscriptions_import") == 'true' and have_account:
        self.subscriptions_batch(url, silent=silent)
    else:
        self.subscriptions_batch2(silent=silent)
def subscriptions_batch(self, url, update=True, silent=False):
    """Import a whole show list (trakt feed or 'shows_trending') as
    subscriptions: known shows get their library files refreshed, new
    ones are subscribed silently."""
    try:
        file = xbmcvfs.File(subData)
        read = file.read()
        file.close()
    except:
        return
    if url == 'shows_trending':
        showList = shows().trending(idx=False)
    else:
        showList = shows().get(url, idx=False)
    if showList == None: return
    for i in showList:
        if xbmc.abortRequested == True: sys.exit()
        # Strip country/year suffixes like "Show (US)" or "Show (2010)".
        show = re.sub('\s(|[(])(UK|US|AU|\d{4})(|[)])$', '', i['name'])
        if '"%s"' % i['url'] in read:
            # Already subscribed: just rebuild its .strm files.
            self.library(show, i['url'], i['imdb'], i['year'], silent=True)
        else:
            try: self.subscription_add(show, i['url'], i['image'], i['imdb'], i['year'], update=False, silent=True)
            except: pass
    if silent == False:
        index().infoDialog(language(30312).encode("utf-8"))
    if update == True and getSetting("subscriptions_updatelibrary") == 'true':
        xbmc.executebuiltin('UpdateLibrary(video)')
def subscriptions_batch2(self, silent=False):
    """Rebuild library .strm files for every show already in subData."""
    try:
        file = xbmcvfs.File(subData)
        read = file.read()
        file.close()
        match = re.compile('"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"').findall(read)
        for name, year, imdb, url, image in match:
            if xbmc.abortRequested == True: sys.exit()
            self.library(name, url, imdb, year, silent=True)
        if getSetting("subscriptions_updatelibrary") == 'true':
            xbmc.executebuiltin('UpdateLibrary(video)')
        if silent == False:
            index().infoDialog(language(30314).encode("utf-8"))
    except:
        return
def library(self, name, url, imdb, year, check=False, silent=False):
    """Write .strm files for every episode of a show under the configured
    tv_library folder (Show/Season/Episode.strm); each file holds a
    plugin:// play URL.

    With check=True, first query Kodi's video library over JSON-RPC
    (matching on TVDB id within +/-1 year) and return False if the show
    is already there, skipping the rebuild.
    """
    try:
        library = xbmc.translatePath(getSetting("tv_library"))
        xbmcvfs.mkdir(dataPath)
        xbmcvfs.mkdir(library)
        show = name
        seasonList = seasons().get(url, '', year, imdb, '', '', show, idx=False)
    except:
        return
    try:
        if check == False: raise Exception()  # skip the existence check
        year, tvdb = seasonList[0]['year'], seasonList[0]['tvdb']
        data = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["imdbnumber"]}, "id": 1}' % (year, str(int(year)+1), str(int(year)-1)))
        data = unicode(data, 'utf-8', errors='ignore')
        data = json.loads(data)
        data = data['result']['tvshows']
        # IndexError (show absent) falls through to the builder below.
        data = [i for i in data if tvdb in i['imdbnumber']][0]
        return False
    except:
        pass
    try:
        for i in seasonList:
            season, seasonUrl, tvdb, show_alt, idx_data = i['name'], i['url'], i['tvdb'], i['show_alt'], i['idx_data']
            # Strip characters that are invalid in file names.
            enc_show = show_alt.translate(None, '\/:*?"<>|')
            folder = os.path.join(library, enc_show)
            xbmcvfs.mkdir(folder)
            enc_season = season.translate(None, '\/:*?"<>|')
            seasonDir = os.path.join(folder, enc_season)
            xbmcvfs.mkdir(seasonDir)
            episodeList = episodes().get(season, seasonUrl, '', year, imdb, tvdb, '', '', show, show_alt, idx_data, idx=False)
            for i in episodeList:
                name, title, imdb, tvdb, year, season, episode, show, show_alt, date = i['name'], i['title'], i['imdb'], i['tvdb'], i['year'], i['season'], i['episode'], i['show'], i['show_alt'], i['date']
                sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysdate = urllib.quote_plus(name), urllib.quote_plus(title), urllib.quote_plus(imdb), urllib.quote_plus(tvdb), urllib.quote_plus(year), urllib.quote_plus(season), urllib.quote_plus(episode), urllib.quote_plus(show), urllib.quote_plus(show_alt), urllib.quote_plus(date)
                content = '%s?action=play&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&date=%s' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt, sysdate)
                enc_name = name.translate(None, '\/:*?"<>|')
                stream = os.path.join(seasonDir, enc_name + '.strm')
                file = xbmcvfs.File(stream, 'w')
                file.write(str(content))
                file.close()
        if silent == False:
            index().infoDialog(language(30311).encode("utf-8"), show)
    except:
        return
def metadata(self, content, imdb, season, episode):
    """Force-refresh cached metahandler metadata for the focused item.

    Dispatches on *content*: movie/tvshow items refresh show-level
    metadata, season items refresh the season, episode items refresh the
    single episode.

    BUGFIX: the 'season' and 'episode' branches were swapped — a season
    item called update_episode_meta and an episode item called
    update_season.
    """
    try:
        if content == 'movie' or content == 'tvshow':
            metaget.update_meta(content, '', imdb, year='')
            index().container_refresh()
        elif content == 'season':
            metaget.update_season('', imdb, season)
            index().container_refresh()
        elif content == 'episode':
            metaget.update_episode_meta('', imdb, season, episode)
            index().container_refresh()
    except:
        return
def playcount(self, content, imdb, season, episode):
    """Toggle the watched flag for the focused item via metahandler,
    then redraw the directory."""
    try:
        metaget.change_watched(content, '', imdb, season=season, episode=episode, year='', watched='')
        index().container_refresh()
    except:
        return
def download(self, name, title, imdb, tvdb, year, season, episode, show, show_alt):
    """Resolve a stream URL for the episode and download it into the
    configured downloads folder, with 10%-step progress toasts.

    A '<name>.<ext>.tmp' file marks an in-flight download, and a window
    property (addonName + name + 'download') carries open/cancel state so
    a second invocation can cancel a running one.

    BUGFIX: the old except-handler referenced `file` and `temp` even when
    the failure happened before they were (re)assigned, so early errors
    raised a secondary NameError/AttributeError instead of cleaning up.
    It also had an unreachable `return` after sys.exit().
    """
    temp = None   # path of the '.tmp' partial file, once chosen
    out = None    # open handle for the partial file, once created
    property = (addonName+name)+'download'
    try:
        download = xbmc.translatePath(getSetting("downloads"))
        enc_name = name.translate(None, '\/:*?"<>|')
        xbmcvfs.mkdir(dataPath)
        xbmcvfs.mkdir(download)
        # Any previous file (finished or partial) for this episode.
        existing = [i for i in xbmcvfs.listdir(download)[1] if i.startswith(enc_name + '.')]
        if not existing == []: existing = os.path.join(download, existing[0])
        else: existing = None
        if download == '':
            # No download folder configured: point the user at settings.
            yes = index().yesnoDialog(language(30341).encode("utf-8"), language(30342).encode("utf-8"))
            if yes: contextMenu().settings_open()
            return
        if existing is None:
            pass
        elif not existing.endswith('.tmp'):
            # A completed copy exists: ask before overwriting it.
            yes = index().yesnoDialog(language(30343).encode("utf-8"), language(30344).encode("utf-8"), name)
            if yes:
                xbmcvfs.delete(existing)
            else:
                return
        elif existing.endswith('.tmp'):
            if index().getProperty(property) == 'open':
                # Download in flight: offer to cancel it, then bail out.
                yes = index().yesnoDialog(language(30345).encode("utf-8"), language(30346).encode("utf-8"), name)
                if yes: index().setProperty(property, 'cancel')
                return
            else:
                # Stale leftover from a crashed run.
                xbmcvfs.delete(existing)
        url = resolver().run(name, title, imdb, tvdb, year, season, episode, show, show_alt, 'download://')
        if url is None: return
        url = url.rsplit('|', 1)[0]
        # Guess the extension from the URL path; default to mp4.
        ext = url.rsplit('/', 1)[-1].rsplit('?', 1)[0].rsplit('|', 1)[0].strip().lower()
        ext = os.path.splitext(ext)[1][1:]
        if ext == '': ext = 'mp4'
        stream = os.path.join(download, enc_name + '.' + ext)
        temp = stream + '.tmp'
        count = 0
        CHUNK = 16 * 1024
        request = urllib2.Request(url)
        request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
        request.add_header('Cookie', 'video=true')
        response = urllib2.urlopen(request, timeout=10)
        size = response.info()["Content-Length"]
        out = xbmcvfs.File(temp, 'w')
        index().setProperty(property, 'open')
        index().infoDialog(language(30308).encode("utf-8"), name)
        while True:
            chunk = response.read(CHUNK)
            if not chunk: break
            if index().getProperty(property) == 'cancel': raise Exception()
            if xbmc.abortRequested == True: raise Exception()
            part = xbmcvfs.File(temp)
            quota = int(100 * float(part.size())/float(size))
            part.close()
            # Toast once per 10% step (count holds the previous quota).
            if not count == quota and count in [0,10,20,30,40,50,60,70,80,90]:
                index().infoDialog(language(30309).encode("utf-8") + str(count) + '%', name)
            out.write(chunk)
            count = quota
        response.close()
        out.close()
        index().clearProperty(property)
        xbmcvfs.rename(temp, stream)
        index().infoDialog(language(30310).encode("utf-8"), name)
    except:
        # Best-effort cleanup: only touch what was actually created.
        try:
            if out is not None: out.close()
        except:
            pass
        index().clearProperty(property)
        if temp is not None: xbmcvfs.delete(temp)
        sys.exit()
def sources(self, name, title, imdb, tvdb, year, season, episode, show, show_alt):
    """Play the focused episode with url=sources:// in the play URL so
    the resolver presents a manual source-selection list."""
    # Carry the focused ListItem's metadata and art over to the player item.
    meta = {'title': xbmc.getInfoLabel('ListItem.title'), 'tvshowtitle': xbmc.getInfoLabel('ListItem.tvshowtitle'), 'season': xbmc.getInfoLabel('ListItem.season'), 'episode': xbmc.getInfoLabel('ListItem.episode'), 'writer': xbmc.getInfoLabel('ListItem.writer'), 'director': xbmc.getInfoLabel('ListItem.director'), 'rating': xbmc.getInfoLabel('ListItem.rating'), 'duration': xbmc.getInfoLabel('ListItem.duration'), 'premiered': xbmc.getInfoLabel('ListItem.premiered'), 'plot': xbmc.getInfoLabel('ListItem.plot')}
    label, poster, fanart = xbmc.getInfoLabel('ListItem.label'), xbmc.getInfoLabel('ListItem.icon'), xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
    sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt = urllib.quote_plus(name), urllib.quote_plus(title), urllib.quote_plus(imdb), urllib.quote_plus(tvdb), urllib.quote_plus(year), urllib.quote_plus(season), urllib.quote_plus(episode), urllib.quote_plus(show), urllib.quote_plus(show_alt)
    u = '%s?action=play&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=sources://' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt)
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    item = xbmcgui.ListItem(label, iconImage="DefaultVideo.png", thumbnailImage=poster)
    item.setInfo( type="Video", infoLabels= meta )
    item.setProperty("IsPlayable", "true")
    item.setProperty("Video", "true")
    item.setProperty("Fanart_Image", fanart)
    xbmc.Player().play(u, item)
def autoplay(self, name, title, imdb, tvdb, year, season, episode, show, show_alt):
    """Play the focused episode with url=play:// in the play URL so the
    resolver picks a source automatically (no selection dialog)."""
    # Carry the focused ListItem's metadata and art over to the player item.
    meta = {'title': xbmc.getInfoLabel('ListItem.title'), 'tvshowtitle': xbmc.getInfoLabel('ListItem.tvshowtitle'), 'season': xbmc.getInfoLabel('ListItem.season'), 'episode': xbmc.getInfoLabel('ListItem.episode'), 'writer': xbmc.getInfoLabel('ListItem.writer'), 'director': xbmc.getInfoLabel('ListItem.director'), 'rating': xbmc.getInfoLabel('ListItem.rating'), 'duration': xbmc.getInfoLabel('ListItem.duration'), 'premiered': xbmc.getInfoLabel('ListItem.premiered'), 'plot': xbmc.getInfoLabel('ListItem.plot')}
    label, poster, fanart = xbmc.getInfoLabel('ListItem.label'), xbmc.getInfoLabel('ListItem.icon'), xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
    sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt = urllib.quote_plus(name), urllib.quote_plus(title), urllib.quote_plus(imdb), urllib.quote_plus(tvdb), urllib.quote_plus(year), urllib.quote_plus(season), urllib.quote_plus(episode), urllib.quote_plus(show), urllib.quote_plus(show_alt)
    u = '%s?action=play&name=%s&title=%s&imdb=%s&tvdb=%s&year=%s&season=%s&episode=%s&show=%s&show_alt=%s&url=play://' % (sys.argv[0], sysname, systitle, sysimdb, systvdb, sysyear, sysseason, sysepisode, sysshow, sysshow_alt)
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    item = xbmcgui.ListItem(label, iconImage="DefaultVideo.png", thumbnailImage=poster)
    item.setInfo( type="Video", infoLabels= meta )
    item.setProperty("IsPlayable", "true")
    item.setProperty("Video", "true")
    item.setProperty("Fanart_Image", fanart)
    xbmc.Player().play(u, item)
class subscriptions:
    """Directory views for the user's subscriptions, backed by the
    pipe-delimited subData file and the .strm files written by
    contextMenu.library() into the tv_library folder."""
    def __init__(self):
        self.list = []  # accumulator for directory entries
    def shows(self):
        """List all subscribed shows alphabetically."""
        file = xbmcvfs.File(subData)
        read = file.read()
        file.close()
        match = re.compile('"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"').findall(read)
        for name, year, imdb, url, image in match:
            self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'genre': '', 'plot': ''})
        self.list = sorted(self.list, key=itemgetter('name'))
        index().showList(self.list)
    def episodes(self):
        """Show the 100 most recent episodes across all subscriptions by
        re-parsing the plugin play URLs stored in the library .strm files."""
        try:
            file = xbmcvfs.File(subData)
            read = file.read()
            file.close()
            if read == '':
                index().okDialog(language(30323).encode("utf-8"), language(30324).encode("utf-8"))
            if not getSetting("subscriptions_update") == 'true':
                index().okDialog(language(30325).encode("utf-8"), language(30326).encode("utf-8"))
            imdbDict, seasons, episodes = {}, [], []
            library = xbmc.translatePath(getSetting("tv_library"))
            match = re.compile('"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"').findall(read)
            # Map imdb id -> poster so episode rows can reuse the show art.
            for name, year, imdb, url, image in match: imdbDict.update({imdb:image})
            shows = [os.path.join(library, i) for i in xbmcvfs.listdir(library)[0]]
            for show in shows: seasons += [os.path.join(show, i) for i in xbmcvfs.listdir(show)[0]]
            for season in seasons: episodes += [os.path.join(season, i) for i in xbmcvfs.listdir(season)[1] if i.endswith('.strm')]
        except:
            # NOTE(review): if this try fails before 'episodes' is bound, the
            # loop below resolves the module-level episodes class instead —
            # confirm that is intended.
            pass
        for episode in episodes:
            try:
                file = xbmcvfs.File(episode)
                read = file.read()
                read = read.encode("utf-8")
                file.close()
                # Only accept .strm files that point back at this plugin.
                if not read.startswith(sys.argv[0]): raise Exception()
                # Rebuild the query-string parameters written by library().
                params = {}
                query = read[read.find('?') + 1:].split('&')
                for i in query: params[i.split('=')[0]] = i.split('=')[1]
                name, title, imdb, tvdb, year, season, episode, show, show_alt, date = urllib.unquote_plus(params["name"]), urllib.unquote_plus(params["title"]), urllib.unquote_plus(params["imdb"]), urllib.unquote_plus(params["tvdb"]), urllib.unquote_plus(params["year"]), urllib.unquote_plus(params["season"]), urllib.unquote_plus(params["episode"]), urllib.unquote_plus(params["show"]), urllib.unquote_plus(params["show_alt"]), urllib.unquote_plus(params["date"])
                image = imdbDict[imdb]
                sort = date.replace('-','')  # YYYYMMDD sort key from the air date
                self.list.append({'name': name, 'url': name, 'image': image, 'date': date, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': '', 'plot': '', 'title': title, 'show': show, 'show_alt': show_alt, 'season': season, 'episode': episode, 'sort': sort})
            except:
                pass
        self.list = sorted(self.list, key=itemgetter('sort'))
        self.list = self.list[::-1][:100]  # newest first, capped at 100
        index().episodeList(self.list)
class favourites:
    """Directory view for the favourites stored in favData."""
    def __init__(self):
        self.list = []  # accumulator for directory entries
    def shows(self):
        """List favourite shows.

        fav_sort '0' sorts alphabetically; '1' also fetches each show's
        status from metahandler and pushes 'Ended' shows to the bottom.
        """
        file = xbmcvfs.File(favData)
        read = file.read()
        file.close()
        match = re.compile('"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"[|]"(.+?)"').findall(read)
        for name, year, imdb, url, image in match:
            if getSetting("fav_sort") == '1':
                try: status = metaget.get_meta('tvshow', name, imdb_id=imdb)['status']
                except: status = ''
            else:
                status = ''
            self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'genre': '', 'plot': '', 'status': status})
        if getSetting("fav_sort") == '0':
            self.list = sorted(self.list, key=itemgetter('name'))
        elif getSetting("fav_sort") == '1':
            filter = []
            self.list = sorted(self.list, key=itemgetter('name'))
            # Running shows first, then ended shows, each alphabetical.
            filter += [i for i in self.list if not i['status'] == 'Ended']
            filter += [i for i in self.list if i['status'] == 'Ended']
            self.list = filter
        index().showList(self.list)
class root:
    """Top-level directory menus for the add-on."""
    def get(self):
        """Build the main menu; trakt/IMDb user-list entries appear only
        when the corresponding account settings are filled in."""
        entries = [
            {'name': 30501, 'image': 'Episodes.png', 'action': 'episodes_subscriptions'},
            {'name': 30502, 'image': 'Calendar.png', 'action': 'calendar_episodes'},
            {'name': 30503, 'image': 'Popular.png', 'action': 'shows_popular'},
            {'name': 30504, 'image': 'Rating.png', 'action': 'shows_rating'},
            {'name': 30505, 'image': 'Views.png', 'action': 'shows_views'},
            {'name': 30506, 'image': 'Active.png', 'action': 'shows_active'},
            {'name': 30507, 'image': 'Trending.png', 'action': 'shows_trending'},
            {'name': 30508, 'image': 'Genres.png', 'action': 'genres_shows'},
        ]
        if not (getSetting("trakt_user") == '' or getSetting("trakt_password") == ''):
            entries.append({'name': 30509, 'image': 'Trakt.png', 'action': 'userlists_trakt'})
        if not (getSetting("imdb_user") == ''):
            entries.append({'name': 30510, 'image': 'IMDb.png', 'action': 'userlists_imdb'})
        entries.append({'name': 30511, 'image': 'Favourites.png', 'action': 'shows_favourites'})
        entries.append({'name': 30512, 'image': 'Subscriptions.png', 'action': 'shows_subscriptions'})
        entries.append({'name': 30513, 'image': 'Search.png', 'action': 'root_search'})
        index().rootList(entries)
        index().downloadList()
    def search(self):
        """Sub-menu for search: by show title or by actor name."""
        entries = [
            {'name': 30521, 'image': 'TVShows.png', 'action': 'shows_search'},
            {'name': 30522, 'image': 'Actors.png', 'action': 'actors_search'},
        ]
        index().rootList(entries)
class link:
    """Central registry of scraper endpoints and account settings.

    Every URL template used by the IMDb / TheTVDB / trakt / TVRage
    scrapers lives here; API keys are stored base64-obfuscated.
    """
    def __init__(self):
        # --- IMDb ---
        self.imdb_base = 'http://www.imdb.com'
        self.imdb_akas = 'http://akas.imdb.com'
        self.imdb_mobile = 'http://m.imdb.com'
        self.imdb_genre = 'http://akas.imdb.com/genre/'
        self.imdb_title = 'http://www.imdb.com/title/tt%s/'
        self.imdb_seasons = 'http://akas.imdb.com/title/tt%s/episodes'
        self.imdb_episodes = 'http://www.imdb.com/title/tt%s/episodes?season=%s'
        self.imdb_image = 'http://i.media-imdb.com/images/SF1b61b592d2fa1b9cfb8336f160e1efcf/nopicture/medium/tv.png'
        self.imdb_genres = 'http://akas.imdb.com/search/title?title_type=tv_series,mini_series&sort=moviemeter,asc&count=25&start=1&genres=%s'
        self.imdb_popular = 'http://akas.imdb.com/search/title?title_type=tv_series,mini_series&sort=moviemeter,asc&count=25&start=1'
        self.imdb_rating = 'http://akas.imdb.com/search/title?title_type=tv_series,mini_series&sort=user_rating,desc&count=25&start=1'
        self.imdb_views = 'http://akas.imdb.com/search/title?title_type=tv_series,mini_series&sort=num_votes,desc&count=25&start=1'
        self.imdb_active = 'http://akas.imdb.com/search/title?title_type=tv_series,mini_series&production_status=active&sort=moviemeter,asc&count=25&start=1'
        self.imdb_search = 'http://akas.imdb.com/search/title?title_type=tv_series,mini_series&sort=moviemeter,asc&count=25&start=1&title=%s'
        self.imdb_actors_search = 'http://www.imdb.com/search/name?count=100&name=%s'
        self.imdb_actors = 'http://m.imdb.com/name/nm%s/filmotype/%s'
        self.imdb_userlists = 'http://akas.imdb.com/user/%s/lists?tab=all&sort=modified:desc&filter=titles'
        self.imdb_watchlist ='http://akas.imdb.com/user/%s/watchlist?view=detail&count=100&sort=listorian:asc&start=1'
        self.imdb_list ='http://akas.imdb.com/list/%s/?view=detail&count=100&sort=listorian:asc&start=1'
        # Normalize the configured user id to a single 'ur' prefix.
        self.imdb_user = 'ur' + getSetting("imdb_user").replace('ur', '')
        # --- TheTVDB ---
        self.tvdb_base = 'http://thetvdb.com'
        self.tvdb_key = base64.urlsafe_b64decode('MUQ2MkYyRjkwMDMwQzQ0NA==')
        self.tvdb_series = 'http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=tt%s&language=en'
        self.tvdb_series2 = 'http://thetvdb.com/api/GetSeries.php?seriesname=%s&language=en'
        self.tvdb_episodes = 'http://thetvdb.com/api/%s/series/%s/all/en.xml'
        self.tvdb_thumb = 'http://thetvdb.com/banners/_cache/'
        # --- trakt ---
        self.trakt_base = 'http://api.trakt.tv'
        self.trakt_key = base64.urlsafe_b64decode('YmU2NDI5MWFhZmJiYmU2MmZkYzRmM2FhMGVkYjQwNzM=')
        self.trakt_summary = 'http://api.trakt.tv/show/summary.json/%s/%s'
        self.trakt_trending = 'http://api.trakt.tv/shows/trending.json/%s'
        self.trakt_calendar = 'http://api.trakt.tv/calendar/shows.json/%s/%s/1'
        self.trakt_user, self.trakt_password = getSetting("trakt_user"), getSetting("trakt_password")
        self.trakt_watchlist = 'http://api.trakt.tv/user/watchlist/shows.json/%s/%s'
        self.trakt_collection = 'http://api.trakt.tv/user/library/shows/collection.json/%s/%s'
        self.trakt_watched = 'http://api.trakt.tv/user/library/shows/watched.json/%s/%s'
        self.trakt_rated = 'http://api.trakt.tv/user/ratings/shows.json/%s/%s/rating/extended'
        self.trakt_lists = 'http://api.trakt.tv/user/lists.json/%s/%s'
        self.trakt_list= 'http://api.trakt.tv/user/list.json/%s/%s'
        # --- TVRage ---
        self.tvrage_base = 'http://services.tvrage.com'
        self.tvrage_info = 'http://services.tvrage.com/feeds/full_show_info.php?sid=%s'
class actors:
    """IMDb actor search: prompts for a name and lists matching actors,
    each linking to their mobile-IMDb filmography page."""
    def __init__(self):
        self.list = []  # accumulator for directory entries
    def search(self, query=None):
        """Prompt for (or accept) a name and render the search results."""
        if query is None:
            self.query = common.getUserInput(language(30362).encode("utf-8"), '')
        else:
            self.query = query
        if not (self.query is None or self.query == ''):
            self.query = link().imdb_actors_search % urllib.quote_plus(self.query)
            self.list = self.imdb_list(self.query)
            index().pageList(self.list)
    def imdb_list(self, url):
        """Scrape an IMDb name-search results page into
        name/url/image dicts; returns None on fetch/parse failure."""
        try:
            result = getUrl(url).result
            result = result.decode('iso-8859-1').encode('utf-8')
            actors = common.parseDOM(result, "tr", attrs = { "class": ".+? detailed" })
        except:
            return
        for actor in actors:
            try:
                name = common.parseDOM(actor, "a", ret="title")[0]
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')
                url = common.parseDOM(actor, "a", ret="href")[0]
                url = re.findall('nm(\d*)', url, re.I)[0]
                # Keep only acting credits; any other description is skipped.
                type = common.parseDOM(actor, "span", attrs = { "class": "description" })[0]
                if 'Actress' in type: type = 'actress'
                elif 'Actor' in type: type = 'actor'
                else: raise Exception()
                url = link().imdb_actors % (url, type)
                url = common.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                # Require a resizable thumbnail and request a 500px-wide copy.
                image = common.parseDOM(actor, "img", ret="src")[0]
                if not ('._SX' in image or '._SY' in image): raise Exception()
                image = image.rsplit('._SX', 1)[0].rsplit('._SY', 1)[0] + '._SX500.' + image.rsplit('.', 1)[-1]
                image = common.replaceHTMLCodes(image)
                image = image.encode('utf-8')
                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass
        return self.list
class genres:
    """Lists IMDb's TV genres, each linking to a per-genre chart URL."""
    def __init__(self):
        self.list = []  # accumulator for directory entries
    def get(self):
        """Render the genre list (long-lived cache via cache3)."""
        #self.list = self.imdb_list()
        self.list = cache3(self.imdb_list)
        index().pageList(self.list)
    def imdb_list(self):
        """Scrape the IMDb genre index page into name/url/image dicts;
        returns None on fetch/parse failure."""
        try:
            result = getUrl(link().imdb_genre).result
            result = common.parseDOM(result, "div", attrs = { "class": "article" })
            # Only the article block that contains the "tv_genres" table.
            result = [i for i in result if str('"tv_genres"') in i][0]
            genres = common.parseDOM(result, "td")
        except:
            return
        for genre in genres:
            try:
                name = common.parseDOM(genre, "a")[0]
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')
                url = common.parseDOM(genre, "a", ret="href")[0]
                # Genre id appears either as a query param or a path segment.
                try: url = re.compile('genres=(.+?)&').findall(url)[0]
                except: url = re.compile('/genre/(.+?)/').findall(url)[0]
                url = link().imdb_genres % url
                url = common.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                image = addonGenres.encode('utf-8')
                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass
        return self.list
class calendar:
    """Air-date picker: one entry per day for the last two weeks."""
    def __init__(self):
        self.list = []
    def get(self):
        """Render the day list."""
        self.list = self.trakt_list()
        index().pageList2(self.list)
    def trakt_list(self):
        """Return entries for today and the previous 13 days, newest
        first, using a UTC-5 clock."""
        now = datetime.datetime.utcnow() - datetime.timedelta(hours = 5)
        today = datetime.date(now.year, now.month, now.day)
        icon = addonCalendar.encode('utf-8')
        for offset in range(0, 14):
            day = today - datetime.timedelta(days=offset)
            day = str(day).encode('utf-8')
            self.list.append({'name': day, 'url': day, 'image': icon})
        return self.list
class userlists:
    """Lists the user's trakt or IMDb lists as browsable entries."""
    def __init__(self):
        self.list = []  # accumulator for directory entries
    def trakt(self):
        """Built-in trakt lists (watchlist/collection/watched/rated) plus
        the user's custom lists fetched from the API."""
        post = urllib.urlencode({'username': link().trakt_user, 'password': link().trakt_password})
        info = (link().trakt_key, link().trakt_user)
        image = addonLists.encode('utf-8')
        self.list.append({'name': language(30531).encode("utf-8"), 'url': link().trakt_watchlist % info, 'image': image})
        self.list.append({'name': language(30532).encode("utf-8"), 'url': link().trakt_collection % info, 'image': image})
        self.list.append({'name': language(30533).encode("utf-8"), 'url': link().trakt_watched % info, 'image': image})
        self.list.append({'name': language(30534).encode("utf-8"), 'url': link().trakt_rated % info, 'image': image})
        try:
            userlists = []
            result = getUrl(link().trakt_lists % info, post=post).result
            userlists = json.loads(result)
        except:
            pass
        for userlist in userlists:
            try:
                name = userlist['name']
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')
                url = userlist['slug']
                url = '%s/%s' % (link().trakt_list % info, url)
                url = common.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass
        index().userList(self.list)
    def imdb(self):
        """IMDb watchlist in three sort orders plus the user's custom
        lists scraped from their public lists page."""
        image = addonLists.encode('utf-8')
        self.list.append({'name': language(30541).encode("utf-8"), 'url': 'watchlist', 'image': image})
        self.list.append({'name': language(30542).encode("utf-8"), 'url': 'watchadded', 'image': image})
        self.list.append({'name': language(30543).encode("utf-8"), 'url': 'watchtitle', 'image': image})
        try:
            userlists = []
            result = getUrl(link().imdb_userlists % link().imdb_user).result
            result = result.decode('iso-8859-1').encode('utf-8')
            userlists = common.parseDOM(result, "div", attrs = { "class": "list_name" })
        except:
            pass
        for userlist in userlists:
            try:
                name = common.parseDOM(userlist, "a")[0]
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')
                url = common.parseDOM(userlist, "a", ret="href")[0]
                # Keep only the list id out of the /list/<id>/ path.
                url = url.split('/list/', 1)[-1].replace('/', '')
                url = common.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass
        index().userList(self.list)
class shows:
def __init__(self):
    # list: accumulator for scraped show entries.
    self.list = []
    # data: auxiliary storage; not used by the methods visible here.
    self.data = []
def get(self, url, idx=True):
    """Dispatch *url* to the matching list parser (IMDb desktop/mobile,
    trakt, or an IMDb watchlist keyword / list id) and render the result.

    With idx=False the parsed list is returned instead of rendered.
    """
    if url.startswith(link().imdb_base) or url.startswith(link().imdb_akas):
        #self.list = self.imdb_list(url)
        self.list = cache(self.imdb_list, url)
    elif url.startswith(link().imdb_mobile):
        #self.list = self.imdb_list2(url)
        self.list = cache(self.imdb_list2, url)
    elif url.startswith(link().trakt_base):
        self.list = self.trakt_list(url)
    elif url == 'watchlist':
        self.list = self.imdb_list3(link().imdb_watchlist % link().imdb_user)
    elif url == 'watchadded':
        self.list = self.imdb_list3(link().imdb_watchlist % link().imdb_user)
        self.list = self.list[::-1]  # most recently added first
    elif url == 'watchtitle':
        self.list = self.imdb_list3(link().imdb_watchlist % link().imdb_user)
        self.list = sorted(self.list, key=itemgetter('name'))
    else:
        # Anything else is treated as an IMDb list id.
        self.list = self.imdb_list3(link().imdb_list % url)
        self.list = sorted(self.list, key=itemgetter('name'))
    if idx == False: return self.list
    index().showList(self.list)
    index().nextList(self.list)
def popular(self):
    """IMDb 'moviemeter' popularity chart (cached)."""
    feed = link().imdb_popular
    self.list = cache(self.imdb_list, feed)
    index().showList(self.list)
    index().nextList(self.list)
def rating(self):
    """IMDb chart sorted by user rating (cached)."""
    feed = link().imdb_rating
    self.list = cache(self.imdb_list, feed)
    index().showList(self.list)
    index().nextList(self.list)
def views(self):
    """IMDb chart sorted by number of votes (cached)."""
    feed = link().imdb_views
    self.list = cache(self.imdb_list, feed)
    index().showList(self.list)
    index().nextList(self.list)
def active(self):
    """IMDb chart of shows currently in production (cached)."""
    feed = link().imdb_active
    self.list = cache(self.imdb_list, feed)
    index().showList(self.list)
    index().nextList(self.list)
def trending(self, idx=True):
    """Trakt trending shows, capped at 100 entries (cached).

    With idx=False the capped list is returned instead of rendered.
    """
    feed = link().trakt_trending % link().trakt_key
    self.list = cache2(self.trakt_list, feed)
    top = self.list[:100]
    if idx == False: return top
    index().showList(top)
def search(self, query=None):
    """Prompt for (or accept) a title and render IMDb search results."""
    if query is None:
        self.query = common.getUserInput(language(30362).encode("utf-8"), '')
    else:
        self.query = query
    if self.query is None or self.query == '':
        return
    self.query = link().imdb_search % urllib.quote_plus(self.query)
    self.list = self.imdb_list(self.query)
    index().showList(self.list)
 def imdb_list(self, url):
  """Scrape an IMDb chart/search result page into self.list.

  Each appended entry is a dict with keys name/url/image/year/imdb/
  genre/plot/next. Returns None when the page cannot be fetched or
  parsed; per-row failures are skipped silently.
  """
  try:
   # Fetch through the akas mirror; pages are latin-1 encoded.
   result = getUrl(url.replace(link().imdb_base, link().imdb_akas)).result
   result = result.decode('iso-8859-1').encode('utf-8')
   shows = common.parseDOM(result, "tr", attrs = { "class": ".+?" })
  except:
   return
  try:
   # Find the "next page" href; a 'laquo' label means the last anchor
   # points backwards, i.e. we are on the final page.
   next = common.parseDOM(result, "span", attrs = { "class": "pagination" })[0]
   name = common.parseDOM(next, "a")[-1]
   if 'laquo' in name: raise Exception()
   next = common.parseDOM(next, "a", ret="href")[-1]
   next = '%s%s' % (link().imdb_akas, next)
   next = common.replaceHTMLCodes(next)
   next = next.encode('utf-8')
  except:
   next = ''
  for show in shows:
   try:
    name = common.parseDOM(show, "a")[1]
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    year = common.parseDOM(show, "span", attrs = { "class": "year_type" })[0]
    year = re.sub('[^0-9]', '', year)[:4]
    year = year.encode('utf-8')
    # Skip titles dated in the future (clock shifted to UTC-5).
    if int(year) > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y")): raise Exception()
    url = common.parseDOM(show, "a", ret="href")[0]
    url = '%s%s' % (link().imdb_base, url)
    url = common.replaceHTMLCodes(url)
    url = url.encode('utf-8')
    imdb = re.sub('[^0-9]', '', url.rsplit('tt', 1)[-1])
    imdb = imdb.encode('utf-8')
    try:
     # Rewrite the thumbnail URL to request a 500px-wide variant.
     image = common.parseDOM(show, "img", ret="src")[0]
     if not ('._SX' in image or '._SY' in image): raise Exception()
     image = image.rsplit('._SX', 1)[0].rsplit('._SY', 1)[0] + '._SX500.' + image.rsplit('.', 1)[-1]
    except:
     image = link().imdb_image
    image = common.replaceHTMLCodes(image)
    image = image.encode('utf-8')
    try:
     genre = common.parseDOM(show, "span", attrs = { "class": "genre" })
     genre = common.parseDOM(genre, "a")
     genre = " / ".join(genre)
     genre = common.replaceHTMLCodes(genre)
     genre = genre.encode('utf-8')
    except:
     genre = ''
    try:
     plot = common.parseDOM(show, "span", attrs = { "class": "outline" })[0]
     plot = common.replaceHTMLCodes(plot)
     plot = plot.encode('utf-8')
    except:
     plot = ''
    self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'genre': genre, 'plot': plot, 'next': next})
   except:
    pass
  return self.list
 def imdb_list2(self, url):
  """Scrape the IMDb *mobile* site layout into self.list.

  Only rows whose year field mentions 'series' are kept (filters out
  films). Entries carry empty genre/plot. Returns None on fetch/parse
  failure; per-row failures are skipped.
  """
  try:
   result = getUrl(url, mobile=True).result
   result = result.decode('iso-8859-1').encode('utf-8')
   shows = common.parseDOM(result, "div", attrs = { "class": "col-xs.+?" })
  except:
   return
  for show in shows:
   try:
    name = common.parseDOM(show, "span", attrs = { "class": "h3" })[0]
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    year = common.parseDOM(show, "div", attrs = { "class": "unbold" })[0]
    if not 'series' in year.lower(): raise Exception()
    year = re.sub('[^0-9]', '', year)[:4]
    year = re.sub("\n|[(]|[)]|\s", "", year)
    year = year.encode('utf-8')
    # Skip titles dated in the future (clock shifted to UTC-5).
    if int(year) > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y")): raise Exception()
    url = common.parseDOM(show, "a", ret="href")[0]
    url = re.findall('tt(\d*)', url, re.I)[0]
    url = link().imdb_title % url
    url = common.replaceHTMLCodes(url)
    url = url.encode('utf-8')
    imdb = re.sub('[^0-9]', '', url.rsplit('tt', 1)[-1])
    imdb = imdb.encode('utf-8')
    try:
     # Rewrite the thumbnail URL to request a 500px-wide variant.
     image = common.parseDOM(show, "img", ret="src")[0]
     if not ('_SX' in image or '_SY' in image): raise Exception()
     image = image.rsplit('_SX', 1)[0].rsplit('_SY', 1)[0] + '_SX500.' + image.rsplit('.', 1)[-1]
    except:
     image = link().imdb_image
    image = common.replaceHTMLCodes(image)
    image = image.encode('utf-8')
    self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'genre': '', 'plot': ''})
   except:
    pass
  return self.list
 def imdb_list3(self, url):
  """Scrape a full IMDb user list/watchlist into self.list.

  Fetches the first page, then pulls all remaining pages concurrently
  (100 rows per page) via self.thread into self.data and concatenates
  them before parsing. Returns None on fetch/parse failure.
  """
  try:
   url = url.replace(link().imdb_base, link().imdb_akas)
   result = getUrl(url).result
   try:
    # One worker thread per extra page; results land in self.data slots.
    threads = []
    pages = common.parseDOM(result, "div", attrs = { "class": "pagination" })[0]
    pages = re.compile('.+?\d+.+?(\d+)').findall(pages)[0]
    for i in range(1, int(pages)):
     self.data.append('')
     showsUrl = url.replace('&start=1', '&start=%s' % str(i*100+1))
     threads.append(Thread(self.thread, showsUrl, i-1))
    [i.start() for i in threads]
    [i.join() for i in threads]
    for i in self.data: result += i
   except:
    pass
   result = result.replace('\n','')
   shows = common.parseDOM(result, "div", attrs = { "class": "list_item.+?" })
  except:
   return
  for show in shows:
   try:
    name = common.parseDOM(show, "a", attrs = { "onclick": ".+?" })[-1]
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    year = common.parseDOM(show, "span", attrs = { "class": "year_type" })[0]
    if not 'series' in year.lower(): raise Exception()
    year = re.compile('[(](\d{4})').findall(year)[0]
    year = year.encode('utf-8')
    # Skip titles dated in the future (clock shifted to UTC-5).
    if int(year) > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y")): raise Exception()
    url = common.parseDOM(show, "a", ret="href")[0]
    url = '%s%s' % (link().imdb_base, url)
    url = common.replaceHTMLCodes(url)
    url = url.encode('utf-8')
    try:
     # Rewrite the thumbnail URL to request a 500px-wide variant.
     image = common.parseDOM(show, "img", ret="src")[0]
     if not ('._SX' in image or '._SY' in image): raise Exception()
     image = image.rsplit('._SX', 1)[0].rsplit('._SY', 1)[0] + '._SX500.' + image.rsplit('.', 1)[-1]
    except:
     image = link().imdb_image
    image = common.replaceHTMLCodes(image)
    image = image.encode('utf-8')
    imdb = re.sub('[^0-9]', '', url.rsplit('tt', 1)[-1])
    imdb = imdb.encode('utf-8')
    try:
     plot = common.parseDOM(show, "div", attrs = { "class": "item_description" })[0]
     plot = plot.rsplit('<span>', 1)[0].strip()
     plot = common.replaceHTMLCodes(plot)
     plot = plot.encode('utf-8')
    except:
     plot = ''
    self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'genre': '', 'plot': plot})
   except:
    pass
  return self.list
 def trakt_list(self, url):
  """Fetch a Trakt JSON endpoint (authenticated POST) into self.list.

  Handles both response shapes: a flat list of shows and a list of
  wrapper objects carrying a 'show' key (optionally nested under
  'items'). Returns None on fetch/decode failure.
  """
  try:
   post = urllib.urlencode({'username': link().trakt_user, 'password': link().trakt_password})
   result = getUrl(url, post=post).result
   result = json.loads(result)
   shows = []
   try: result = result['items']
   except: pass
   for i in result:
    try: shows.append(i['show'])
    except: pass
   if shows == []:
    shows = result
   except:
   return
  for show in shows:
   try:
    name = show['title']
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    year = show['year']
    year = re.sub('[^0-9]', '', str(year))
    year = year.encode('utf-8')
    # Skip titles dated in the future (clock shifted to UTC-5).
    if int(year) > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y")): raise Exception()
    imdb = show['imdb_id']
    imdb = re.sub('[^0-9]', '', str(imdb))
    imdb = imdb.encode('utf-8')
    url = link().imdb_title % imdb
    url = common.replaceHTMLCodes(url)
    url = url.encode('utf-8')
    try: image = show['images']['poster']
    except: image = show['poster']
    image = common.replaceHTMLCodes(image)
    image = image.encode('utf-8')
    try:
     genre = show['genres']
     genre = " / ".join(genre)
     genre = common.replaceHTMLCodes(genre)
     genre = genre.encode('utf-8')
    except:
     genre = ''
    try:
     plot = show['overview']
     plot = common.replaceHTMLCodes(plot)
     plot = plot.encode('utf-8')
    except:
     plot = ''
    self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'genre': genre, 'plot': plot})
   except:
    pass
  return self.list
def thread(self, url, i):
try:
result = getUrl(url).result
self.data[i] = result
except:
return
class seasons:
 """Builds the season list for a show, trying TVRage, then TheTVDB,
 then IMDb as metadata backends (first non-empty result wins)."""
 def __init__(self):
  self.list = []
 def get(self, url, image, year, imdb, genre, plot, show, idx=True):
  """Fetch seasons; render them when idx is True, else return the list."""
  if idx == True:
   #self.list = self.get_list(url, image, year, imdb, genre, plot, show)
   self.list = cache2(self.get_list, url, image, year, imdb, genre, plot, show)
   index().seasonList(self.list)
  else:
   self.list = self.get_list(url, image, year, imdb, genre, plot, show)
   return self.list
 def get_list(self, url, image, year, imdb, genre, plot, show):
  """Resolve TVDB ids for the show, then try each backend in turn."""
  if imdb == '0': imdb = re.sub('[^0-9]', '', url.rsplit('tt', 1)[-1])
  try:
   try:
    # Primary lookup by IMDb id; fall back to a name+year search.
    result = getUrl(link().tvdb_series % imdb).result
    show_alt = common.parseDOM(result, "SeriesName")[0]
    tvdb = common.parseDOM(result, "seriesid")[0]
   except:
    result = getUrl(link().tvdb_series2 % urllib.quote_plus(show)).result
    result = common.parseDOM(result, "Series")
    result = [i for i in result if show == common.parseDOM(i, "SeriesName")[0] and year in common.parseDOM(i, "FirstAired")[0]][0]
    show_alt = common.parseDOM(result, "SeriesName")[0]
    tvdb = common.parseDOM(result, "seriesid")[0]
   show_alt = common.replaceHTMLCodes(show_alt)
   show_alt = show_alt.encode('utf-8')
   tvdb = common.replaceHTMLCodes(tvdb)
   tvdb = tvdb.encode('utf-8')
  except:
   pass
  # NOTE(review): if both lookups above failed, show_alt/tvdb are unbound
  # here; the try/except blocks below mask the resulting NameError, which
  # silently disables the TVRage and TVDB backends — confirm intended.
  try:
   self.list = []
   seasonList = self.tvrage_list(url, image, year, imdb, tvdb, genre, plot, show, show_alt)
   if not (seasonList == None or seasonList == []): return seasonList
  except:
   pass
  try:
   self.list = []
   seasonList = self.tvdb_list(url, image, year, imdb, tvdb, genre, plot, show, show_alt)
   if not (seasonList == None or seasonList == []): return seasonList
  except:
   pass
  try:
   self.list = []
   seasonList = self.imdb_list(url, image, year, imdb, '0', genre, plot, show, show)
   if not (seasonList == None or seasonList == []): return seasonList
  except:
   pass
 def tvrage_list(self, url, image, year, imdb, tvdb, genre, plot, show, show_alt):
  """Season list via TVRage (id resolved through Trakt's summary API).

  Seasons whose premiere date is missing or in the future are skipped.
  The raw episode XML is stashed in 'idx_data' for reuse downstream.
  """
  try:
   traktUrl = link().trakt_summary % (link().trakt_key, tvdb)
   result = getUrl(traktUrl).result
   result = json.loads(result)
   tvrage = result['tvrage_id']
   tvrageUrl = link().tvrage_info % tvrage
   result = getUrl(tvrageUrl).result
   seasons = common.parseDOM(result, "Season", ret="no")
   seasons = [i for i in seasons if not i == '0']
  except:
   return
  for season in seasons:
   try:
    date = common.parseDOM(result, "Season", attrs = { "no": season })[0]
    date = common.parseDOM(date, "airdate")[0]
    date = date.encode('utf-8')
    if date == '' or '-00' in date: raise Exception()
    # Compare YYYYMMDDHHMM with a +1:05 grace window against now (UTC-5).
    if int(re.sub('[^0-9]', '', str(date)) + '0000') + 10500 > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y%m%d%H%M")): raise Exception()
    num = '%01d' % int(season)
    num = num.encode('utf-8')
    name = '%s %s' % ('Season', num)
    name = name.encode('utf-8')
    self.list.append({'name': name, 'url': tvrageUrl, 'image': image, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': plot, 'show': show, 'show_alt': show_alt, 'season': num, 'sort': '%10d' % int(num), 'idx_data': result})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('sort'))
  return self.list
 def tvdb_list(self, url, image, year, imdb, tvdb, genre, plot, show, show_alt):
  """Season list via TheTVDB: one entry per season's episode 1.

  Future/undated seasons are skipped; raw XML is kept in 'idx_data'.
  """
  try:
   tvdbUrl = link().tvdb_episodes % (link().tvdb_key, tvdb)
   result = getUrl(tvdbUrl).result
   seasons = common.parseDOM(result, "Episode")
   seasons = [i for i in seasons if common.parseDOM(i, "EpisodeNumber")[0] == '1']
   seasons = [i for i in seasons if not common.parseDOM(i, "SeasonNumber")[0] == '0']
  except:
   return
  for season in seasons:
   try:
    date = common.parseDOM(season, "FirstAired")[0]
    date = common.replaceHTMLCodes(date)
    date = date.encode('utf-8')
    if date == '' or '-00' in date: raise Exception()
    # Compare YYYYMMDDHHMM with a +1:05 grace window against now (UTC-5).
    if int(re.sub('[^0-9]', '', str(date)) + '0000') + 10500 > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y%m%d%H%M")): raise Exception()
    num = common.parseDOM(season, "SeasonNumber")[0]
    num = '%01d' % int(num)
    num = num.encode('utf-8')
    name = '%s %s' % ('Season', num)
    name = name.encode('utf-8')
    self.list.append({'name': name, 'url': link().tvdb_base, 'image': image, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': plot, 'show': show, 'show_alt': show_alt, 'season': num, 'sort': '%10d' % int(num), 'idx_data': result})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('sort'))
  return self.list
 def imdb_list(self, url, image, year, imdb, tvdb, genre, plot, show, show_alt):
  """Season list via IMDb's season picker dropdown.

  Only the newest season has its first air date checked (to drop a
  season that has not started airing yet); older seasons are assumed
  to have aired.
  """
  try:
   imdbUrl = link().imdb_seasons % imdb
   result = getUrl(imdbUrl).result
   result = result.decode('iso-8859-1').encode('utf-8')
   seasons = common.parseDOM(result, "select", attrs = { "id": "bySeason" })[0]
   seasons = common.parseDOM(seasons, "option", ret="value")
   seasons = [i for i in seasons if not i == '0']
   seasons = [i for i in seasons if i.isdigit()]
  except:
   return
  for season in seasons:
   try:
    num = '%01d' % int(season)
    num = num.encode('utf-8')
    name = '%s %s' % ('Season', num)
    name = name.encode('utf-8')
    url = link().imdb_episodes % (imdb, season)
    url = url.encode('utf-8')
    if season == seasons[-1]:
     # Latest season only: parse its first episode's air date and skip
     # the season entirely if that date is still in the future.
     result = getUrl(url).result
     dateDict = {'Jan.':'01', 'Feb.':'02', 'Mar.':'03', 'Apr.':'04', 'May':'05', 'Jun.':'06', 'Jul.':'07', 'Aug.':'08', 'Sep.':'09', 'Oct.':'10', 'Nov.':'11', 'Dec.':'12'}
     date = common.parseDOM(result, "div", attrs = { "class": "airdate" })[0]
     for i in dateDict: date = date.replace(i, dateDict[i])
     date = re.findall('(\d+) (\d+) (\d+)', date, re.I)[0]
     date = '%04d-%02d-%02d' % (int(date[2]), int(date[1]), int(date[0]))
     if int(re.sub('[^0-9]', '', str(date)) + '0000') + 10500 > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y%m%d%H%M")): raise Exception()
    self.list.append({'name': name, 'url': url, 'image': image, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': plot, 'show': show, 'show_alt': show_alt, 'season': num, 'sort': '%10d' % int(num), 'idx_data': ''})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('sort'))
  return self.list
class episodes:
 """Builds episode lists for a season (TVRage / TVDB / IMDb backends,
 chosen by the season url scheme) plus a Trakt calendar view."""
 def __init__(self):
  self.list = []
 def get(self, name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data='', idx=True):
  """Fetch episodes; render them when idx is True, else return the list."""
  if idx == True:
   #self.list = self.get_list(name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data)
   self.list = cache(self.get_list, name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data)
   index().episodeList(self.list)
  else:
   self.list = self.get_list(name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data)
   return self.list
 def calendar(self, url):
  """Render the Trakt airing calendar for one date (cached)."""
  #self.list = self.trakt_list(url)
  self.list = cache2(self.trakt_list, url)
  index().episodeList(self.list)
 def get_list(self, name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data):
  """Dispatch to the backend matching the season url's origin."""
  if url.startswith(link().tvrage_base):
   episodeList = self.tvrage_list(name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data)
  elif url == link().tvdb_base:
   episodeList = self.tvdb_list(name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data)
  else:
   episodeList = self.imdb_list(name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data)
  return episodeList
 def tvrage_list(self, name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data):
  """Episodes from TVRage XML (reuses idx_data if the seasons step
  already fetched it). Future/undated episodes are skipped."""
  try:
   season = re.sub('[^0-9]', '', name)
   season = season.encode('utf-8')
   if not idx_data == '': result = idx_data
   else: result = getUrl(url).result
   episodes = common.parseDOM(result, "Season", attrs = { "no": season })[0]
   episodes = common.parseDOM(episodes, "episode")
   episodes = [i for i in episodes if not common.parseDOM(i, "seasonnum")[0] == '00']
   episodes = [i for i in episodes if not common.parseDOM(i, "seasonnum")[0] == '0']
  except:
   return
  for episode in episodes:
   try:
    date = common.parseDOM(episode, "airdate")[0]
    date = common.replaceHTMLCodes(date)
    date = date.encode('utf-8')
    if date == '' or '-00' in date: raise Exception()
    # Compare YYYYMMDDHHMM with a +1:05 grace window against now (UTC-5).
    if int(re.sub('[^0-9]', '', str(date)) + '0000') + 10500 > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y%m%d%H%M")): raise Exception()
    title = common.parseDOM(episode, "title")[0]
    title = common.replaceHTMLCodes(title)
    title = title.encode('utf-8')
    num = common.parseDOM(episode, "seasonnum")[0]
    num = re.sub('[^0-9]', '', '%01d' % int(num))
    num = num.encode('utf-8')
    # Display name follows the 'Show S01E01' convention.
    name = show_alt + ' S' + '%02d' % int(season) + 'E' + '%02d' % int(num)
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    try: thumb = common.parseDOM(episode, "screencap")[0]
    except: thumb = image
    thumb = common.replaceHTMLCodes(thumb)
    thumb = thumb.encode('utf-8')
    self.list.append({'name': name, 'url': name, 'image': thumb, 'date': date, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': plot, 'title': title, 'show': show, 'show_alt': show_alt, 'season': season, 'episode': num, 'sort': '%10d' % int(num)})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('sort'))
  return self.list
 def tvdb_list(self, name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data):
  """Episodes from TheTVDB XML (reuses idx_data when available).
  Future/undated episodes are skipped."""
  try:
   season = re.sub('[^0-9]', '', name)
   season = season.encode('utf-8')
   tvdbUrl = link().tvdb_episodes % (link().tvdb_key, tvdb)
   if not idx_data == '': result = idx_data
   else: result = getUrl(tvdbUrl).result
   episodes = common.parseDOM(result, "Episode")
   episodes = [i for i in episodes if '%01d' % int(common.parseDOM(i, "SeasonNumber")[0]) == season]
   episodes = [i for i in episodes if not common.parseDOM(i, "EpisodeNumber")[0] == '0']
  except:
   return
  for episode in episodes:
   try:
    date = common.parseDOM(episode, "FirstAired")[0]
    date = common.replaceHTMLCodes(date)
    date = date.encode('utf-8')
    if date == '' or '-00' in date: raise Exception()
    # Compare YYYYMMDDHHMM with a +1:05 grace window against now (UTC-5).
    if int(re.sub('[^0-9]', '', str(date)) + '0000') + 10500 > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y%m%d%H%M")): raise Exception()
    title = common.parseDOM(episode, "EpisodeName")[0]
    title = common.replaceHTMLCodes(title)
    title = title.encode('utf-8')
    num = common.parseDOM(episode, "EpisodeNumber")[0]
    num = re.sub('[^0-9]', '', '%01d' % int(num))
    num = num.encode('utf-8')
    name = show_alt + ' S' + '%02d' % int(season) + 'E' + '%02d' % int(num)
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    thumb = common.parseDOM(episode, "filename")[0]
    if not thumb == '': thumb = link().tvdb_thumb + thumb
    else: thumb = image
    thumb = common.replaceHTMLCodes(thumb)
    thumb = thumb.encode('utf-8')
    try: desc = common.parseDOM(episode, "Overview")[0]
    except: desc = plot
    desc = common.replaceHTMLCodes(desc)
    desc = desc.encode('utf-8')
    self.list.append({'name': name, 'url': name, 'image': thumb, 'date': date, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': desc, 'title': title, 'show': show, 'show_alt': show_alt, 'season': season, 'episode': num, 'sort': '%10d' % int(num)})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('sort'))
  return self.list
 def imdb_list(self, name, url, image, year, imdb, tvdb, genre, plot, show, show_alt, idx_data):
  """Episodes scraped from IMDb's season page. Air dates are rebuilt
  from 'DD Mon. YYYY' text; future/undated episodes are skipped."""
  try:
   season = re.sub('[^0-9]', '', name)
   season = season.encode('utf-8')
   result = getUrl(url.replace(link().imdb_base, link().imdb_akas)).result
   result = result.decode('iso-8859-1').encode('utf-8')
   episodes = common.parseDOM(result, "div", attrs = { "class": "list_item.+?" })
   episodes = [i for i in episodes if not common.parseDOM(i, "meta", ret="content", attrs = { "itemprop": "episodeNumber" })[0] == '0']
  except:
   return
  for episode in episodes:
   try:
    # Map abbreviated month names to numbers, then rebuild ISO date.
    dateDict = {'Jan.':'01', 'Feb.':'02', 'Mar.':'03', 'Apr.':'04', 'May':'05', 'Jun.':'06', 'Jul.':'07', 'Aug.':'08', 'Sep.':'09', 'Oct.':'10', 'Nov.':'11', 'Dec.':'12'}
    date = common.parseDOM(episode, "div", attrs = { "class": "airdate" })[0]
    for i in dateDict: date = date.replace(i, dateDict[i])
    date = re.findall('(\d+) (\d+) (\d+)', date, re.I)[0]
    date = '%04d-%02d-%02d' % (int(date[2]), int(date[1]), int(date[0]))
    # Compare YYYYMMDDHHMM with a +1:05 grace window against now (UTC-5).
    if int(re.sub('[^0-9]', '', str(date)) + '0000') + 10500 > int((datetime.datetime.utcnow() - datetime.timedelta(hours = 5)).strftime("%Y%m%d%H%M")): raise Exception()
    title = common.parseDOM(episode, "a", ret="title")[0]
    title = common.replaceHTMLCodes(title)
    title = title.encode('utf-8')
    num = common.parseDOM(episode, "meta", ret="content", attrs = { "itemprop": "episodeNumber" })[0]
    num = re.sub('[^0-9]', '', '%01d' % int(num))
    num = num.encode('utf-8')
    name = show_alt + ' S' + '%02d' % int(season) + 'E' + '%02d' % int(num)
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    try:
     # Strip the size qualifiers so IMDb serves the full-size image.
     thumb = common.parseDOM(episode, "img", ret="src")[0]
     if not ('_SX' in thumb or '_SY' in thumb): raise Exception()
     thumb = re.sub('_CR.+?_', '_', re.sub('_SY.+?_', '_', re.sub('_SX.+?_', '_', thumb)))
    except:
     thumb = image
    thumb = common.replaceHTMLCodes(thumb)
    thumb = thumb.encode('utf-8')
    try:
     desc = common.parseDOM(episode, "div", attrs = { "itemprop": "description" })[0]
     # 'update=tt' marks IMDb's "add a plot" placeholder, not a real plot.
     if 'update=tt' in desc: raise Exception()
    except:
     desc = plot
    desc = common.replaceHTMLCodes(desc)
    desc = desc.encode('utf-8')
    self.list.append({'name': name, 'url': name, 'image': thumb, 'date': date, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': desc, 'title': title, 'show': show, 'show_alt': show_alt, 'season': season, 'episode': num, 'sort': '%10d' % int(num)})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('sort'))
  return self.list
 def trakt_list(self, date):
  """Episodes airing on *date* from the Trakt calendar API.

  Entries with no IMDb id are dropped; a missing TVDB id becomes '0'.
  """
  try:
   traktUrl = link().trakt_calendar % (link().trakt_key, re.sub('[^0-9]', '', str(date)))
   result = getUrl(traktUrl).result
   result = json.loads(result)[0]
   episodes = result['episodes']
  except:
   return
  for episode in episodes:
   try:
    title = episode['episode']['title']
    title = common.replaceHTMLCodes(title)
    title = title.encode('utf-8')
    season = episode['episode']['season']
    season = re.sub('[^0-9]', '', '%01d' % int(season))
    season = season.encode('utf-8')
    num = episode['episode']['number']
    num = re.sub('[^0-9]', '', '%01d' % int(num))
    num = num.encode('utf-8')
    show = episode['show']['title']
    show = common.replaceHTMLCodes(show)
    show = show.encode('utf-8')
    name = show + ' S' + '%02d' % int(season) + 'E' + '%02d' % int(num)
    name = common.replaceHTMLCodes(name)
    name = name.encode('utf-8')
    year = episode['show']['year']
    year = re.sub('[^0-9]', '', str(year))
    year = year.encode('utf-8')
    imdb = episode['show']['imdb_id']
    imdb = re.sub('[^0-9]', '', str(imdb))
    if imdb == '': raise Exception()
    imdb = imdb.encode('utf-8')
    tvdb = episode['show']['tvdb_id']
    tvdb = re.sub('[^0-9]', '', str(tvdb))
    if tvdb == '': tvdb = '0'
    tvdb = tvdb.encode('utf-8')
    thumb = episode['episode']['images']['screen']
    if thumb == '': thumb = episode['show']['images']['poster']
    thumb = common.replaceHTMLCodes(thumb)
    thumb = thumb.encode('utf-8')
    try:
     genre = episode['show']['genres']
     genre = " / ".join(genre)
     genre = common.replaceHTMLCodes(genre)
     genre = genre.encode('utf-8')
    except:
     genre = ''
    try:
     desc = episode['episode']['overview']
     if desc == '': desc = episode['show']['overview']
     desc = common.replaceHTMLCodes(desc)
     desc = desc.encode('utf-8')
    except:
     desc = ''
    self.list.append({'name': name, 'url': name, 'image': thumb, 'date': date, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'genre': genre, 'plot': desc, 'title': title, 'show': show, 'show_alt': show, 'season': season, 'episode': num})
   except:
    pass
  self.list = sorted(self.list, key=itemgetter('name'))
  return self.list
class resolver:
 """Collects playable stream sources from the enabled provider scrapers,
 ranks them by user host preference, and resolves/plays the selection."""
 def __init__(self):
  # Build the supported-host whitelist before any provider lookups run.
  self.sources_dict()
  self.sources = []
 def run(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, url):
  """Gather, rank and play sources for one episode.

  *url* may carry a pseudo-scheme: 'play://' forces direct autoplay,
  'sources://' / 'download://' force the picker dialog. Returns the
  resolved URL, 'download://' for download mode, or None on
  failure/cancel (showing a "no sources" notification unless PseudoTV
  is running).
  """
  try:
   self.sources = self.sources_get(name, title, imdb, tvdb, year, season, episode, show, show_alt, self.hostDict)
   self.sources = self.sources_filter()
   if self.sources == []: raise Exception()
   autoplay = getSetting("autoplay")
   # PseudoTV must never block on a dialog; playback started from the
   # library (outside this plugin's folder) has its own autoplay toggle.
   if index().getProperty('PseudoTVRunning') == 'True':
    autoplay = 'true'
   elif not xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
    autoplay = getSetting("autoplay_library")
   if url == 'play://':
    url = self.sources_direct()
   elif url == 'sources://' or url == 'download://' or not autoplay == 'true':
    url = self.sources_dialog()
   else:
    url = self.sources_direct()
   if url is None: raise Exception()
   if url == 'download://': return url
   if url == 'close://': return
   if getSetting("playback_info") == 'true':
    index().infoDialog(self.selectedSource, header=name)
   player().run(name, url, imdb)
   return url
  except:
   if not index().getProperty('PseudoTVRunning') == 'True':
    index().infoDialog(language(30318).encode("utf-8"))
   return
 def sources_get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
  """Query every enabled provider concurrently.

  Each provider publishes its results via a module-level
  '<provider>_sources' global, which is reset here before the threads
  start and merged once they have all joined.
  """
  threads = []
  global icefilms_sources
  icefilms_sources = []
  if getSetting("icefilms") == 'true':
   threads.append(Thread(icefilms().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global primewire_sources
  primewire_sources = []
  if getSetting("primewire") == 'true':
   threads.append(Thread(primewire().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global watchseries_sources
  watchseries_sources = []
  # Watchseries is always queried: it has no enable/disable setting.
  threads.append(Thread(watchseries().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global tvonline_sources
  tvonline_sources = []
  if getSetting("tvonline") == 'true':
   threads.append(Thread(tvonline().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global ororotv_sources
  ororotv_sources = []
  if getSetting("ororotv") == 'true':
   threads.append(Thread(ororotv().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global putlockertv_sources
  putlockertv_sources = []
  if getSetting("putlockertv") == 'true':
   threads.append(Thread(putlockertv().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global clickplay_sources
  clickplay_sources = []
  if getSetting("clickplay") == 'true':
   threads.append(Thread(clickplay().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global vkbox_sources
  vkbox_sources = []
  if getSetting("vkbox") == 'true':
   threads.append(Thread(vkbox().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global istreamhd_sources
  istreamhd_sources = []
  if getSetting("istreamhd") == 'true':
   threads.append(Thread(istreamhd().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global simplymovies_sources
  simplymovies_sources = []
  if getSetting("simplymovies") == 'true':
   threads.append(Thread(simplymovies().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global moviestorm_sources
  moviestorm_sources = []
  if getSetting("moviestorm") == 'true':
   threads.append(Thread(moviestorm().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  global noobroom_sources
  noobroom_sources = []
  if getSetting("noobroom") == 'true':
   threads.append(Thread(noobroom().get, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict))
  [i.start() for i in threads]
  [i.join() for i in threads]
  self.sources = icefilms_sources + primewire_sources + watchseries_sources + tvonline_sources + ororotv_sources + putlockertv_sources + vkbox_sources + clickplay_sources + istreamhd_sources + simplymovies_sources + moviestorm_sources + noobroom_sources
  return self.sources
 def sources_resolve(self, url, provider):
  """Resolve a provider-specific link into a direct stream URL.

  Returns None (instead of raising) when resolution fails.
  """
  try:
   if provider == 'Icefilms': url = icefilms().resolve(url)
   elif provider == 'Primewire': url = primewire().resolve(url)
   elif provider == 'Watchseries': url = watchseries().resolve(url)
   elif provider == 'TVonline': url = tvonline().resolve(url)
   elif provider == 'OroroTV': url = ororotv().resolve(url)
   elif provider == 'PutlockerTV': url = putlockertv().resolve(url)
   elif provider == 'Clickplay': url = clickplay().resolve(url)
   elif provider == 'VKBox': url = vkbox().resolve(url)
   elif provider == 'iStreamHD': url = istreamhd().resolve(url)
   elif provider == 'Simplymovies': url = simplymovies().resolve(url)
   elif provider == 'Moviestorm': url = moviestorm().resolve(url)
   elif provider == 'Noobroom': url = noobroom().resolve(url)
   return url
  except:
   return
 def sources_filter(self):
  """Order self.sources by the user's host preferences (HD list first,
  then SD list, then unranked SD hosts), optionally drop HD entries,
  and rewrite each label as '#N | PROVIDER | HOST | QUALITY'."""
  hd_rank = [getSetting("hosthd1"), getSetting("hosthd2"), getSetting("hosthd3"), getSetting("hosthd4"), getSetting("hosthd5"), getSetting("hosthd6"), getSetting("hosthd7")]
  sd_rank = [getSetting("host1"), getSetting("host2"), getSetting("host3"), getSetting("host4"), getSetting("host5"), getSetting("host6"), getSetting("host7"), getSetting("host8"), getSetting("host9"), getSetting("host10"), getSetting("host11"), getSetting("host12"), getSetting("host13"), getSetting("host14"), getSetting("host15")]
  # Normalise host names once so the ranking comparisons are stable.
  for i in range(len(self.sources)): self.sources[i]['source'] = self.sources[i]['source'].lower()
  self.sources = sorted(self.sources, key=itemgetter('source'))
  # Renamed from `filter` to avoid shadowing the builtin.
  ranked = []
  for host in hd_rank: ranked += [i for i in self.sources if i['quality'] == 'HD' and i['source'].lower() == host.lower()]
  for host in sd_rank: ranked += [i for i in self.sources if not i['quality'] == 'HD' and i['source'].lower() == host.lower()]
  # SD hosts the user did not rank still get appended at the back.
  ranked += [i for i in self.sources if not i['quality'] == 'HD' and not any(x == i['source'].lower() for x in [r.lower() for r in sd_rank])]
  self.sources = ranked
  # Stable partition: all HD sources ahead of all SD sources.
  ranked = []
  ranked += [i for i in self.sources if i['quality'] == 'HD']
  ranked += [i for i in self.sources if i['quality'] == 'SD']
  self.sources = ranked
  if not getSetting("quality") == 'true':
   self.sources = [i for i in self.sources if not i['quality'] == 'HD']
  count = 1
  for i in range(len(self.sources)):
   self.sources[i]['source'] = '#'+ str(count) + ' | ' + self.sources[i]['provider'].upper() + ' | ' + self.sources[i]['source'].upper() + ' | ' + self.sources[i]['quality']
   count = count + 1
  return self.sources
 def sources_dialog(self):
  """Show the source picker; return the resolved URL of the choice,
  'close://' when the user cancels, or None on resolution failure."""
  try:
   sourceList, urlList, providerList = [], [], []
   for i in self.sources:
    sourceList.append(i['source'])
    urlList.append(i['url'])
    providerList.append(i['provider'])
   select = index().selectDialog(sourceList)
   if select == -1: return 'close://'
   url = self.sources_resolve(urlList[select], providerList[select])
   self.selectedSource = self.sources[select]['source']
   return url
  except:
   return
 def sources_direct(self):
  """Probe ranked sources in order; return the first URL whose server
  answers with non-HTML content, or fall back to the first URL that at
  least resolved (even if the probe failed)."""
  u = None
  for i in self.sources:
   try:
    # Icefilms HD entries are excluded from direct autoplay.
    if i['provider'] == 'Icefilms' and i['quality'] == 'HD': raise Exception()
    url = self.sources_resolve(i['url'], i['provider'])
    xbmc.sleep(1000)
    if url is None: raise Exception()
    # BUGFIX: was `u == url` — a no-op comparison, so the fallback URL
    # was never recorded and total probe failure always returned None.
    if u is None: u = url
    # HEAD-style probe: fetch the first 16KB and reject HTML responses
    # (error pages / interstitials rather than media).
    request = urllib2.Request(url.rsplit('|', 1)[0])
    request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0')
    request.add_header('Cookie', 'video=true')
    response = urllib2.urlopen(request, timeout=20)
    chunk = response.read(16 * 1024)
    response.close()
    if 'text/html' in str(response.info()["Content-Type"]): raise Exception()
    self.selectedSource = i['source']
    return url
   except:
    pass
  return u
 def sources_dict(self):
  """Whitelist of supported one-click file hosts; commented-out entries
  are deliberately disabled."""
  self.hostDict = [
  '2gb-hosting',
  'allmyvideos',
  #'180upload',
  'bayfiles',
  'bestreams',
  #'billionuploads',
  'castamp',
  #'clicktoview',
  'daclips',
  'divxstage',
  'donevideo',
  'ecostream',
  'filenuke',
  'firedrive',
  'flashx',
  'gorillavid',
  'hostingbulk',
  #'hugefiles',
  'jumbofiles',
  'lemuploads',
  'limevideo',
  #'megarelease',
  'mightyupload',
  'movdivx',
  'movpod',
  'movreel',
  'movshare',
  'movzap',
  'muchshare',
  'nosvideo',
  'novamov',
  'nowvideo',
  'played',
  'playwire',
  'primeshare',
  'promptfile',
  'purevid',
  'putlocker',
  'sharerepo',
  'sharesix',
  'sockshare',
  'stagevu',
  'streamcloud',
  'thefile',
  'uploadc',
  'vidbull',
  'videobb',
  'videoweed',
  'videozed',
  #'vidhog',
  #'vidplay',
  'vidx',
  #'vidxden',
  #'watchfreeinhd',
  'xvidstage',
  'yourupload',
  'youwatch',
  'zalaa'
  ]
class icefilms:
 """Scraper for icefilms.info: locates an episode's host links and
 resolves them through the site's AJAX endpoint."""
 def __init__(self):
  # Site endpoints; video_link/post_link drive the embedded player API.
  self.base_link = 'http://www.icefilms.info'
  self.search_link = 'http://www.icefilms.info/tv/a-z/%s'
  self.video_link = 'http://www.icefilms.info/membersonly/components/com_iceplayer/video.php?vid=%s'
  self.post_link = 'http://www.icefilms.info/membersonly/components/com_iceplayer/video.phpAjaxResp.php'
 def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
  """Populate the module-level icefilms_sources list for one episode.

  Walks the A-Z index to the show page (matched by IMDb id), finds the
  SxxExx episode link, then collects HD and SD host links from the
  player page. Fails silently (returns None) on any scrape error.
  """
  try:
   global icefilms_sources
   icefilms_sources = []
   # A-Z index ignores leading articles; non-alpha titles file under '1'.
   query = show.upper()
   if query.startswith('THE '): query = query.replace('THE ', '')
   elif query.startswith('A '): query = query.replace('A ', '')
   if not query[0].isalpha(): query = '1'
   query = self.search_link % query[0]
   result = getUrl(query).result
   result = result.decode('iso-8859-1').encode('utf-8')
   url = re.compile('id=%s>.+?href=(.+?)>' % imdb).findall(result)[0]
   url = '%s%s' % (self.base_link, url)
   url = common.replaceHTMLCodes(url)
   url = url.encode('utf-8')
   result = getUrl(url).result
   result = result.decode('iso-8859-1').encode('utf-8')
   # Episode links look like '/ip.php?v=<id>&>1x01'.
   id = re.compile('href=/ip.php[?]v=(.+?)&>%01dx%02d' % (int(season), int(episode))).findall(result)[0]
   id = id.split('v=')[-1]
   url = self.video_link % id
   url = common.replaceHTMLCodes(url)
   url = url.encode('utf-8')
   result = getUrl(url).result
   result = result.decode('iso-8859-1').encode('utf-8')
   # 'sec' is the anti-bot token the AJAX endpoint expects back.
   sec = re.compile('lastChild[.]value="(.+?)"').findall(result)[0]
   links = common.parseDOM(result, "div", attrs = { "class": "ripdiv" })
   import random
   try:
    hd_links = ''
    hd_links = [i for i in links if '>HD 720p<' in i][0]
    hd_links = re.compile("onclick='go[(](.+?)[)]'>Source(.+?)</a>").findall(hd_links)
   except:
    pass
   for url, host in hd_links:
    try:
     # Only these hosts are accepted for HD playback.
     hosts = ['movreel', 'billionuploads', '180upload', 'hugefiles']
     host = re.sub('<span\s.+?>|</span>|#\d*:','', host)
     host = host.strip().lower()
     if not host in hosts: raise Exception()
     # Randomised s/m values mimic the site's own player request.
     url = 'id=%s&t=%s&sec=%s&s=%s&m=%s&cap=&iqs=&url=' % (url, id, sec, random.randrange(5, 50), random.randrange(100, 300) * -1)
     icefilms_sources.append({'source': host, 'quality': 'HD', 'provider': 'Icefilms', 'url': url})
    except:
     pass
   try:
    sd_links = ''
    sd_links = [i for i in links if '>DVDRip / Standard Def<' in i][0]
    sd_links = re.compile("onclick='go[(](.+?)[)]'>Source(.+?)</a>").findall(sd_links)
   except:
    pass
   for url, host in sd_links:
    try:
     # SD playback is restricted to movreel only.
     hosts = ['movreel']
     host = re.sub('<span\s.+?>|</span>|#\d*:','', host)
     host = host.strip().lower()
     if not host in hosts: raise Exception()
     url = 'id=%s&t=%s&sec=%s&s=%s&m=%s&cap=&iqs=&url=' % (url, id, sec, random.randrange(5, 50), random.randrange(100, 300) * -1)
     icefilms_sources.append({'source': host, 'quality': 'SD', 'provider': 'Icefilms', 'url': url})
    except:
     pass
  except:
   return
 def resolve(self, url):
  """POST the prepared token string to the AJAX endpoint, extract the
  redirect target, and hand it to the shared host resolvers.
  Returns the playable URL or None on failure.
  """
  try:
   result = getUrl(self.post_link, post=url).result
   url = result.split("?url=", 1)[-1]
   url = urllib.unquote_plus(url)
   import commonresolvers
   url = commonresolvers.resolvers().get(url)
   return url
  except:
   return
class primewire:
    """Scraper for primewire.ag, with an optional web-proxy fallback.

    get() populates the module-level list ``primewire_sources``;
    resolve() unwraps the site's <noframes> redirect via urlresolver.
    """
    def __init__(self):
        self.base_link = 'http://www.primewire.ag'
        self.key_link = 'http://www.primewire.ag/index.php?search'
        self.search_link = 'http://www.primewire.ag/index.php?search_keywords=%s&key=%s&search_section=2'
        # Proxy mirror used when the site cannot be reached directly.
        self.proxy_base_link = 'http://proxy.cyberunlocker.com'
        self.proxy_link = 'http://proxy.cyberunlocker.com/browse.php?u=%s'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Collect per-host episode links into ``primewire_sources`` (best-effort)."""
        try:
            global primewire_sources
            primewire_sources = []
            # The search form requires a per-session hidden 'key' input;
            # try the site directly, then fall back to the proxy mirror.
            try:
                result = getUrl(self.key_link).result
                key = common.parseDOM(result, "input", ret="value", attrs = { "name": "key" })[0]
                query = self.search_link % (urllib.quote_plus(re.sub('\'', '', show)), key)
            except:
                result = getUrl(self.proxy_link % urllib.quote_plus(urllib.unquote_plus(self.key_link)), referer=self.proxy_base_link).result
                key = common.parseDOM(result, "input", ret="value", attrs = { "name": "key" })[0]
                query = self.search_link % (urllib.quote_plus(re.sub('\'', '', show)), key)
                query = self.proxy_link % urllib.quote_plus(urllib.unquote_plus(query))
            result = getUrl(query, referer=query).result
            result = result.decode('iso-8859-1').encode('utf-8')
            result = common.parseDOM(result, "div", attrs = { "class": "index_item.+?" })
            # Keep only hits whose title year is within +/-1 of the requested year.
            result = [i for i in result if any(x in re.compile('title="Watch (.+?)"').findall(i)[0] for x in ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)])]
            result = uniqueList(result).list
            match = [common.parseDOM(i, "a", ret="href")[0] for i in result]
            if match == []: return
            # Inspect up to five candidate pages; a matching IMDB id is
            # decisive and stops the scan.
            for i in match[:5]:
                try:
                    if not i.startswith('http://'): i = '%s%s' % (self.base_link, i)
                    result = getUrl(i, referer=i).result
                    if any(x in self.cleantitle(result) for x in [str('>' + self.cleantitle(show) + '(%s)' % str(year) + '<'), str('>' + self.cleantitle(show_alt) + '(%s)' % str(year) + '<')]):
                        match2 = i
                    if any(x in self.cleantitle(result) for x in [str('>' + self.cleantitle(show) + '<'), str('>' + self.cleantitle(show_alt) + '<')]):
                        match2 = i
                    if str('tt' + imdb) in result:
                        match2 = i
                        break
                except:
                    pass
            # Build the season/episode URL, preserving proxy wrapping when used.
            if match2.startswith(self.proxy_base_link):
                url = match2.replace(self.proxy_link % '','')
                url = urllib.unquote_plus(url)
                url = url.replace('/watch-','/tv-')
                url += '/season-%01d-episode-%01d' % (int(season), int(episode))
                url = self.proxy_link % urllib.quote_plus(urllib.quote_plus(url))
            else:
                url = match2.replace('/watch-','/tv-')
                url += '/season-%01d-episode-%01d' % (int(season), int(episode))
            result = getUrl(url, referer=url).result
            result = result.decode('iso-8859-1').encode('utf-8')
            links = common.parseDOM(result, "tbody")
            for i in links:
                try:
                    host = common.parseDOM(i, "a", ret="href", attrs = { "class": ".+?rater" })[0]
                    host = re.compile('domain=(.+?)[.]').findall(host)[0]
                    host = urllib.unquote_plus(host)
                    # Keep only hosts the caller can resolve (case-insensitive match).
                    host = [x for x in hostDict if host.lower() == x.lower()][0]
                    host = host.encode('utf-8')
                    # Only the 'quality_dvd' class is accepted; anything else skips the row.
                    quality = common.parseDOM(i, "span", ret="class")[0]
                    if quality == 'quality_dvd': quality = 'SD'
                    else: raise Exception()
                    quality = quality.encode('utf-8')
                    url = common.parseDOM(i, "a", ret="href")[0]
                    if url.startswith(self.proxy_base_link):
                        url = url.replace(self.proxy_link % '','')
                        url = urllib.unquote_plus(url)
                        url = self.proxy_link % urllib.quote_plus(urllib.quote_plus(url))
                    else:
                        url = '%s%s' % (self.base_link, url)
                    url = common.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    primewire_sources.append({'source': host, 'quality': quality, 'provider': 'Primewire', 'url': url})
                except:
                    pass
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Extract the hoster link from the <noframes> wrapper and resolve it."""
        try:
            result = getUrl(url, referer=self.proxy_base_link).result
            url = common.parseDOM(result, "noframes")[0]
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            import urlresolver
            host = urlresolver.HostedMediaFile(url)
            if host: resolver = urlresolver.resolve(url)
            if not resolver.startswith('http://'): return
            if not resolver == url: return resolver
        except:
            return
class watchseries:
    """Scraper for watchseries.ag.

    get() populates the module-level list ``watchseries_sources``;
    resolve() follows the site's interstitial page via urlresolver.
    """
    def __init__(self):
        self.base_link = 'http://watchseries.ag'
        self.search_link = 'http://watchseries.ag/search/%s'
        self.episode_link = 'http://watchseries.ag/episode/%s_s%s_e%s.html'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Collect per-host episode links into ``watchseries_sources`` (best-effort)."""
        try:
            global watchseries_sources
            watchseries_sources = []
            query = self.search_link % urllib.quote_plus(show)
            result = getUrl(query).result
            result = result.decode('iso-8859-1').encode('utf-8')
            # Treat year-1 listings as the requested year before filtering.
            result = result.replace(' (%s)' % str(int(year) - 1), ' (%s)' % year)
            result = re.compile('href="(/serie/.+?)".+?[(]%s[)]' % year).findall(result)
            result = uniqueList(result).list
            match = [self.base_link + i for i in result]
            if match == []: return
            # Inspect up to five candidates; an IMDB id match stops the scan.
            for i in match[:5]:
                try:
                    result = getUrl(i).result
                    if any(x in self.cleantitle(result) for x in [str('>' + self.cleantitle(show) + '<'), str('>' + self.cleantitle(show_alt) + '<')]):
                        match2 = i
                    if str('tt' + imdb) in result:
                        match2 = i
                        break
                except:
                    pass
            # The show slug (last path segment) feeds the episode URL template.
            url = match2.rsplit('/', 1)[-1]
            url = self.episode_link % (url, season, episode)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            # 'lang_1' is the English-links section of the episode page.
            result = common.parseDOM(result, "div", attrs = { "id": "lang_1" })[0]
            for host in hostDict:
                try:
                    links = re.compile('<span>%s</span>.+?href="(.+?)"' % host.lower()).findall(result)
                    for url in links:
                        url = '%s%s' % (self.base_link, url)
                        watchseries_sources.append({'source': host, 'quality': 'SD', 'provider': 'Watchseries', 'url': url})
                except:
                    pass
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Follow the 'myButton' link on the interstitial page and resolve it."""
        try:
            result = getUrl(url).result
            url = common.parseDOM(result, "a", ret="href", attrs = { "class": "myButton" })[0]
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            import urlresolver
            host = urlresolver.HostedMediaFile(url)
            if host: resolver = urlresolver.resolve(url)
            if not resolver.startswith('http://'): return
            if not resolver == url: return resolver
        except:
            return
class tvonline:
    """Scraper for tvonline.cc.

    get() populates the module-level list ``tvonline_sources``;
    resolve() registers a throwaway session and assembles the stream key.
    """
    def __init__(self):
        self.base_link = 'http://tvonline.cc'
        self.search_link = 'http://tvonline.cc/searchlist.php'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Find the episode page URL and store it into ``tvonline_sources``."""
        try:
            global tvonline_sources
            tvonline_sources = []
            query = self.search_link
            post = 'keyword=%s' % urllib.quote_plus(show)
            result = getUrl(query, post=post).result
            result = common.parseDOM(result, "div", attrs = { "class": "tv_aes_l" })[0]
            result = common.parseDOM(result, "li")
            # Exact normalized-title matches only.
            match = [i for i in result if any(x == self.cleantitle(common.parseDOM(i, "a")[-1]) for x in [self.cleantitle(show), self.cleantitle(show_alt)])]
            match2 = [self.base_link + common.parseDOM(i, "a", ret="href")[-1] for i in match]
            if match2 == []: return
            # Confirm the candidate by its year span (+/-1 tolerated).
            for i in match2[:5]:
                try:
                    result = getUrl(i).result
                    match3 = common.parseDOM(result, "span", attrs = { "class": "years" })[0]
                    if any(x in match3 for x in ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]):
                        match4 = result
                        break
                except:
                    pass
            result = common.parseDOM(match4, "li")
            # Prefer a "S1, Ep05:" prefix match; fall back to a title match.
            try: match5 = [i for i in result if i.startswith('S%01d, Ep%02d:' % (int(season), int(episode)))][0]
            except: pass
            try: match5 = [i for i in result if str('>' + self.cleantitle(title) + '<') in self.cleantitle(i)][0]
            except: pass
            url = common.parseDOM(match5, "a", ret="href")[0]
            url = '%s%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            tvonline_sources.append({'source': 'TVonline', 'quality': 'SD', 'provider': 'TVonline', 'url': url})
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Build the direct .mp4 stream URL.

        Registers/logs in with a hardcoded account (key_link decodes to
        a '_method=POST&%s=login&...' form template), reads a sample
        player URL to learn the CDN subdomain and a 5-char key fragment,
        then assembles the final key from the page id plus random filler
        characters.
        """
        try:
            self.login_link = 'http://tvonline.cc/login.php'
            self.reg_link = 'http://tvonline.cc/reg.php'
            # Base64-encoded POST body template with a hardcoded shared account;
            # '%s' is filled with the CSRF token field name scraped below.
            self.key_link = base64.urlsafe_b64decode('X21ldGhvZD1QT1NUJiVzPWxvZ2luJlVzZXJVc2VybmFtZT1hOTQ2ODUxJnN1YnNjcmlwdGlvbnNQYXNzPWE5NDY4NTE=')
            self.video_link = 'http://tvonline.cc/play.php?id=nktlltn-ekkn'
            result = getUrl(self.reg_link, close=False).result
            post = re.compile('name="(Token.+?)" value=".+?"').findall(result)[0]
            post = self.key_link % post
            result = getUrl(self.reg_link, post=post, referer=self.login_link, close=False).result
            result = getUrl(self.video_link).result
            result = common.parseDOM(result, "video", ret="src", attrs = { "id": "ipadvideo" })[0]
            # key5: trailing 5-char fragment of the sample key; dom: CDN subdomain.
            key5 = re.compile('key=\w*-(\w{5})').findall(result)[0]
            dom = re.compile('//(.+?)[.]').findall(result)[0]
            import random
            splitkey = url.split('?id=')[-1].split('-')
            key1 = splitkey[0]
            key4 = splitkey[1]
            # Two 3-char random filler segments drawn from fixed alphabets.
            keychar = "beklm"
            key_length = 3
            key2 = ""
            for i in range(key_length):
                next_index = random.randrange(len(keychar))
                key2 = key2 + keychar[next_index]
            keychar = "ntwyz"
            key_length = 3
            key3 = ""
            for i in range(key_length):
                next_index = random.randrange(len(keychar))
                key3 = key3 + keychar[next_index]# friday k saturday w sunday z
            url = 'http://%s.tvonline.cc/ip.mp4?key=%s-%s%s%s-%s' % (dom,key1,key5, key2, key3, key4)
            return url
        except:
            return
class ororotv:
    """Scraper for ororo.tv (requires a logged-in session).

    get() populates the module-level list ``ororotv_sources``;
    resolve() extracts the direct <source> URL from the episode page.
    """
    def __init__(self):
        self.base_link = 'http://ororo.tv'
        # Base64-encoded sign-in form payload with a hardcoded shared account.
        self.key_link = base64.urlsafe_b64decode('dXNlciU1QnBhc3N3b3JkJTVEPWMyNjUxMzU2JnVzZXIlNUJlbWFpbCU1RD1jMjY1MTM1NiU0MGRyZHJiLmNvbQ==')
        self.sign_link = 'http://ororo.tv/users/sign_in'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Find the episode's data-href and store it into ``ororotv_sources``."""
        try:
            global ororotv_sources
            ororotv_sources = []
            result = getUrl(self.base_link).result
            # "'index show'" only appears on the page when logged in;
            # otherwise sign in and re-fetch.
            if not "'index show'" in result:
                result = getUrl(self.sign_link, post=self.key_link, close=False).result
                result = getUrl(self.base_link).result
            result = common.parseDOM(result, "div", attrs = { "class": "index show" })
            # Exact normalized-title match, then year filter (+/-1 tolerated).
            match = [i for i in result if any(x == self.cleantitle(common.parseDOM(i, "a", attrs = { "class": "name" })[0]) for x in [self.cleantitle(show), self.cleantitle(show_alt)])]
            match2 = [i for i in match if any(x in i for x in ['>%s<' % str(year), '>%s<' % str(int(year)+1), '>%s<' % str(int(year)-1)])][0]
            url = common.parseDOM(match2, "a", ret="href")[0]
            url = '%s%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            # Episode anchors use href="#<season>-<episode>" with the real
            # target in data-href.
            url = common.parseDOM(result, "a", ret="data-href", attrs = { "href": "#%01d-%01d" % (int(season), int(episode)) })[0]
            url = '%s%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            ororotv_sources.append({'source': 'OroroTV', 'quality': 'SD', 'provider': 'OroroTV', 'url': url})
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Return the direct video URL (webm preferred, then mp4) with a Cookie hint."""
        try:
            result = getUrl(url).result
            url = None
            try: url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video/webm" })[0]
            except: pass
            # (double assignment below is redundant but harmless)
            try: url = url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video/mp4" })[0]
            except: pass
            if url is None: return
            if not url.startswith('http://'): url = '%s%s' % (self.base_link, url)
            # Append the cookie the player expects, in 'url|Cookie=...' form.
            url = '%s|Cookie=%s' % (url, urllib.quote_plus('video=true'))
            return url
        except:
            return
class putlockertv:
    """Scraper for putlockertvshows.me.

    get() populates the module-level list ``putlockertv_sources``;
    resolve() delegates to the shared commonresolvers module.
    """
    def __init__(self):
        self.base_link = 'http://putlockertvshows.me'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Find a Firedrive/Mail.ru embed for the episode (best-effort)."""
        try:
            global putlockertv_sources
            putlockertv_sources = []
            # Skip UK-only productions: this index appears to carry US shows.
            search = 'http://www.imdbapi.com/?i=tt%s' % imdb
            search = getUrl(search).result
            search = json.loads(search)
            country = [i.strip() for i in search['Country'].split(',')]
            if 'UK' in country and not 'USA' in country: return
            result = getUrl(self.base_link).result
            result = common.parseDOM(result, "tr", attrs = { "class": "fc" })
            # Exact normalized-title match against the show index.
            match = [i for i in result if any(x == self.cleantitle(common.parseDOM(i, "a")[0]) for x in [self.cleantitle(show), self.cleantitle(show_alt)])][0]
            url = common.parseDOM(match, "a", ret="href")[0]
            url = '%s%s/ifr/s%02de%02d.html' % (self.base_link, url, int(season), int(episode))
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            url = common.parseDOM(result, "div", ret="onclick", attrs = { "class": "badsvideo" })
            # If the onclick div is missing, follow the last iframe once and retry.
            if url == []:
                url = common.parseDOM(result, "iframe", ret="src")[-1]
                url = '%s%s' % (self.base_link, url)
                result = getUrl(url).result
                url = common.parseDOM(result, "div", ret="onclick", attrs = { "class": "badsvideo" })
            url = re.compile(".*'(.+?)'").findall(url[0])[0]
            url = '%s%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            url = common.parseDOM(result, "iframe", ret="src")[0]
            # Putlocker embeds were rebranded to Firedrive; rewrite the host.
            url = url.replace('putlocker', 'firedrive')
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            if 'firedrive' in url: source = 'Firedrive'
            elif 'mail.ru' in url: source = 'Mailru'
            else: return
            putlockertv_sources.append({'source': source, 'quality': 'SD', 'provider': 'PutlockerTV', 'url': url})
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Resolve the embed URL via the shared commonresolvers module."""
        try:
            import commonresolvers
            url = commonresolvers.resolvers().get(url)
            return url
        except:
            return
class clickplay:
    """Scraper for clickplay.to.

    get() populates the module-level list ``clickplay_sources`` with VK /
    Firedrive / Mail.ru links decrypted from the site's proxy redirects;
    resolve() delegates to the shared commonresolvers module.
    """
    def __init__(self):
        self.base_link = 'http://clickplay.to'
        self.search_link = 'http://clickplay.to/search/%s'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Collect decrypted episode links into ``clickplay_sources`` (best-effort)."""
        try:
            global clickplay_sources
            clickplay_sources = []
            # Search without leading articles ('The', 'A').
            query = self.search_link % urllib.quote_plus(' '.join([i for i in show.split() if i not in ['The','the','A','a']]))
            result = getUrl(query).result
            result = common.parseDOM(result, "div", attrs = { "id": "video_list" })[0]
            result = result.split('</a>')
            # Match normalized title + year (+/-1 tolerated) against each entry.
            match = [i for i in result if any(x in self.cleantitle(i) for x in [str('>' + self.cleantitle(show) + '(%s)' % str(year) + '<'), str('>' + self.cleantitle(show) + '(%s)' % str(int(year)+1) + '<'), str('>' + self.cleantitle(show) + '(%s)' % str(int(year)-1) + '<'), str('>' + self.cleantitle(show_alt) + '(%s)' % str(year) + '<'), str('>' + self.cleantitle(show_alt) + '(%s)' % str(int(year)+1) + '<'), str('>' + self.cleantitle(show_alt) + '(%s)' % str(int(year)-1) + '<')])][0]
            url = common.parseDOM(match, "a", ret="href")[0]
            url = '%sseason-%01d/episode-%01d' % (url, int(season), int(episode))
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            import decrypter
            result = getUrl(url).result
            # Only 720p-tagged link rows are considered; 'u' is the page's
            # canonical URL from its meta content attribute.
            links = re.compile('<a href="([?]link_id=.+?)".+?rel="noindex, nofollow".+?\[720p\].+?</a>').findall(result)
            u = re.compile('content="(%s.+?)"' % url).findall(result)[0]
            for i in links[:5]:
                try:
                    result = getUrl(u + i).result
                    url = re.compile('proxy[.]link=clickplay[*](.+?)"').findall(result)[-1]
                    # Decrypt the proxied link with the project's decrypter
                    # (fixed base64 key, ECB mode), trimming NUL padding.
                    url = decrypter.decrypter(198,128).decrypt(url,base64.urlsafe_b64decode('bW5pcUpUcUJVOFozS1FVZWpTb00='),'ECB').split('\0')[0]
                    if 'vk.com' in url:
                        import commonresolvers
                        vk = commonresolvers.resolvers().vk(url)
                        for i in vk: clickplay_sources.append({'source': 'VK', 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url']})
                    elif 'firedrive' in url:
                        clickplay_sources.append({'source': 'Firedrive', 'quality': 'HD', 'provider': 'Clickplay', 'url': url})
                    elif 'mail.ru' in url:
                        clickplay_sources.append({'source': 'Mailru', 'quality': 'SD', 'provider': 'Clickplay', 'url': url})
                except:
                    pass
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Resolve the stored URL via the shared commonresolvers module."""
        try:
            import commonresolvers
            url = commonresolvers.resolvers().get(url)
            return url
        except:
            return
class vkbox:
    """Scraper for the mobapps.cc VK-backed catalogue.

    get() populates the module-level list ``vkbox_sources``; the show
    catalogue is shipped as a zipped JSON file downloaded by getdata().
    resolve() is a no-op because get() already stores direct URLs.
    """
    def __init__(self):
        self.base_link = 'http://mobapps.cc'
        self.data_link = 'http://mobapps.cc/data/data_en.zip'
        self.episodes_link = 'http://mobapps.cc/api/serials/e/?h=%s&u=%s&y=%s'
        # Name of the catalogue file inside the downloaded zip.
        self.tv_link = 'tv_lite.json'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Derive the VK embed URL for the episode (best-effort)."""
        try:
            global vkbox_sources
            vkbox_sources = []
            # Skip UK-only productions.
            search = 'http://www.imdbapi.com/?i=tt%s' % imdb
            search = getUrl(search).result
            search = json.loads(search)
            country = [i.strip() for i in search['Country'].split(',')]
            if 'UK' in country and not 'USA' in country: return
            result = self.getdata()
            #result = cache2(self.getdata)
            result = json.loads(result)
            # Exact normalized-title match against the catalogue.
            match = [i['id'] for i in result if any(x == self.cleantitle(i['title']) for x in [self.cleantitle(show), self.cleantitle(show_alt)])][0]
            url = self.episodes_link % (match, season, episode)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            # The API only answers to the app's User-Agent.
            request = urllib2.Request(url,None)
            request.add_header('User-Agent', 'android-async-http/1.4.1 (http://loopj.com/android-async-http)')
            response = urllib2.urlopen(request, timeout=10)
            result = response.read()
            response.close()
            # The VK oid/id values are obfuscated: the API returns them
            # offset by -(show id + season + episode), undone below.
            param = re.findall('"lang":"en","apple":(\d+?),"google":(\d+?),"microsoft":"(.+?)"', result, re.I)
            num = int(match) + int(season) + int(episode)
            url = 'https://vk.com/video_ext.php?oid=%s&id=%s&hash=%s' % (str(int(param[0][0]) + num), str(int(param[0][1]) + num), param[0][2])
            import commonresolvers
            url = commonresolvers.resolvers().vk(url)
            for i in url: vkbox_sources.append({'source': 'VK', 'quality': i['quality'], 'provider': 'VKBox', 'url': i['url']})
        except:
            return
    def getdata(self):
        """Download the zipped catalogue and return the raw JSON bytes."""
        try:
            import zipfile, StringIO
            data = urllib2.urlopen(self.data_link, timeout=10).read()
            zip = zipfile.ZipFile(StringIO.StringIO(data))
            read = zip.open(self.tv_link)
            result = read.read()
            return result
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        # URLs stored by get() are already resolved.
        return url
class istreamhd:
    """Scraper for the istreamhd.org JSON API (requires user credentials).

    The API endpoints are stored base64-encoded; get() authenticates,
    searches, picks the episode and stores VK links into the module-level
    list ``istreamhd_sources``. resolve() is a no-op.
    """
    def __init__(self):
        self.base_link = 'http://istreamhd.org'
        # Base64-encoded API endpoint URLs (authenticate/search/get_show/get_video).
        self.login_link = 'aHR0cDovL2lzdHJlYW1oZC5vcmcvYXBpL2F1dGhlbnRpY2F0ZS5waHA='
        self.search_link = 'aHR0cDovL2lzdHJlYW1oZC5vcmcvYXBpL3NlYXJjaC5waHA='
        self.show_link = 'aHR0cDovL2lzdHJlYW1oZC5vcmcvYXBpL2dldF9zaG93LnBocA=='
        self.get_link = 'aHR0cDovL2lzdHJlYW1oZC5vcmcvYXBpL2dldF92aWRlby5waHA='
        # Credentials come from the addon settings; get() aborts if unset.
        self.mail, self.password = getSetting("istreamhd_mail"), getSetting("istreamhd_password")
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Authenticate, locate the episode and store VK links (best-effort)."""
        try:
            global istreamhd_sources
            istreamhd_sources = []
            if (self.mail == '' or self.password == ''): raise Exception()
            # Step 1: authenticate and obtain a session token.
            post = urllib.urlencode({'e-mail': self.mail, 'password': self.password})
            result = getUrl(base64.urlsafe_b64decode(self.login_link), post=post).result
            result = json.loads(result)
            token = result['auth']['token']
            # Step 2: search for the show; match by IMDB id in the poster URL.
            post = urllib.urlencode({'token': token, 'query': show})
            result = getUrl(base64.urlsafe_b64decode(self.search_link), post=post).result
            result = json.loads(result)
            url = result['result']['items']
            url = [i for i in url if str('tt' + imdb) in i['poster']][0]
            # Step 3: list the show's episodes and pick season/episode.
            post = urllib.urlencode({'token': token, 'show': url['title'], 'cat_id': url['cat_id']})
            result = getUrl(base64.urlsafe_b64decode(self.show_link), post=post).result
            result = json.loads(result)
            url = result['result']['items']
            url = [i for i in url if i['season'] == str('%01d' % int(season)) and i['episode'] == str('%01d' % int(episode))][0]
            url = url['vid_id']
            # Step 4: fetch the video record and normalize to https.
            post = urllib.urlencode({'token': token, 'vid_id': url})
            result = getUrl(base64.urlsafe_b64decode(self.get_link), post=post).result
            result = json.loads(result)
            url = result['video']['url']
            url = url.replace('http://', 'https://')
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            import commonresolvers
            url = commonresolvers.resolvers().vk(url)
            for i in url: istreamhd_sources.append({'source': 'VK', 'quality': i['quality'], 'provider': 'iStreamHD', 'url': i['url']})
        except:
            return
    def resolve(self, url):
        # URLs stored by get() are already resolved.
        return url
class simplymovies:
    """Scraper for simplymovies.net.

    get() populates the module-level list ``simplymovies_sources`` with
    VK links taken from the site's player iframe. resolve() is a no-op.
    """
    def __init__(self):
        self.base_link = 'http://simplymovies.net'
        self.search_link = 'http://simplymovies.net/tv_shows.php?searchTerm='
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Locate the episode's VK iframe and store it (best-effort)."""
        try:
            global simplymovies_sources
            simplymovies_sources = []
            query = self.search_link + urllib.quote_plus(show.replace(' ', '-'))
            result = getUrl(query).result
            url = common.parseDOM(result, "div", attrs = { "class": "movieInfoHolder" })
            # Prefer a title match; an IMDB-id match (tried second) wins if present.
            try: match = [i for i in url if any(x in self.cleantitle(i) for x in [str('>' + self.cleantitle(show) + '<'), str('>' + self.cleantitle(show_alt) + '<')])][0]
            except: pass
            try: match = [i for i in url if str('tt' + imdb) in i][0]
            except: pass
            url = common.parseDOM(match, "a", ret="href")[0]
            url = '%s/%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            # Episode lists are grouped under "<h3>Season N</h3>" headings.
            url = result.split('<h3>')
            url = [i for i in url if str('Season %01d</h3>' % int(season)) in i][-1]
            url = url.replace(':','<')
            url = re.compile('.*href="(.+?)">Episode %01d<' % int(episode)).findall(url)[0]
            url = '%s/%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            url = common.parseDOM(result, "iframe", ret="src", attrs = { "class": "videoPlayerIframe" })[0]
            url = common.replaceHTMLCodes(url)
            url = url.replace('http://', 'https://')
            url = url.encode('utf-8')
            import commonresolvers
            url = commonresolvers.resolvers().vk(url)
            for i in url: simplymovies_sources.append({'source': 'VK', 'quality': i['quality'], 'provider': 'Simplymovies', 'url': i['url']})
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        # URLs stored by get() are already resolved.
        return url
class moviestorm:
    """Scraper for moviestorm.eu (iShared host links only).

    get() populates the module-level list ``moviestorm_sources``;
    resolve() extracts the stream path from the iShared player page.
    """
    def __init__(self):
        self.base_link = 'http://moviestorm.eu'
        self.search_link = 'http://moviestorm.eu/search?q=%s'
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Collect ishared.eu links for the episode into ``moviestorm_sources``."""
        try:
            global moviestorm_sources
            moviestorm_sources = []
            query = self.search_link % (urllib.quote_plus(show))
            result = getUrl(query).result
            url = common.parseDOM(result, "div", attrs = { "class": "movie_box" })
            # Match the show by IMDB id embedded in its search-result box.
            url = [i for i in url if str('tt' + imdb) in i][0]
            url = common.parseDOM(url, "a", ret="href")[0]
            url = '%s?season=%01d&episode=%01d' % (url, int(season), int(episode))
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            result = common.parseDOM(result, "div", attrs = { "id": "searialinks" })[0]
            links = re.compile('"(http://ishared.eu/.+?)"').findall(result)
            for url in links:
                moviestorm_sources.append({'source': 'iShared', 'quality': 'SD', 'provider': 'Moviestorm', 'url': url})
        except:
            return
    def resolve(self, url):
        """Return the raw stream path scraped from the iShared player page."""
        try:
            result = getUrl(url).result
            url = re.compile('path:"(.+?)"').findall(result)[0]
            return url
        except:
            return
class noobroom:
    """Scraper for noobroom5.com (requires user credentials from settings).

    get() populates the module-level list ``noobroom_sources``; login()
    establishes a session (cookies handled by getUrl/urllib2); resolve()
    re-logs-in and follows the redirect to the final media URL.
    """
    def __init__(self):
        self.base_link = 'http://noobroom5.com'
        self.search_link = 'http://noobroom5.com/search.php?q=%s'
        self.login_link = 'http://noobroom5.com/login.php'
        self.login2_link = 'http://noobroom5.com/login2.php'
        # Credentials come from the addon settings; get() aborts if unset.
        self.mail, self.password = getSetting("noobroom_mail"), getSetting("noobroom_password")
    def get(self, name, title, imdb, tvdb, year, season, episode, show, show_alt, hostDict):
        """Locate the episode's stream file URL and store it (best-effort)."""
        try:
            global noobroom_sources
            noobroom_sources = []
            if (self.mail == '' or self.password == ''): raise Exception()
            # Skip UK-only productions.
            search = 'http://www.imdbapi.com/?i=tt%s' % imdb
            search = getUrl(search).result
            search = json.loads(search)
            country = [i.strip() for i in search['Country'].split(',')]
            if 'UK' in country and not 'USA' in country: return
            query = self.search_link % (urllib.quote_plus(show))
            result = self.login()
            result = getUrl(query).result
            # Only the "TV Series" section of the results is considered.
            url = re.compile('(<i>TV Series</i>.+)').findall(result)[0]
            url = url.split("><a ")
            url = [i for i in url if any(x in self.cleantitle(i) for x in [str('>' + self.cleantitle(show) + '<'), str('>' + self.cleantitle(show_alt) + '<')])][0]
            url = re.compile("href='(.+?)'").findall(url)[0]
            url = '%s%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            # Episode rows start with a bold "SxEE" marker.
            url = re.compile("<b>%01dx%02d .+?style=.+? href='(.+?)'" % (int(season), int(episode))).findall(result)[0]
            url = '%s%s' % (self.base_link, url)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            result = getUrl(url).result
            # Prefer the mp4 variant (tried second so it overwrites the flv hit).
            links = re.compile('"file": "(.+?)"').findall(result)
            try: u = [i for i in links if 'type=flv' in i][0]
            except: pass
            try: u = [i for i in links if 'type=mp4' in i][0]
            except: pass
            url = '%s%s' % (self.base_link, u)
            url = common.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            try:
                # Width above 720 px is labelled HD; default is SD.
                quality = 'SD'
                q = re.compile('"width": "(.+?)"').findall(result)[0]
                if int(q) > 720: quality = 'HD'
            except:
                pass
            noobroom_sources.append({'source': 'Noobroom', 'quality': quality, 'provider': 'Noobroom', 'url': url})
        except:
            return
    def login(self):
        """Prime session cookies and POST the credentials; returns None."""
        try:
            post = urllib.urlencode({'email': self.mail, 'password': self.password})
            result = getUrl(self.login_link, close=False).result
            cookie = getUrl(self.login_link, output='cookie').result
            result = urllib2.Request(self.login2_link, post)
            result = urllib2.urlopen(result, timeout=10)
        except:
            return
    def cleantitle(self, title):
        """Normalize a title: drop country tags, punctuation and whitespace; lowercase."""
        title = re.sub('\n|\s(|[(])(UK|US|AU)(|[)])$|\s(vs|v[.])\s|(:|;|-|"|,|\'|\.|\?)|\s', '', title).lower()
        return title
    def resolve(self, url):
        """Re-login and follow redirects; prefers the '&hd=1' variant if it resolves."""
        try:
            result = self.login()
            try: u = getUrl(url, output='geturl').result
            except: pass
            try: u = getUrl(url.replace('&hd=0', '&hd=1'), output='geturl').result
            except: pass
            return u
        except:
            return
main() | gpl-2.0 |
charlesthk/django-nightmare-pdf | nightmare_pdf/generators.py | 1 | 2545 | import subprocess
import os
from django.core.validators import URLValidator
from nightmare_pdf.settings import pdf_settings
from django.http import (
HttpResponse,
Http404
)
from django.core.files.base import ContentFile
from .models import PdfDoc
from .utils import get_random_filename
# Module-level validator: PDFGenerator only accepts http/https URLs
# (raises django ValidationError otherwise).
validate_url = URLValidator(schemes=['https', 'http'])
class PDFGenerator(object):
    """Render a URL to PDF by shelling out to a node render script.

    Construction does all the work: the page at ``url`` is rendered into a
    temporary file, the bytes are read into memory, and the temp file is
    deleted.  Accessors then expose the in-memory PDF as raw bytes, a
    Django ``ContentFile``, an ``HttpResponse`` attachment, or a saved
    ``PdfDoc`` model instance.
    """

    def __init__(self, url, timeout=1000, page_size='A4', landscape=0,
        print_background=1, margins_type=1, script=pdf_settings.DEFAULT_RENDER_SCRIPT,
        temp_dir=pdf_settings.DEFAULT_TEMP_DIR):
        """
        :param url: http(s) URL to render; validated up front, raises
            django ``ValidationError`` on anything else.
        :param timeout: render timeout passed to the node script.
        :param page_size: page-size name passed to the node script (e.g. 'A4').
        :param landscape: 0/1 flag passed to the node script.
        :param print_background: 0/1 flag passed to the node script.
        :param margins_type: margins-type code passed to the node script.
        :param script: path of the node render script to execute.
        :param temp_dir: directory for the intermediate PDF file.
        """
        validate_url(url)
        self.url = url
        self.timeout = timeout
        self.page_size = page_size
        self.landscape = landscape
        self.print_background = print_background
        self.margins_type = margins_type
        self.script = script
        # Assign temp_dir BEFORE computing the file path so the parameter
        # is honoured (previously __get_filepath always used
        # pdf_settings.DEFAULT_TEMP_DIR, silently ignoring temp_dir).
        self.temp_dir = temp_dir
        self.filename = self.__get_random_filename()
        self.filepath = self.__get_filepath()
        self.pdf_data = None
        self.__generate()
        self.__set_pdf_data()
        self.__remove_source_file()

    def __get_random_filename(self):
        """Return a random 20-character basename with a ``.pdf`` extension."""
        name = get_random_filename(20)
        return "%s.pdf" % name

    def __get_filepath(self):
        """Absolute path of the intermediate PDF inside ``self.temp_dir``."""
        return os.path.join(self.temp_dir, self.filename)

    def __generate(self):
        """
        Render the page by calling:

            node render_pdf.js [url] [filepath]
                --timeout [timeout]
                --pageSize [page_size]
                --landscape [landscape]
                --printBackground [print_background]
                --marginsType [margins_type]

        Returns the subprocess exit code.
        """
        command = [
            pdf_settings.NODE_PATH,
            self.script,
            self.url,
            self.filepath,
            '--timeout',
            str(self.timeout),
            '--pageSize',
            self.page_size,
            '--landscape',
            str(self.landscape),
            '--printBackground',
            str(self.print_background),
            '--marginsType',
            str(self.margins_type)
        ]
        return subprocess.call(command)

    def __set_pdf_data(self):
        """Load the rendered file into memory.

        Opened in binary mode: PDFs are binary and text mode corrupts them
        on platforms that translate newlines.
        """
        with open(self.filepath, 'rb') as pdf:
            self.pdf_data = pdf.read()

    def get_content_file(self, filename):
        """Return the PDF wrapped in a Django ``ContentFile`` named *filename*."""
        return ContentFile(self.pdf_data, name=filename)

    def get_data(self):
        """Return the raw PDF bytes."""
        return self.pdf_data

    def get_http_response(self, filename):
        """Return an ``HttpResponse`` serving the PDF as an attachment.

        :param filename: download name without extension; '.pdf' is appended.
        """
        response = HttpResponse(self.pdf_data, content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % filename
        return response

    def __remove_source_file(self):
        """Delete the intermediate file.

        Uses ``os.remove`` instead of spawning ``rm`` via subprocess:
        portable and avoids a needless process launch.
        """
        os.remove(self.filepath)

    def save(self, filename, title='', description=''):
        """Persist the PDF as a ``PdfDoc`` model instance and return it."""
        file = self.get_content_file(filename)
        document = PdfDoc(
            title=title,
            description=description,
            document=file)
        document.save()
        return document
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.